repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
models | models-master/research/slim/preprocessing/__init__.py |  | 1 | 0 | 0 | py |
models | models-master/research/slim/preprocessing/cifarnet_preprocessing.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images in CIFAR-10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
_PADDING = 4
def preprocess_for_train(image,
output_height,
output_width,
padding=_PADDING,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image for training.
  Note that the image is padded, randomly cropped back to the output size,
  randomly flipped, and then photometrically distorted.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
    padding: The amount of padding before and after each dimension of the image.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
if padding > 0:
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
  # Randomly crop a [height, width] section of the image. The crop's channel
  # count must match the (possibly grayscale-converted) input.
  num_channels = 1 if use_grayscale else 3
  distorted_image = tf.random_crop(image,
                                   [output_height, output_width, num_channels])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
if add_image_summaries:
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image,
output_height,
output_width,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
# Resize and crop if needed.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_height,
                                                         output_width)
if add_image_summaries:
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image,
output_height,
output_width,
is_training=False,
add_image_summaries=True,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
add_image_summaries: Enable image summaries.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(
image,
output_height,
output_width,
add_image_summaries=add_image_summaries,
use_grayscale=use_grayscale)
else:
return preprocess_for_eval(
image,
output_height,
output_width,
add_image_summaries=add_image_summaries,
use_grayscale=use_grayscale)
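# Illustrative usage sketch; an assumption for this edit, not part of the
# original file. `raw_image` stands for a decoded uint8 CIFAR-10 tensor of
# shape [32, 32, 3], e.g. one element coming out of an input pipeline.
def _example_usage(raw_image):
  train_image = preprocess_image(raw_image, 32, 32, is_training=True,
                                 add_image_summaries=False)
  eval_image = preprocess_image(raw_image, 32, 32, is_training=False,
                                add_image_summaries=False)
  return train_image, eval_image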
| 5,330 | 34.778523 | 80 | py |
models | models-master/research/slim/preprocessing/lenet_preprocessing.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities for preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def preprocess_image(image,
output_height,
output_width,
is_training,
use_grayscale=False):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
use_grayscale: Whether to convert the image from RGB to grayscale.
Returns:
A preprocessed image.
"""
del is_training # Unused argument
image = tf.to_float(image)
if use_grayscale:
image = tf.image.rgb_to_grayscale(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
image = tf.subtract(image, 128.0)
image = tf.div(image, 128.0)
return image
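# Worked check (illustrative note, not in the original file): the subtract and
# divide steps above map uint8 pixel values from [0, 255] to roughly [-1, 1]:
# (0 - 128) / 128 = -1.0 and (255 - 128) / 128 ~= 0.992.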
| 1,804 | 34.392157 | 80 | py |
models | models-master/research/vid2depth/inference.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates depth estimates for an entire KITTI video."""
# Example usage:
#
# python inference.py \
# --logtostderr \
# --kitti_dir ~/vid2depth/kitti-raw-uncompressed \
# --kitti_video 2011_09_26/2011_09_26_drive_0009_sync \
# --output_dir ~/vid2depth/inference \
# --model_ckpt ~/vid2depth/trained-model/model-119496
#
# python inference.py \
# --logtostderr \
# --kitti_dir ~/vid2depth/kitti-raw-uncompressed \
# --kitti_video test_files_eigen \
# --output_dir ~/vid2depth/inference \
# --model_ckpt ~/vid2depth/trained-model/model-119496
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import matplotlib.pyplot as plt
import model
import numpy as np
import scipy.misc
import tensorflow as tf
import util
gfile = tf.gfile
HOME_DIR = os.path.expanduser('~')
DEFAULT_OUTPUT_DIR = os.path.join(HOME_DIR, 'vid2depth/inference')
DEFAULT_KITTI_DIR = os.path.join(HOME_DIR, 'kitti-raw-uncompressed')
flags.DEFINE_string('output_dir', DEFAULT_OUTPUT_DIR,
'Directory to store estimated depth maps.')
flags.DEFINE_string('kitti_dir', DEFAULT_KITTI_DIR, 'KITTI dataset directory.')
flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to load.')
flags.DEFINE_string('kitti_video', None, 'KITTI video directory name.')
flags.DEFINE_integer('batch_size', 4, 'The size of a sample batch.')
flags.DEFINE_integer('img_height', 128, 'Image height.')
flags.DEFINE_integer('img_width', 416, 'Image width.')
flags.DEFINE_integer('seq_length', 3, 'Sequence length for each example.')
FLAGS = flags.FLAGS
flags.mark_flag_as_required('kitti_video')
flags.mark_flag_as_required('model_ckpt')
CMAP = 'plasma'
def _run_inference():
"""Runs all images through depth model and saves depth maps."""
ckpt_basename = os.path.basename(FLAGS.model_ckpt)
ckpt_modelname = os.path.basename(os.path.dirname(FLAGS.model_ckpt))
output_dir = os.path.join(FLAGS.output_dir,
FLAGS.kitti_video.replace('/', '_') + '_' +
ckpt_modelname + '_' + ckpt_basename)
if not gfile.Exists(output_dir):
gfile.MakeDirs(output_dir)
inference_model = model.Model(is_training=False,
seq_length=FLAGS.seq_length,
batch_size=FLAGS.batch_size,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width)
vars_to_restore = util.get_vars_to_restore(FLAGS.model_ckpt)
saver = tf.train.Saver(vars_to_restore)
sv = tf.train.Supervisor(logdir='/tmp/', saver=None)
with sv.managed_session() as sess:
saver.restore(sess, FLAGS.model_ckpt)
if FLAGS.kitti_video == 'test_files_eigen':
im_files = util.read_text_lines(
util.get_resource_path('dataset/kitti/test_files_eigen.txt'))
im_files = [os.path.join(FLAGS.kitti_dir, f) for f in im_files]
else:
video_path = os.path.join(FLAGS.kitti_dir, FLAGS.kitti_video)
im_files = gfile.Glob(os.path.join(video_path, 'image_02/data', '*.png'))
im_files = [f for f in im_files if 'disp' not in f]
im_files = sorted(im_files)
for i in range(0, len(im_files), FLAGS.batch_size):
if i % 100 == 0:
logging.info('Generating from %s: %d/%d', ckpt_basename, i,
len(im_files))
inputs = np.zeros(
(FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
dtype=np.uint8)
for b in range(FLAGS.batch_size):
idx = i + b
if idx >= len(im_files):
break
im = scipy.misc.imread(im_files[idx])
inputs[b] = scipy.misc.imresize(im, (FLAGS.img_height, FLAGS.img_width))
results = inference_model.inference(inputs, sess, mode='depth')
for b in range(FLAGS.batch_size):
idx = i + b
if idx >= len(im_files):
break
if FLAGS.kitti_video == 'test_files_eigen':
depth_path = os.path.join(output_dir, '%03d.png' % idx)
else:
depth_path = os.path.join(output_dir, '%04d.png' % idx)
depth_map = results['depth'][b]
depth_map = np.squeeze(depth_map)
colored_map = _normalize_depth_for_display(depth_map, cmap=CMAP)
input_float = inputs[b].astype(np.float32) / 255.0
vertical_stack = np.concatenate((input_float, colored_map), axis=0)
scipy.misc.imsave(depth_path, vertical_stack)
def _gray2rgb(im, cmap=CMAP):
cmap = plt.get_cmap(cmap)
rgba_img = cmap(im.astype(np.float32))
rgb_img = np.delete(rgba_img, 3, 2)
return rgb_img
def _normalize_depth_for_display(depth,
pc=95,
crop_percent=0,
normalizer=None,
cmap=CMAP):
"""Converts a depth map to an RGB image."""
# Convert to disparity.
disp = 1.0 / (depth + 1e-6)
if normalizer is not None:
disp /= normalizer
else:
disp /= (np.percentile(disp, pc) + 1e-6)
disp = np.clip(disp, 0, 1)
disp = _gray2rgb(disp, cmap=cmap)
keep_h = int(disp.shape[0] * (1 - crop_percent))
disp = disp[:keep_h]
return disp
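# Minimal sketch of the colorization path above; illustrative only, not part
# of the original file. A synthetic depth map with values in [1, 50] meters
# becomes an [H, W, 3] float RGB image in [0, 1].
def _example_colorize_synthetic_depth():
  depth = np.linspace(1.0, 50.0, num=128 * 416).reshape(128, 416)
  return _normalize_depth_for_display(depth, cmap=CMAP)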
def main(_):
_run_inference()
if __name__ == '__main__':
app.run(main)
| 6,027 | 35.981595 | 80 | py |
models | models-master/research/vid2depth/reader.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads data that is produced by dataset/gen_data.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from absl import logging
import tensorflow as tf
import util
gfile = tf.gfile
QUEUE_SIZE = 2000
QUEUE_BUFFER = 3
class DataReader(object):
"""Reads stored sequences which are produced by dataset/gen_data.py."""
def __init__(self, data_dir, batch_size, img_height, img_width, seq_length,
num_scales):
self.data_dir = data_dir
self.batch_size = batch_size
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.num_scales = num_scales
def read_data(self):
"""Provides images and camera intrinsics."""
with tf.name_scope('data_loading'):
with tf.name_scope('enqueue_paths'):
seed = random.randint(0, 2**31 - 1)
self.file_lists = self.compile_file_list(self.data_dir, 'train')
image_paths_queue = tf.train.string_input_producer(
self.file_lists['image_file_list'], seed=seed, shuffle=True)
cam_paths_queue = tf.train.string_input_producer(
self.file_lists['cam_file_list'], seed=seed, shuffle=True)
img_reader = tf.WholeFileReader()
_, image_contents = img_reader.read(image_paths_queue)
image_seq = tf.image.decode_jpeg(image_contents)
with tf.name_scope('load_intrinsics'):
cam_reader = tf.TextLineReader()
_, raw_cam_contents = cam_reader.read(cam_paths_queue)
rec_def = []
for _ in range(9):
rec_def.append([1.0])
raw_cam_vec = tf.decode_csv(raw_cam_contents, record_defaults=rec_def)
raw_cam_vec = tf.stack(raw_cam_vec)
intrinsics = tf.reshape(raw_cam_vec, [3, 3])
with tf.name_scope('convert_image'):
image_seq = self.preprocess_image(image_seq) # Converts to float.
with tf.name_scope('image_augmentation'):
image_seq = self.augment_image_colorspace(image_seq)
image_stack = self.unpack_images(image_seq)
with tf.name_scope('image_augmentation_scale_crop'):
image_stack, intrinsics = self.augment_images_scale_crop(
image_stack, intrinsics, self.img_height, self.img_width)
with tf.name_scope('multi_scale_intrinsics'):
intrinsic_mat = self.get_multi_scale_intrinsics(intrinsics,
self.num_scales)
intrinsic_mat.set_shape([self.num_scales, 3, 3])
intrinsic_mat_inv = tf.matrix_inverse(intrinsic_mat)
intrinsic_mat_inv.set_shape([self.num_scales, 3, 3])
with tf.name_scope('batching'):
image_stack, intrinsic_mat, intrinsic_mat_inv = (
tf.train.shuffle_batch(
[image_stack, intrinsic_mat, intrinsic_mat_inv],
batch_size=self.batch_size,
capacity=QUEUE_SIZE + QUEUE_BUFFER * self.batch_size,
min_after_dequeue=QUEUE_SIZE))
logging.info('image_stack: %s', util.info(image_stack))
return image_stack, intrinsic_mat, intrinsic_mat_inv
def unpack_images(self, image_seq):
"""[h, w * seq_length, 3] -> [h, w, 3 * seq_length]."""
with tf.name_scope('unpack_images'):
image_list = [
image_seq[:, i * self.img_width:(i + 1) * self.img_width, :]
for i in range(self.seq_length)
]
image_stack = tf.concat(image_list, axis=2)
image_stack.set_shape(
[self.img_height, self.img_width, self.seq_length * 3])
return image_stack
@classmethod
def preprocess_image(cls, image):
# Convert from uint8 to float.
return tf.image.convert_image_dtype(image, dtype=tf.float32)
# Source: https://github.com/mrharicot/monodepth.
@classmethod
def augment_image_colorspace(cls, image_seq):
"""Apply data augmentation to inputs."""
# Randomly shift gamma.
random_gamma = tf.random_uniform([], 0.8, 1.2)
image_seq_aug = image_seq**random_gamma
# Randomly shift brightness.
random_brightness = tf.random_uniform([], 0.5, 2.0)
image_seq_aug *= random_brightness
# Randomly shift color.
random_colors = tf.random_uniform([3], 0.8, 1.2)
white = tf.ones([tf.shape(image_seq)[0], tf.shape(image_seq)[1]])
color_image = tf.stack([white * random_colors[i] for i in range(3)], axis=2)
image_seq_aug *= color_image
# Saturate.
image_seq_aug = tf.clip_by_value(image_seq_aug, 0, 1)
return image_seq_aug
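  # Worked example for the augmentation above (illustrative, not from the
  # original file): with random_gamma=1.2, random_brightness=0.5 and a green
  # color multiplier of 0.9, a green-channel value of 0.5 becomes
  # clip((0.5 ** 1.2) * 0.5 * 0.9, 0, 1) ~= 0.196.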
@classmethod
def augment_images_scale_crop(cls, im, intrinsics, out_h, out_w):
"""Randomly scales and crops image."""
def scale_randomly(im, intrinsics):
"""Scales image and adjust intrinsics accordingly."""
in_h, in_w, _ = im.get_shape().as_list()
scaling = tf.random_uniform([2], 1, 1.15)
x_scaling = scaling[0]
y_scaling = scaling[1]
out_h = tf.cast(in_h * y_scaling, dtype=tf.int32)
out_w = tf.cast(in_w * x_scaling, dtype=tf.int32)
# Add batch.
im = tf.expand_dims(im, 0)
im = tf.image.resize_area(im, [out_h, out_w])
im = im[0]
fx = intrinsics[0, 0] * x_scaling
fy = intrinsics[1, 1] * y_scaling
cx = intrinsics[0, 2] * x_scaling
cy = intrinsics[1, 2] * y_scaling
intrinsics = cls.make_intrinsics_matrix(fx, fy, cx, cy)
return im, intrinsics
# Random cropping
def crop_randomly(im, intrinsics, out_h, out_w):
"""Crops image and adjust intrinsics accordingly."""
# batch_size, in_h, in_w, _ = im.get_shape().as_list()
in_h, in_w, _ = tf.unstack(tf.shape(im))
offset_y = tf.random_uniform([1], 0, in_h - out_h + 1, dtype=tf.int32)[0]
offset_x = tf.random_uniform([1], 0, in_w - out_w + 1, dtype=tf.int32)[0]
im = tf.image.crop_to_bounding_box(im, offset_y, offset_x, out_h, out_w)
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
cx = intrinsics[0, 2] - tf.cast(offset_x, dtype=tf.float32)
cy = intrinsics[1, 2] - tf.cast(offset_y, dtype=tf.float32)
intrinsics = cls.make_intrinsics_matrix(fx, fy, cx, cy)
return im, intrinsics
im, intrinsics = scale_randomly(im, intrinsics)
im, intrinsics = crop_randomly(im, intrinsics, out_h, out_w)
return im, intrinsics
def compile_file_list(self, data_dir, split, load_pose=False):
"""Creates a list of input files."""
logging.info('data_dir: %s', data_dir)
with gfile.Open(os.path.join(data_dir, '%s.txt' % split), 'r') as f:
frames = f.readlines()
subfolders = [x.split(' ')[0] for x in frames]
frame_ids = [x.split(' ')[1][:-1] for x in frames]
image_file_list = [
os.path.join(data_dir, subfolders[i], frame_ids[i] + '.jpg')
for i in range(len(frames))
]
cam_file_list = [
os.path.join(data_dir, subfolders[i], frame_ids[i] + '_cam.txt')
for i in range(len(frames))
]
file_lists = {}
file_lists['image_file_list'] = image_file_list
file_lists['cam_file_list'] = cam_file_list
if load_pose:
pose_file_list = [
os.path.join(data_dir, subfolders[i], frame_ids[i] + '_pose.txt')
for i in range(len(frames))
]
file_lists['pose_file_list'] = pose_file_list
self.steps_per_epoch = len(image_file_list) // self.batch_size
return file_lists
@classmethod
def make_intrinsics_matrix(cls, fx, fy, cx, cy):
r1 = tf.stack([fx, 0, cx])
r2 = tf.stack([0, fy, cy])
r3 = tf.constant([0., 0., 1.])
intrinsics = tf.stack([r1, r2, r3])
return intrinsics
@classmethod
def get_multi_scale_intrinsics(cls, intrinsics, num_scales):
"""Returns multiple intrinsic matrices for different scales."""
intrinsics_multi_scale = []
# Scale the intrinsics accordingly for each scale
for s in range(num_scales):
fx = intrinsics[0, 0] / (2**s)
fy = intrinsics[1, 1] / (2**s)
cx = intrinsics[0, 2] / (2**s)
cy = intrinsics[1, 2] / (2**s)
intrinsics_multi_scale.append(cls.make_intrinsics_matrix(fx, fy, cx, cy))
intrinsics_multi_scale = tf.stack(intrinsics_multi_scale)
return intrinsics_multi_scale
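  # Worked example for the scaling above (illustrative, not from the original
  # file): with fx=100 and cx=208 at scale s=0, scale s=1 gives fx=50 and
  # cx=104, matching an image downsampled by a factor of 2 per side.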
| 8,847 | 38.324444 | 80 | py |
models | models-master/research/vid2depth/project.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Geometry utilities for projecting frames based on depth and motion.
Modified from Spatial Transformer Networks:
https://github.com/tensorflow/models/blob/master/transformer/spatial_transformer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import tensorflow as tf
def inverse_warp(img, depth, egomotion, intrinsic_mat, intrinsic_mat_inv):
"""Inverse warp a source image to the target image plane.
Args:
img: The source image (to sample pixels from) -- [B, H, W, 3].
depth: Depth map of the target image -- [B, H, W].
egomotion: 6DoF egomotion vector from target to source -- [B, 6].
intrinsic_mat: Camera intrinsic matrix -- [B, 3, 3].
intrinsic_mat_inv: Inverse of the intrinsic matrix -- [B, 3, 3].
  Returns:
    Projected source image resampled at the target frame's pixel coordinates,
    and a mask indicating which target pixels have valid source samples.
  """
dims = tf.shape(img)
batch_size, img_height, img_width = dims[0], dims[1], dims[2]
depth = tf.reshape(depth, [batch_size, 1, img_height * img_width])
grid = _meshgrid_abs(img_height, img_width)
grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
cam_coords = _pixel2cam(depth, grid, intrinsic_mat_inv)
ones = tf.ones([batch_size, 1, img_height * img_width])
cam_coords_hom = tf.concat([cam_coords, ones], axis=1)
egomotion_mat = _egomotion_vec2mat(egomotion, batch_size)
# Get projection matrix for target camera frame to source pixel frame
hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
hom_filler = tf.tile(hom_filler, [batch_size, 1, 1])
intrinsic_mat_hom = tf.concat(
[intrinsic_mat, tf.zeros([batch_size, 3, 1])], axis=2)
intrinsic_mat_hom = tf.concat([intrinsic_mat_hom, hom_filler], axis=1)
proj_target_cam_to_source_pixel = tf.matmul(intrinsic_mat_hom, egomotion_mat)
source_pixel_coords = _cam2pixel(cam_coords_hom,
proj_target_cam_to_source_pixel)
source_pixel_coords = tf.reshape(source_pixel_coords,
[batch_size, 2, img_height, img_width])
source_pixel_coords = tf.transpose(source_pixel_coords, perm=[0, 2, 3, 1])
projected_img, mask = _spatial_transformer(img, source_pixel_coords)
return projected_img, mask
def _pixel2cam(depth, pixel_coords, intrinsic_mat_inv):
"""Transform coordinates in the pixel frame to the camera frame."""
cam_coords = tf.matmul(intrinsic_mat_inv, pixel_coords) * depth
return cam_coords
def _cam2pixel(cam_coords, proj_c2p):
"""Transform coordinates in the camera frame to the pixel frame."""
pcoords = tf.matmul(proj_c2p, cam_coords)
x = tf.slice(pcoords, [0, 0, 0], [-1, 1, -1])
y = tf.slice(pcoords, [0, 1, 0], [-1, 1, -1])
z = tf.slice(pcoords, [0, 2, 0], [-1, 1, -1])
  # A small epsilon guards against division by zero; whether it is strictly
  # necessary is untested.
x_norm = x / (z + 1e-10)
y_norm = y / (z + 1e-10)
pixel_coords = tf.concat([x_norm, y_norm], axis=1)
return pixel_coords
def _meshgrid_abs(height, width):
"""Meshgrid in the absolute coordinates."""
x_t = tf.matmul(
tf.ones(shape=tf.stack([height, 1])),
tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
y_t = tf.matmul(
tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
tf.ones(shape=tf.stack([1, width])))
x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
ones = tf.ones_like(x_t_flat)
grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0)
return grid
def _euler2mat(z, y, x):
"""Converts euler angles to rotation matrix.
From:
https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
TODO: Remove the dimension for 'N' (deprecated for converting all source
poses altogether).
Args:
z: rotation angle along z axis (in radians) -- size = [B, n]
y: rotation angle along y axis (in radians) -- size = [B, n]
x: rotation angle along x axis (in radians) -- size = [B, n]
Returns:
Rotation matrix corresponding to the euler angles, with shape [B, n, 3, 3].
"""
batch_size = tf.shape(z)[0]
n = 1
z = tf.clip_by_value(z, -np.pi, np.pi)
y = tf.clip_by_value(y, -np.pi, np.pi)
x = tf.clip_by_value(x, -np.pi, np.pi)
# Expand to B x N x 1 x 1
z = tf.expand_dims(tf.expand_dims(z, -1), -1)
y = tf.expand_dims(tf.expand_dims(y, -1), -1)
x = tf.expand_dims(tf.expand_dims(x, -1), -1)
zeros = tf.zeros([batch_size, n, 1, 1])
ones = tf.ones([batch_size, n, 1, 1])
cosz = tf.cos(z)
sinz = tf.sin(z)
rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
cosy = tf.cos(y)
siny = tf.sin(y)
roty_1 = tf.concat([cosy, zeros, siny], axis=3)
roty_2 = tf.concat([zeros, ones, zeros], axis=3)
roty_3 = tf.concat([-siny, zeros, cosy], axis=3)
ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
cosx = tf.cos(x)
sinx = tf.sin(x)
rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
return tf.matmul(tf.matmul(xmat, ymat), zmat)
def _egomotion_vec2mat(vec, batch_size):
"""Converts 6DoF transform vector to transformation matrix.
Args:
vec: 6DoF parameters [tx, ty, tz, rx, ry, rz] -- [B, 6].
batch_size: Batch size.
Returns:
A transformation matrix -- [B, 4, 4].
"""
translation = tf.slice(vec, [0, 0], [-1, 3])
translation = tf.expand_dims(translation, -1)
rx = tf.slice(vec, [0, 3], [-1, 1])
ry = tf.slice(vec, [0, 4], [-1, 1])
rz = tf.slice(vec, [0, 5], [-1, 1])
rot_mat = _euler2mat(rz, ry, rx)
rot_mat = tf.squeeze(rot_mat, squeeze_dims=[1])
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch_size, 1, 1])
transform_mat = tf.concat([rot_mat, translation], axis=2)
transform_mat = tf.concat([transform_mat, filler], axis=1)
return transform_mat
def _bilinear_sampler(im, x, y, name='bilinear_sampler'):
"""Perform bilinear sampling on im given list of x, y coordinates.
Implements the differentiable sampling mechanism with bilinear kernel
in https://arxiv.org/abs/1506.02025.
x,y are tensors specifying normalized coordinates [-1, 1] to be sampled on im.
For example, (-1, -1) in (x, y) corresponds to pixel location (0, 0) in im,
and (1, 1) in (x, y) corresponds to the bottom right pixel in im.
Args:
im: Batch of images with shape [B, h, w, channels].
x: Tensor of normalized x coordinates in [-1, 1], with shape [B, h, w, 1].
y: Tensor of normalized y coordinates in [-1, 1], with shape [B, h, w, 1].
name: Name scope for ops.
Returns:
Sampled image with shape [B, h, w, channels].
Principled mask with shape [B, h, w, 1], dtype:float32. A value of 1.0
in the mask indicates that the corresponding coordinate in the sampled
image is valid.
"""
with tf.variable_scope(name):
x = tf.reshape(x, [-1])
y = tf.reshape(y, [-1])
# Constants.
batch_size = tf.shape(im)[0]
_, height, width, channels = im.get_shape().as_list()
x = tf.to_float(x)
y = tf.to_float(y)
height_f = tf.cast(height, 'float32')
width_f = tf.cast(width, 'float32')
zero = tf.constant(0, dtype=tf.int32)
max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
# Scale indices from [-1, 1] to [0, width - 1] or [0, height - 1].
x = (x + 1.0) * (width_f - 1.0) / 2.0
y = (y + 1.0) * (height_f - 1.0) / 2.0
# Compute the coordinates of the 4 pixels to sample from.
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
mask = tf.logical_and(
tf.logical_and(x0 >= zero, x1 <= max_x),
tf.logical_and(y0 >= zero, y1 <= max_y))
mask = tf.to_float(mask)
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
dim2 = width
dim1 = width * height
# Create base index.
base = tf.range(batch_size) * dim1
base = tf.reshape(base, [-1, 1])
base = tf.tile(base, [1, height * width])
base = tf.reshape(base, [-1])
base_y0 = base + y0 * dim2
base_y1 = base + y1 * dim2
idx_a = base_y0 + x0
idx_b = base_y1 + x0
idx_c = base_y0 + x1
idx_d = base_y1 + x1
# Use indices to lookup pixels in the flat image and restore channels dim.
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.to_float(im_flat)
pixel_a = tf.gather(im_flat, idx_a)
pixel_b = tf.gather(im_flat, idx_b)
pixel_c = tf.gather(im_flat, idx_c)
pixel_d = tf.gather(im_flat, idx_d)
x1_f = tf.to_float(x1)
y1_f = tf.to_float(y1)
# And finally calculate interpolated values.
wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1)
wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1)
wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1)
output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d])
output = tf.reshape(output, tf.stack([batch_size, height, width, channels]))
mask = tf.reshape(mask, tf.stack([batch_size, height, width, 1]))
return output, mask
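# Worked example for the bilinear weights above (illustrative, not from the
# original file): a sample at (x, y) = (2.3, 5.6) has corners x0=2, x1=3,
# y0=5, y1=6, giving wa = 0.7 * 0.4 = 0.28, wb = 0.7 * 0.6 = 0.42,
# wc = 0.3 * 0.4 = 0.12 and wd = 0.3 * 0.6 = 0.18, which sum to 1.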
def _spatial_transformer(img, coords):
"""A wrapper over binlinear_sampler(), taking absolute coords as input."""
img_height = tf.cast(tf.shape(img)[1], tf.float32)
img_width = tf.cast(tf.shape(img)[2], tf.float32)
px = coords[:, :, :, :1]
py = coords[:, :, :, 1:]
# Normalize coordinates to [-1, 1] to send to _bilinear_sampler.
px = px / (img_width - 1) * 2.0 - 1.0
py = py / (img_height - 1) * 2.0 - 1.0
output_img, mask = _bilinear_sampler(img, px, py)
return output_img, mask
def get_cloud(depth, intrinsics_inv, name=None): # pylint: disable=unused-argument
"""Convert depth map to 3D point cloud."""
with tf.name_scope(name):
dims = depth.shape.as_list()
batch_size, img_height, img_width = dims[0], dims[1], dims[2]
depth = tf.reshape(depth, [batch_size, 1, img_height * img_width])
grid = _meshgrid_abs(img_height, img_width)
grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
cam_coords = _pixel2cam(depth, grid, intrinsics_inv)
cam_coords = tf.transpose(cam_coords, [0, 2, 1])
cam_coords = tf.reshape(cam_coords, [batch_size, img_height, img_width, 3])
logging.info('depth -> cloud: %s', cam_coords)
return cam_coords
| 11,467 | 36.477124 | 83 | py |
models | models-master/research/vid2depth/model.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build model for inference or training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import nets
from ops import icp_grad # pylint: disable=unused-import
from ops.icp_op import icp
import project
import reader
import tensorflow as tf
import util
gfile = tf.gfile
slim = tf.contrib.slim
NUM_SCALES = 4
class Model(object):
"""Model code from SfMLearner."""
def __init__(self,
data_dir=None,
is_training=True,
learning_rate=0.0002,
beta1=0.9,
reconstr_weight=0.85,
smooth_weight=0.05,
ssim_weight=0.15,
icp_weight=0.0,
batch_size=4,
img_height=128,
img_width=416,
seq_length=3,
legacy_mode=False):
self.data_dir = data_dir
self.is_training = is_training
self.learning_rate = learning_rate
self.reconstr_weight = reconstr_weight
self.smooth_weight = smooth_weight
self.ssim_weight = ssim_weight
self.icp_weight = icp_weight
self.beta1 = beta1
self.batch_size = batch_size
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.legacy_mode = legacy_mode
logging.info('data_dir: %s', data_dir)
logging.info('learning_rate: %s', learning_rate)
logging.info('beta1: %s', beta1)
logging.info('smooth_weight: %s', smooth_weight)
logging.info('ssim_weight: %s', ssim_weight)
logging.info('icp_weight: %s', icp_weight)
logging.info('batch_size: %s', batch_size)
logging.info('img_height: %s', img_height)
logging.info('img_width: %s', img_width)
logging.info('seq_length: %s', seq_length)
logging.info('legacy_mode: %s', legacy_mode)
if self.is_training:
self.reader = reader.DataReader(self.data_dir, self.batch_size,
self.img_height, self.img_width,
self.seq_length, NUM_SCALES)
self.build_train_graph()
else:
self.build_depth_test_graph()
self.build_egomotion_test_graph()
# At this point, the model is ready. Print some info on model params.
util.count_parameters()
def build_train_graph(self):
self.build_inference_for_training()
self.build_loss()
self.build_train_op()
self.build_summaries()
def build_inference_for_training(self):
"""Invokes depth and ego-motion networks and computes clouds if needed."""
(self.image_stack, self.intrinsic_mat, self.intrinsic_mat_inv) = (
self.reader.read_data())
with tf.name_scope('egomotion_prediction'):
self.egomotion, _ = nets.egomotion_net(self.image_stack, is_training=True,
legacy_mode=self.legacy_mode)
with tf.variable_scope('depth_prediction'):
# Organized by ...[i][scale]. Note that the order is flipped in
# variables in build_loss() below.
self.disp = {}
self.depth = {}
if self.icp_weight > 0:
self.cloud = {}
for i in range(self.seq_length):
image = self.image_stack[:, :, :, 3 * i:3 * (i + 1)]
multiscale_disps_i, _ = nets.disp_net(image, is_training=True)
multiscale_depths_i = [1.0 / d for d in multiscale_disps_i]
self.disp[i] = multiscale_disps_i
self.depth[i] = multiscale_depths_i
if self.icp_weight > 0:
multiscale_clouds_i = [
project.get_cloud(d,
self.intrinsic_mat_inv[:, s, :, :],
name='cloud%d_%d' % (s, i))
for (s, d) in enumerate(multiscale_depths_i)
]
self.cloud[i] = multiscale_clouds_i
# Reuse the same depth graph for all images.
tf.get_variable_scope().reuse_variables()
logging.info('disp: %s', util.info(self.disp))
def build_loss(self):
"""Adds ops for computing loss."""
with tf.name_scope('compute_loss'):
self.reconstr_loss = 0
self.smooth_loss = 0
self.ssim_loss = 0
self.icp_transform_loss = 0
self.icp_residual_loss = 0
# self.images is organized by ...[scale][B, h, w, seq_len * 3].
self.images = [{} for _ in range(NUM_SCALES)]
# Following nested lists are organized by ...[scale][source-target].
self.warped_image = [{} for _ in range(NUM_SCALES)]
self.warp_mask = [{} for _ in range(NUM_SCALES)]
self.warp_error = [{} for _ in range(NUM_SCALES)]
self.ssim_error = [{} for _ in range(NUM_SCALES)]
self.icp_transform = [{} for _ in range(NUM_SCALES)]
self.icp_residual = [{} for _ in range(NUM_SCALES)]
self.middle_frame_index = util.get_seq_middle(self.seq_length)
# Compute losses at each scale.
for s in range(NUM_SCALES):
# Scale image stack.
height_s = int(self.img_height / (2**s))
width_s = int(self.img_width / (2**s))
self.images[s] = tf.image.resize_area(self.image_stack,
[height_s, width_s])
# Smoothness.
if self.smooth_weight > 0:
for i in range(self.seq_length):
# In legacy mode, use the depth map from the middle frame only.
if not self.legacy_mode or i == self.middle_frame_index:
self.smooth_loss += 1.0 / (2**s) * self.depth_smoothness(
self.disp[i][s], self.images[s][:, :, :, 3 * i:3 * (i + 1)])
for i in range(self.seq_length):
for j in range(self.seq_length):
# Only consider adjacent frames.
if i == j or abs(i - j) != 1:
continue
# In legacy mode, only consider the middle frame as target.
if self.legacy_mode and j != self.middle_frame_index:
continue
source = self.images[s][:, :, :, 3 * i:3 * (i + 1)]
target = self.images[s][:, :, :, 3 * j:3 * (j + 1)]
target_depth = self.depth[j][s]
key = '%d-%d' % (i, j)
# Extract ego-motion from i to j
egomotion_index = min(i, j)
egomotion_mult = 1
if i > j:
# Need to inverse egomotion when going back in sequence.
egomotion_mult *= -1
            # For compatibility with SfMLearner, interpret all egomotion vectors
# as pointing toward the middle frame. Note that unlike SfMLearner,
# each vector captures the motion to/from its next frame, and not
# the center frame. Although with seq_length == 3, there is no
# difference.
if self.legacy_mode:
if egomotion_index >= self.middle_frame_index:
egomotion_mult *= -1
egomotion = egomotion_mult * self.egomotion[:, egomotion_index, :]
# Inverse warp the source image to the target image frame for
# photometric consistency loss.
self.warped_image[s][key], self.warp_mask[s][key] = (
project.inverse_warp(source,
target_depth,
egomotion,
self.intrinsic_mat[:, s, :, :],
self.intrinsic_mat_inv[:, s, :, :]))
# Reconstruction loss.
self.warp_error[s][key] = tf.abs(self.warped_image[s][key] - target)
self.reconstr_loss += tf.reduce_mean(
self.warp_error[s][key] * self.warp_mask[s][key])
# SSIM.
if self.ssim_weight > 0:
self.ssim_error[s][key] = self.ssim(self.warped_image[s][key],
target)
# TODO(rezama): This should be min_pool2d().
ssim_mask = slim.avg_pool2d(self.warp_mask[s][key], 3, 1, 'VALID')
self.ssim_loss += tf.reduce_mean(
self.ssim_error[s][key] * ssim_mask)
# 3D loss.
if self.icp_weight > 0:
cloud_a = self.cloud[j][s]
cloud_b = self.cloud[i][s]
self.icp_transform[s][key], self.icp_residual[s][key] = icp(
cloud_a, egomotion, cloud_b)
self.icp_transform_loss += 1.0 / (2**s) * tf.reduce_mean(
tf.abs(self.icp_transform[s][key]))
self.icp_residual_loss += 1.0 / (2**s) * tf.reduce_mean(
tf.abs(self.icp_residual[s][key]))
self.total_loss = self.reconstr_weight * self.reconstr_loss
if self.smooth_weight > 0:
self.total_loss += self.smooth_weight * self.smooth_loss
if self.ssim_weight > 0:
self.total_loss += self.ssim_weight * self.ssim_loss
if self.icp_weight > 0:
self.total_loss += self.icp_weight * (self.icp_transform_loss +
self.icp_residual_loss)
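      # Worked example (illustrative, not from the original file): with the
      # default weights, total_loss = 0.85 * reconstr_loss +
      # 0.05 * smooth_loss + 0.15 * ssim_loss; the ICP terms drop out because
      # icp_weight defaults to 0.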
def gradient_x(self, img):
return img[:, :, :-1, :] - img[:, :, 1:, :]
def gradient_y(self, img):
return img[:, :-1, :, :] - img[:, 1:, :, :]
def depth_smoothness(self, depth, img):
"""Computes image-aware depth smoothness loss."""
depth_dx = self.gradient_x(depth)
depth_dy = self.gradient_y(depth)
image_dx = self.gradient_x(img)
image_dy = self.gradient_y(img)
weights_x = tf.exp(-tf.reduce_mean(tf.abs(image_dx), 3, keepdims=True))
weights_y = tf.exp(-tf.reduce_mean(tf.abs(image_dy), 3, keepdims=True))
smoothness_x = depth_dx * weights_x
smoothness_y = depth_dy * weights_y
return tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(abs(smoothness_y))
def ssim(self, x, y):
"""Computes a differentiable structured image similarity measure."""
c1 = 0.01**2
c2 = 0.03**2
mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
sigma_x = slim.avg_pool2d(x**2, 3, 1, 'VALID') - mu_x**2
sigma_y = slim.avg_pool2d(y**2, 3, 1, 'VALID') - mu_y**2
sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y
ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)
ssim = ssim_n / ssim_d
return tf.clip_by_value((1 - ssim) / 2, 0, 1)
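  # Note on the constants above (illustrative, not from the original file):
  # c1 and c2 are the standard SSIM stabilizers (k1 * L)**2 and (k2 * L)**2
  # with k1=0.01, k2=0.03 and dynamic range L=1.0 for float images in [0, 1];
  # (1 - ssim) / 2 then maps similarity in [-1, 1] to a loss in [0, 1].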
def build_train_op(self):
with tf.name_scope('train_op'):
optim = tf.train.AdamOptimizer(self.learning_rate, self.beta1)
self.train_op = slim.learning.create_train_op(self.total_loss, optim)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.incr_global_step = tf.assign(self.global_step, self.global_step + 1)
def build_summaries(self):
"""Adds scalar and image summaries for TensorBoard."""
tf.summary.scalar('total_loss', self.total_loss)
tf.summary.scalar('reconstr_loss', self.reconstr_loss)
if self.smooth_weight > 0:
tf.summary.scalar('smooth_loss', self.smooth_loss)
if self.ssim_weight > 0:
tf.summary.scalar('ssim_loss', self.ssim_loss)
if self.icp_weight > 0:
tf.summary.scalar('icp_transform_loss', self.icp_transform_loss)
tf.summary.scalar('icp_residual_loss', self.icp_residual_loss)
for i in range(self.seq_length - 1):
tf.summary.histogram('tx%d' % i, self.egomotion[:, i, 0])
tf.summary.histogram('ty%d' % i, self.egomotion[:, i, 1])
tf.summary.histogram('tz%d' % i, self.egomotion[:, i, 2])
tf.summary.histogram('rx%d' % i, self.egomotion[:, i, 3])
tf.summary.histogram('ry%d' % i, self.egomotion[:, i, 4])
tf.summary.histogram('rz%d' % i, self.egomotion[:, i, 5])
for s in range(NUM_SCALES):
for i in range(self.seq_length):
tf.summary.image('scale%d_image%d' % (s, i),
self.images[s][:, :, :, 3 * i:3 * (i + 1)])
if i in self.depth:
tf.summary.histogram('scale%d_depth%d' % (s, i), self.depth[i][s])
tf.summary.histogram('scale%d_disp%d' % (s, i), self.disp[i][s])
tf.summary.image('scale%d_disparity%d' % (s, i), self.disp[i][s])
for key in self.warped_image[s]:
tf.summary.image('scale%d_warped_image%s' % (s, key),
self.warped_image[s][key])
tf.summary.image('scale%d_warp_mask%s' % (s, key),
self.warp_mask[s][key])
tf.summary.image('scale%d_warp_error%s' % (s, key),
self.warp_error[s][key])
if self.ssim_weight > 0:
tf.summary.image('scale%d_ssim_error%s' % (s, key),
self.ssim_error[s][key])
if self.icp_weight > 0:
tf.summary.image('scale%d_icp_residual%s' % (s, key),
self.icp_residual[s][key])
transform = self.icp_transform[s][key]
tf.summary.histogram('scale%d_icp_tx%s' % (s, key), transform[:, 0])
tf.summary.histogram('scale%d_icp_ty%s' % (s, key), transform[:, 1])
tf.summary.histogram('scale%d_icp_tz%s' % (s, key), transform[:, 2])
tf.summary.histogram('scale%d_icp_rx%s' % (s, key), transform[:, 3])
tf.summary.histogram('scale%d_icp_ry%s' % (s, key), transform[:, 4])
tf.summary.histogram('scale%d_icp_rz%s' % (s, key), transform[:, 5])
def build_depth_test_graph(self):
"""Builds depth model reading from placeholders."""
with tf.name_scope('depth_prediction'):
with tf.variable_scope('depth_prediction'):
input_uint8 = tf.placeholder(
tf.uint8, [self.batch_size, self.img_height, self.img_width, 3],
name='raw_input')
input_float = tf.image.convert_image_dtype(input_uint8, tf.float32)
# TODO(rezama): Retrain published model with batchnorm params and set
# is_training to False.
est_disp, _ = nets.disp_net(input_float, is_training=True)
est_depth = 1.0 / est_disp[0]
self.inputs_depth = input_uint8
self.est_depth = est_depth
def build_egomotion_test_graph(self):
"""Builds egomotion model reading from placeholders."""
input_uint8 = tf.placeholder(
tf.uint8,
[self.batch_size, self.img_height, self.img_width * self.seq_length, 3],
name='raw_input')
input_float = tf.image.convert_image_dtype(input_uint8, tf.float32)
image_seq = input_float
image_stack = self.unpack_image_batches(image_seq)
with tf.name_scope('egomotion_prediction'):
# TODO(rezama): Retrain published model with batchnorm params and set
# is_training to False.
egomotion, _ = nets.egomotion_net(image_stack, is_training=True,
legacy_mode=self.legacy_mode)
self.inputs_egomotion = input_uint8
self.est_egomotion = egomotion
def unpack_image_batches(self, image_seq):
"""[B, h, w * seq_length, 3] -> [B, h, w, 3 * seq_length]."""
with tf.name_scope('unpack_images'):
image_list = [
image_seq[:, :, i * self.img_width:(i + 1) * self.img_width, :]
for i in range(self.seq_length)
]
image_stack = tf.concat(image_list, axis=3)
image_stack.set_shape([
self.batch_size, self.img_height, self.img_width, self.seq_length * 3
])
return image_stack
def inference(self, inputs, sess, mode):
"""Runs depth or egomotion inference from placeholders."""
fetches = {}
if mode == 'depth':
fetches['depth'] = self.est_depth
inputs_ph = self.inputs_depth
if mode == 'egomotion':
fetches['egomotion'] = self.est_egomotion
inputs_ph = self.inputs_egomotion
results = sess.run(fetches, feed_dict={inputs_ph: inputs})
return results
| 16,378 | 41.876963 | 80 | py |
models | models-master/research/vid2depth/util.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common flags and functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import locale
import os
from absl import logging
import numpy as np
import tensorflow as tf
def get_seq_middle(seq_length):
"""Returns relative index for the middle frame in sequence."""
half_offset = int((seq_length - 1) / 2)
return seq_length - 1 - half_offset
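# For example (illustrative, not from the original file): get_seq_middle(3)
# returns 1 and get_seq_middle(5) returns 2 (0-based middle index).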
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj)
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
"""Cound the number of parameters in the model.
Args:
also_print: Boolean. If True also print the numbers.
Returns:
The total number of parameters.
"""
total = 0
if also_print:
logging.info('Model Parameters:')
for v in get_vars_to_restore():
shape = v.get_shape()
if also_print:
logging.info('%s %s: %s', v.op.name, shape,
format_number(shape.num_elements()))
total += shape.num_elements()
if also_print:
logging.info('Total: %s', format_number(total))
return total
def get_vars_to_restore(ckpt=None):
"""Returns list of variables that should be saved/restored.
Args:
ckpt: Path to existing checkpoint. If present, returns only the subset of
variables that exist in given checkpoint.
Returns:
List of all variables that need to be saved/restored.
"""
model_vars = tf.trainable_variables()
# Add batchnorm variables.
bn_vars = [v for v in tf.global_variables()
if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name]
model_vars.extend(bn_vars)
model_vars = sorted(model_vars, key=lambda x: x.op.name)
if ckpt is not None:
ckpt_var_names = tf.contrib.framework.list_variables(ckpt)
ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]
for v in model_vars:
if v.op.name not in ckpt_var_names:
logging.warn('Missing var %s in checkpoint: %s', v.op.name,
os.path.basename(ckpt))
model_vars = [v for v in model_vars if v.op.name in ckpt_var_names]
return model_vars
def format_number(n):
"""Formats number with thousands commas."""
  # locale.format() was removed in Python 3.12; locale.format_string() behaves
  # identically here and exists on older versions as well.
  locale.setlocale(locale.LC_ALL, 'en_US')
  return locale.format_string('%d', n, grouping=True)
def read_text_lines(filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
lines = [l.rstrip() for l in lines]
return lines
| 3,698 | 29.319672 | 80 | py |
models | models-master/research/vid2depth/nets.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Depth and Ego-Motion networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import numpy as np
import tensorflow as tf
import util
slim = tf.contrib.slim
# TODO(rezama): Move flag to main, pass as argument to functions.
flags.DEFINE_bool('use_bn', True, 'Add batch norm layers.')
FLAGS = flags.FLAGS
# Weight regularization.
WEIGHT_REG = 0.05
# Disparity (inverse depth) values range from 0.01 to 10.
DISP_SCALING = 10
MIN_DISP = 0.01
EGOMOTION_VEC_SIZE = 6
def egomotion_net(image_stack, is_training=True, legacy_mode=False):
"""Predict ego-motion vectors from a stack of frames.
Args:
image_stack: Input tensor with shape [B, h, w, seq_length * 3]. Regardless
of the value of legacy_mode, the input image sequence passed to the
function should be in normal order, e.g. [1, 2, 3].
is_training: Whether the model is being trained or not.
legacy_mode: Setting legacy_mode to True enables compatibility with
SfMLearner checkpoints. When legacy_mode is on, egomotion_net()
rearranges the input tensor to place the target (middle) frame first in
sequence. This is the arrangement of inputs that legacy models have
received during training. In legacy mode, the client program
(model.Model.build_loss()) interprets the outputs of this network
differently as well. For example:
When legacy_mode == True,
Network inputs will be [2, 1, 3]
Network outputs will be [1 -> 2, 3 -> 2]
When legacy_mode == False,
Network inputs will be [1, 2, 3]
Network outputs will be [1 -> 2, 2 -> 3]
Returns:
Egomotion vectors with shape [B, seq_length - 1, 6].
"""
seq_length = image_stack.get_shape()[3].value // 3 # 3 == RGB.
if legacy_mode:
# Put the target frame at the beginning of stack.
with tf.name_scope('rearrange_stack'):
mid_index = util.get_seq_middle(seq_length)
left_subset = image_stack[:, :, :, :mid_index * 3]
target_frame = image_stack[:, :, :, mid_index * 3:(mid_index + 1) * 3]
right_subset = image_stack[:, :, :, (mid_index + 1) * 3:]
image_stack = tf.concat([target_frame, left_subset, right_subset], axis=3)
batch_norm_params = {'is_training': is_training}
num_egomotion_vecs = seq_length - 1
with tf.variable_scope('pose_exp_net') as sc:
end_points_collection = sc.original_name_scope + '_end_points'
normalizer_fn = slim.batch_norm if FLAGS.use_bn else None
normalizer_params = batch_norm_params if FLAGS.use_bn else None
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
weights_regularizer=slim.l2_regularizer(WEIGHT_REG),
normalizer_params=normalizer_params,
activation_fn=tf.nn.relu,
outputs_collections=end_points_collection):
cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')
cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')
cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')
cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')
cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')
# Ego-motion specific layers
with tf.variable_scope('pose'):
cnv6 = slim.conv2d(cnv5, 256, [3, 3], stride=2, scope='cnv6')
cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')
pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs
egomotion_pred = slim.conv2d(cnv7,
pred_channels,
[1, 1],
scope='pred',
stride=1,
normalizer_fn=None,
activation_fn=None)
egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])
# Tinghui found that scaling by a small constant facilitates training.
egomotion_final = 0.01 * tf.reshape(
egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
return egomotion_final, end_points
def disp_net(target_image, is_training=True):
"""Predict inverse of depth from a single image."""
batch_norm_params = {'is_training': is_training}
h = target_image.get_shape()[1].value
w = target_image.get_shape()[2].value
inputs = target_image
with tf.variable_scope('depth_net') as sc:
end_points_collection = sc.original_name_scope + '_end_points'
normalizer_fn = slim.batch_norm if FLAGS.use_bn else None
normalizer_params = batch_norm_params if FLAGS.use_bn else None
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=slim.l2_regularizer(WEIGHT_REG),
activation_fn=tf.nn.relu,
outputs_collections=end_points_collection):
cnv1 = slim.conv2d(inputs, 32, [7, 7], stride=2, scope='cnv1')
cnv1b = slim.conv2d(cnv1, 32, [7, 7], stride=1, scope='cnv1b')
cnv2 = slim.conv2d(cnv1b, 64, [5, 5], stride=2, scope='cnv2')
cnv2b = slim.conv2d(cnv2, 64, [5, 5], stride=1, scope='cnv2b')
cnv3 = slim.conv2d(cnv2b, 128, [3, 3], stride=2, scope='cnv3')
cnv3b = slim.conv2d(cnv3, 128, [3, 3], stride=1, scope='cnv3b')
cnv4 = slim.conv2d(cnv3b, 256, [3, 3], stride=2, scope='cnv4')
cnv4b = slim.conv2d(cnv4, 256, [3, 3], stride=1, scope='cnv4b')
cnv5 = slim.conv2d(cnv4b, 512, [3, 3], stride=2, scope='cnv5')
cnv5b = slim.conv2d(cnv5, 512, [3, 3], stride=1, scope='cnv5b')
cnv6 = slim.conv2d(cnv5b, 512, [3, 3], stride=2, scope='cnv6')
cnv6b = slim.conv2d(cnv6, 512, [3, 3], stride=1, scope='cnv6b')
cnv7 = slim.conv2d(cnv6b, 512, [3, 3], stride=2, scope='cnv7')
cnv7b = slim.conv2d(cnv7, 512, [3, 3], stride=1, scope='cnv7b')
up7 = slim.conv2d_transpose(cnv7b, 512, [3, 3], stride=2, scope='upcnv7')
# There might be dimension mismatch due to uneven down/up-sampling.
up7 = _resize_like(up7, cnv6b)
i7_in = tf.concat([up7, cnv6b], axis=3)
icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7')
up6 = slim.conv2d_transpose(icnv7, 512, [3, 3], stride=2, scope='upcnv6')
up6 = _resize_like(up6, cnv5b)
i6_in = tf.concat([up6, cnv5b], axis=3)
icnv6 = slim.conv2d(i6_in, 512, [3, 3], stride=1, scope='icnv6')
up5 = slim.conv2d_transpose(icnv6, 256, [3, 3], stride=2, scope='upcnv5')
up5 = _resize_like(up5, cnv4b)
i5_in = tf.concat([up5, cnv4b], axis=3)
icnv5 = slim.conv2d(i5_in, 256, [3, 3], stride=1, scope='icnv5')
up4 = slim.conv2d_transpose(icnv5, 128, [3, 3], stride=2, scope='upcnv4')
i4_in = tf.concat([up4, cnv3b], axis=3)
icnv4 = slim.conv2d(i4_in, 128, [3, 3], stride=1, scope='icnv4')
disp4 = (slim.conv2d(icnv4, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
normalizer_fn=None, scope='disp4')
* DISP_SCALING + MIN_DISP)
      # Note: np.int was removed in NumPy 1.24; the builtin int() is
      # equivalent here.
      disp4_up = tf.image.resize_bilinear(disp4, [int(h / 4), int(w / 4)])
up3 = slim.conv2d_transpose(icnv4, 64, [3, 3], stride=2, scope='upcnv3')
i3_in = tf.concat([up3, cnv2b, disp4_up], axis=3)
icnv3 = slim.conv2d(i3_in, 64, [3, 3], stride=1, scope='icnv3')
disp3 = (slim.conv2d(icnv3, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
normalizer_fn=None, scope='disp3')
* DISP_SCALING + MIN_DISP)
      disp3_up = tf.image.resize_bilinear(disp3, [int(h / 2), int(w / 2)])
up2 = slim.conv2d_transpose(icnv3, 32, [3, 3], stride=2, scope='upcnv2')
i2_in = tf.concat([up2, cnv1b, disp3_up], axis=3)
icnv2 = slim.conv2d(i2_in, 32, [3, 3], stride=1, scope='icnv2')
disp2 = (slim.conv2d(icnv2, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
normalizer_fn=None, scope='disp2')
* DISP_SCALING + MIN_DISP)
disp2_up = tf.image.resize_bilinear(disp2, [h, w])
up1 = slim.conv2d_transpose(icnv2, 16, [3, 3], stride=2, scope='upcnv1')
i1_in = tf.concat([up1, disp2_up], axis=3)
icnv1 = slim.conv2d(i1_in, 16, [3, 3], stride=1, scope='icnv1')
disp1 = (slim.conv2d(icnv1, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
normalizer_fn=None, scope='disp1')
* DISP_SCALING + MIN_DISP)
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
return [disp1, disp2, disp3, disp4], end_points
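# Shape sketch for the return value above (illustrative, not from the original
# file): for a 128x416 input, disp1..disp4 have spatial sizes 128x416, 64x208,
# 32x104 and 16x52; the full-resolution map comes first.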
def _resize_like(inputs, ref):
i_h, i_w = inputs.get_shape()[1], inputs.get_shape()[2]
r_h, r_w = ref.get_shape()[1], ref.get_shape()[2]
if i_h == r_h and i_w == r_w:
return inputs
else:
return tf.image.resize_nearest_neighbor(inputs, [r_h.value, r_w.value])
| 9,845 | 46.336538 | 80 | py |
models | models-master/research/vid2depth/train.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train the model."""
# Example usage:
#
# python train.py \
# --logtostderr \
# --data_dir ~/vid2depth/data/kitti_raw_eigen \
# --seq_length 3 \
# --reconstr_weight 0.85 \
# --smooth_weight 0.05 \
# --ssim_weight 0.15 \
# --icp_weight 0.1 \
# --checkpoint_dir ~/vid2depth/checkpoints
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
import model
import numpy as np
import tensorflow as tf
import util
gfile = tf.gfile
HOME_DIR = os.path.expanduser('~')
DEFAULT_DATA_DIR = os.path.join(HOME_DIR, 'vid2depth/data/kitti_raw_eigen')
DEFAULT_CHECKPOINT_DIR = os.path.join(HOME_DIR, 'vid2depth/checkpoints')
flags.DEFINE_string('data_dir', DEFAULT_DATA_DIR, 'Preprocessed data.')
flags.DEFINE_float('learning_rate', 0.0002, 'Adam learning rate.')
flags.DEFINE_float('beta1', 0.9, 'Adam momentum.')
flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')
flags.DEFINE_float('smooth_weight', 0.05, 'Smoothness loss weight.')
flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.')
flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.')
flags.DEFINE_integer('batch_size', 4, 'The size of a sample batch')
flags.DEFINE_integer('img_height', 128, 'Input frame height.')
flags.DEFINE_integer('img_width', 416, 'Input frame width.')
# Note: Training time grows linearly with sequence length. Use 2 or 3.
flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.')
flags.DEFINE_string('pretrained_ckpt', None, 'Path to checkpoint with '
'pretrained weights. Do not include .data* extension.')
flags.DEFINE_string('checkpoint_dir', DEFAULT_CHECKPOINT_DIR,
'Directory to save model checkpoints.')
flags.DEFINE_integer('train_steps', 200000, 'Number of training steps.')
flags.DEFINE_integer('summary_freq', 100, 'Save summaries every N steps.')
flags.DEFINE_bool('legacy_mode', False, 'Whether to compute losses using only '
                  'the middle frame of the sequence as the target frame.')
FLAGS = flags.FLAGS
# Maximum number of checkpoints to keep.
MAX_TO_KEEP = 100
def main(_):
# Fixed seed for repeatability
seed = 8964
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
if FLAGS.legacy_mode and FLAGS.seq_length < 3:
raise ValueError('Legacy mode supports sequence length > 2 only.')
if not gfile.Exists(FLAGS.checkpoint_dir):
gfile.MakeDirs(FLAGS.checkpoint_dir)
train_model = model.Model(data_dir=FLAGS.data_dir,
is_training=True,
learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
reconstr_weight=FLAGS.reconstr_weight,
smooth_weight=FLAGS.smooth_weight,
ssim_weight=FLAGS.ssim_weight,
icp_weight=FLAGS.icp_weight,
batch_size=FLAGS.batch_size,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length,
legacy_mode=FLAGS.legacy_mode)
train(train_model, FLAGS.pretrained_ckpt, FLAGS.checkpoint_dir,
FLAGS.train_steps, FLAGS.summary_freq)
def train(train_model, pretrained_ckpt, checkpoint_dir, train_steps,
summary_freq):
"""Train model."""
if pretrained_ckpt is not None:
vars_to_restore = util.get_vars_to_restore(pretrained_ckpt)
pretrain_restorer = tf.train.Saver(vars_to_restore)
vars_to_save = util.get_vars_to_restore()
saver = tf.train.Saver(vars_to_save + [train_model.global_step],
max_to_keep=MAX_TO_KEEP)
sv = tf.train.Supervisor(logdir=checkpoint_dir, save_summaries_secs=0,
saver=None)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with sv.managed_session(config=config) as sess:
if pretrained_ckpt is not None:
logging.info('Restoring pretrained weights from %s', pretrained_ckpt)
pretrain_restorer.restore(sess, pretrained_ckpt)
logging.info('Attempting to resume training from %s...', checkpoint_dir)
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
logging.info('Last checkpoint found: %s', checkpoint)
if checkpoint:
saver.restore(sess, checkpoint)
logging.info('Training...')
start_time = time.time()
last_summary_time = time.time()
steps_per_epoch = train_model.reader.steps_per_epoch
step = 1
while step <= train_steps:
fetches = {
'train': train_model.train_op,
'global_step': train_model.global_step,
'incr_global_step': train_model.incr_global_step
}
if step % summary_freq == 0:
fetches['loss'] = train_model.total_loss
fetches['summary'] = sv.summary_op
results = sess.run(fetches)
global_step = results['global_step']
if step % summary_freq == 0:
sv.summary_writer.add_summary(results['summary'], global_step)
train_epoch = math.ceil(global_step / steps_per_epoch)
train_step = global_step - (train_epoch - 1) * steps_per_epoch
this_cycle = time.time() - last_summary_time
last_summary_time += this_cycle
logging.info(
'Epoch: [%2d] [%5d/%5d] time: %4.2fs (%ds total) loss: %.3f',
train_epoch, train_step, steps_per_epoch, this_cycle,
time.time() - start_time, results['loss'])
if step % steps_per_epoch == 0:
logging.info('[*] Saving checkpoint to %s...', checkpoint_dir)
saver.save(sess, os.path.join(checkpoint_dir, 'model'),
global_step=global_step)
# Setting step to global_step allows for training for a total of
# train_steps even if the program is restarted during training.
step = global_step + 1
if __name__ == '__main__':
app.run(main)
| 6,780 | 37.971264 | 80 | py |
models | models-master/research/vid2depth/dataset/gen_data.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates data for training/validation and save it to disk."""
# Example usage:
#
# python dataset/gen_data.py \
# --alsologtostderr \
# --dataset_name kitti_raw_eigen \
# --dataset_dir ~/vid2depth/dataset/kitti-raw-uncompressed \
# --data_dir ~/vid2depth/data/kitti_raw_eigen_s3 \
# --seq_length 3 \
# --num_threads 12
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import dataset_loader
import numpy as np
import scipy.misc
import tensorflow as tf
gfile = tf.gfile
FLAGS = flags.FLAGS
DATASETS = [
'kitti_raw_eigen', 'kitti_raw_stereo', 'kitti_odom', 'cityscapes', 'bike'
]
flags.DEFINE_enum('dataset_name', None, DATASETS, 'Dataset name.')
flags.DEFINE_string('dataset_dir', None, 'Location for dataset source files.')
flags.DEFINE_string('data_dir', None, 'Where to save the generated data.')
# Note: Training time grows linearly with sequence length. Use 2 or 3.
flags.DEFINE_integer('seq_length', 3, 'Length of each training sequence.')
flags.DEFINE_integer('img_height', 128, 'Image height.')
flags.DEFINE_integer('img_width', 416, 'Image width.')
flags.DEFINE_integer(
'num_threads', None, 'Number of worker threads. '
'Defaults to number of CPU cores.')
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('dataset_dir')
flags.mark_flag_as_required('data_dir')
# Process data in chunks for reporting progress.
NUM_CHUNKS = 100
def _generate_data():
"""Extract sequences from dataset_dir and store them in data_dir."""
if not gfile.Exists(FLAGS.data_dir):
gfile.MakeDirs(FLAGS.data_dir)
global dataloader # pylint: disable=global-variable-undefined
if FLAGS.dataset_name == 'bike':
dataloader = dataset_loader.Bike(FLAGS.dataset_dir,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length)
elif FLAGS.dataset_name == 'kitti_odom':
dataloader = dataset_loader.KittiOdom(FLAGS.dataset_dir,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length)
elif FLAGS.dataset_name == 'kitti_raw_eigen':
dataloader = dataset_loader.KittiRaw(FLAGS.dataset_dir,
split='eigen',
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length)
elif FLAGS.dataset_name == 'kitti_raw_stereo':
dataloader = dataset_loader.KittiRaw(FLAGS.dataset_dir,
split='stereo',
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length)
elif FLAGS.dataset_name == 'cityscapes':
dataloader = dataset_loader.Cityscapes(FLAGS.dataset_dir,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length)
else:
raise ValueError('Unknown dataset')
# The default loop below uses multiprocessing, which can make it difficult
  # to locate the source of errors in data loader classes.
# Uncomment this loop for easier debugging:
# all_examples = {}
# for i in range(dataloader.num_train):
# _gen_example(i, all_examples)
# logging.info('Generated: %d', len(all_examples))
all_frames = range(dataloader.num_train)
frame_chunks = np.array_split(all_frames, NUM_CHUNKS)
manager = multiprocessing.Manager()
all_examples = manager.dict()
num_cores = multiprocessing.cpu_count()
num_threads = num_cores if FLAGS.num_threads is None else FLAGS.num_threads
pool = multiprocessing.Pool(num_threads)
# Split into training/validation sets. Fixed seed for repeatability.
np.random.seed(8964)
if not gfile.Exists(FLAGS.data_dir):
gfile.MakeDirs(FLAGS.data_dir)
with gfile.Open(os.path.join(FLAGS.data_dir, 'train.txt'), 'w') as train_f:
with gfile.Open(os.path.join(FLAGS.data_dir, 'val.txt'), 'w') as val_f:
logging.info('Generating data...')
for index, frame_chunk in enumerate(frame_chunks):
all_examples.clear()
pool.map(_gen_example_star,
                 zip(frame_chunk, itertools.repeat(all_examples)))
logging.info('Chunk %d/%d: saving %s entries...', index + 1, NUM_CHUNKS,
len(all_examples))
for _, example in all_examples.items():
if example:
s = example['folder_name']
frame = example['file_name']
if np.random.random() < 0.1:
val_f.write('%s %s\n' % (s, frame))
else:
train_f.write('%s %s\n' % (s, frame))
pool.close()
pool.join()
def _gen_example(i, all_examples):
"""Saves one example to file. Also adds it to all_examples dict."""
example = dataloader.get_example_with_index(i)
if not example:
return
image_seq_stack = _stack_image_seq(example['image_seq'])
example.pop('image_seq', None) # Free up memory.
intrinsics = example['intrinsics']
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
cx = intrinsics[0, 2]
cy = intrinsics[1, 2]
save_dir = os.path.join(FLAGS.data_dir, example['folder_name'])
if not gfile.Exists(save_dir):
gfile.MakeDirs(save_dir)
img_filepath = os.path.join(save_dir, '%s.jpg' % example['file_name'])
scipy.misc.imsave(img_filepath, image_seq_stack.astype(np.uint8))
cam_filepath = os.path.join(save_dir, '%s_cam.txt' % example['file_name'])
example['cam'] = '%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy)
with open(cam_filepath, 'w') as cam_f:
cam_f.write(example['cam'])
key = example['folder_name'] + '_' + example['file_name']
all_examples[key] = example
def _gen_example_star(params):
return _gen_example(*params)
def _stack_image_seq(seq):
for i, im in enumerate(seq):
if i == 0:
res = im
else:
res = np.hstack((res, im))
return res
def main(_):
_generate_data()
if __name__ == '__main__':
app.run(main)
| 7,204 | 35.948718 | 80 | py |
models | models-master/research/vid2depth/dataset/dataset_loader.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes to load KITTI and Cityscapes data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import re
from absl import logging
import numpy as np
import scipy.misc
CITYSCAPES_CROP_BOTTOM = True # Crop bottom 25% to remove the car hood.
CITYSCAPES_CROP_PCT = 0.75
CITYSCAPES_SAMPLE_EVERY = 2 # Sample every 2 frames to match KITTI frame rate.
BIKE_SAMPLE_EVERY = 6 # 5fps, since the bike's motion is slower.
class Bike(object):
"""Load bike video frames."""
def __init__(self,
dataset_dir,
img_height=128,
img_width=416,
seq_length=3,
sample_every=BIKE_SAMPLE_EVERY):
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.sample_every = sample_every
self.frames = self.collect_frames()
self.num_frames = len(self.frames)
self.num_train = self.num_frames
logging.info('Total frames collected: %d', self.num_frames)
def collect_frames(self):
"""Create a list of unique ids for available frames."""
video_list = os.listdir(self.dataset_dir)
logging.info('video_list: %s', video_list)
frames = []
for video in video_list:
im_files = glob.glob(os.path.join(self.dataset_dir, video, '*.jpg'))
im_files = sorted(im_files, key=natural_keys)
# Adding 3 crops of the video.
frames.extend(['A' + video + '/' + os.path.basename(f) for f in im_files])
frames.extend(['B' + video + '/' + os.path.basename(f) for f in im_files])
frames.extend(['C' + video + '/' + os.path.basename(f) for f in im_files])
return frames
def get_example_with_index(self, target_index):
if not self.is_valid_sample(target_index):
return False
example = self.load_example(target_index)
return example
def load_intrinsics(self, unused_frame_idx, cy):
"""Load intrinsics."""
# https://www.wired.com/2013/05/calculating-the-angular-view-of-an-iphone/
# https://codeyarns.com/2015/09/08/how-to-compute-intrinsic-camera-matrix-for-a-camera/
# https://stackoverflow.com/questions/39992968/how-to-calculate-field-of-view-of-the-camera-from-camera-intrinsic-matrix
    # iPhone: these numbers are for images with resolution 720 x 1280.
# Assuming FOV = 50.9 => fx = (1280 // 2) / math.tan(fov / 2) = 1344.8
intrinsics = np.array([[1344.8, 0, 1280 // 2],
[0, 1344.8, cy],
[0, 0, 1.0]])
return intrinsics
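  # The fx above can be reproduced from the field of view. Illustrative
  # sketch (`focal_from_fov` is a hypothetical helper, not part of the
  # loader):
  #   import math
  #   def focal_from_fov(width_px, fov_deg):
  #     return (width_px / 2.0) / math.tan(math.radians(fov_deg) / 2.0)
  #   focal_from_fov(1280, 50.9)  # ~= 1344.8, the fx hard-coded above.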
def is_valid_sample(self, target_index):
"""Checks whether we can find a valid sequence around this frame."""
target_video, _ = self.frames[target_index].split('/')
start_index, end_index = get_seq_start_end(target_index,
self.seq_length,
self.sample_every)
if start_index < 0 or end_index >= self.num_frames:
return False
start_video, _ = self.frames[start_index].split('/')
end_video, _ = self.frames[end_index].split('/')
if target_video == start_video and target_video == end_video:
return True
return False
def load_image_raw(self, frame_id):
"""Reads the image and crops it according to first letter of frame_id."""
crop_type = frame_id[0]
img_file = os.path.join(self.dataset_dir, frame_id[1:])
img = scipy.misc.imread(img_file)
allowed_height = int(img.shape[1] * self.img_height / self.img_width)
# Starting height for the middle crop.
mid_crop_top = int(img.shape[0] / 2 - allowed_height / 2)
# How much to go up or down to get the other two crops.
height_var = int(mid_crop_top / 3)
if crop_type == 'A':
crop_top = mid_crop_top - height_var
cy = allowed_height / 2 + height_var
elif crop_type == 'B':
crop_top = mid_crop_top
cy = allowed_height / 2
elif crop_type == 'C':
crop_top = mid_crop_top + height_var
cy = allowed_height / 2 - height_var
else:
raise ValueError('Unknown crop_type: %s' % crop_type)
crop_bottom = crop_top + allowed_height + 1
return img[crop_top:crop_bottom, :, :], cy
def load_image_sequence(self, target_index):
"""Returns a list of images around target index."""
start_index, end_index = get_seq_start_end(target_index,
self.seq_length,
self.sample_every)
image_seq = []
for idx in range(start_index, end_index + 1, self.sample_every):
frame_id = self.frames[idx]
img, cy = self.load_image_raw(frame_id)
if idx == target_index:
zoom_y = self.img_height / img.shape[0]
zoom_x = self.img_width / img.shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
image_seq.append(img)
return image_seq, zoom_x, zoom_y, cy
def load_example(self, target_index):
"""Returns a sequence with requested target frame."""
image_seq, zoom_x, zoom_y, cy = self.load_image_sequence(target_index)
target_video, target_filename = self.frames[target_index].split('/')
# Put A, B, C at the end for better shuffling.
target_video = target_video[1:] + target_video[0]
intrinsics = self.load_intrinsics(target_index, cy)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = target_video
example['file_name'] = target_filename.split('.')[0]
return example
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0, 0] *= sx
out[0, 2] *= sx
out[1, 1] *= sy
out[1, 2] *= sy
return out
class KittiRaw(object):
"""Reads KITTI raw data files."""
def __init__(self,
dataset_dir,
split,
load_pose=False,
img_height=128,
img_width=416,
seq_length=3):
static_frames_file = 'dataset/kitti/static_frames.txt'
test_scene_file = 'dataset/kitti/test_scenes_' + split + '.txt'
with open(get_resource_path(test_scene_file), 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.load_pose = load_pose
self.cam_ids = ['02', '03']
self.date_list = [
'2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03'
]
self.collect_static_frames(static_frames_file)
self.collect_train_frames()
def collect_static_frames(self, static_frames_file):
with open(get_resource_path(static_frames_file), 'r') as f:
frames = f.readlines()
self.static_frames = []
for fr in frames:
if fr == '\n':
continue
unused_date, drive, frame_id = fr.split(' ')
      fid = '%.10d' % int(frame_id[:-1])
for cam_id in self.cam_ids:
self.static_frames.append(drive + ' ' + cam_id + ' ' + fid)
def collect_train_frames(self):
"""Creates a list of training frames."""
all_frames = []
for date in self.date_list:
date_dir = os.path.join(self.dataset_dir, date)
drive_set = os.listdir(date_dir)
for dr in drive_set:
drive_dir = os.path.join(date_dir, dr)
if os.path.isdir(drive_dir):
if dr[:-5] in self.test_scenes:
continue
for cam in self.cam_ids:
img_dir = os.path.join(drive_dir, 'image_' + cam, 'data')
num_frames = len(glob.glob(img_dir + '/*[0-9].png'))
for i in range(num_frames):
frame_id = '%.10d' % i
all_frames.append(dr + ' ' + cam + ' ' + frame_id)
for s in self.static_frames:
try:
all_frames.remove(s)
except ValueError:
pass
self.train_frames = all_frames
self.num_train = len(self.train_frames)
def is_valid_sample(self, frames, target_index):
"""Checks whether we can find a valid sequence around this frame."""
num_frames = len(frames)
target_drive, cam_id, _ = frames[target_index].split(' ')
start_index, end_index = get_seq_start_end(target_index, self.seq_length)
if start_index < 0 or end_index >= num_frames:
return False
start_drive, start_cam_id, _ = frames[start_index].split(' ')
end_drive, end_cam_id, _ = frames[end_index].split(' ')
if (target_drive == start_drive and target_drive == end_drive and
cam_id == start_cam_id and cam_id == end_cam_id):
return True
return False
def get_example_with_index(self, target_index):
if not self.is_valid_sample(self.train_frames, target_index):
return False
example = self.load_example(self.train_frames, target_index)
return example
def load_image_sequence(self, frames, target_index):
"""Returns a sequence with requested target frame."""
start_index, end_index = get_seq_start_end(target_index, self.seq_length)
image_seq = []
for index in range(start_index, end_index + 1):
drive, cam_id, frame_id = frames[index].split(' ')
img = self.load_image_raw(drive, cam_id, frame_id)
if index == target_index:
zoom_y = self.img_height / img.shape[0]
zoom_x = self.img_width / img.shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
image_seq.append(img)
return image_seq, zoom_x, zoom_y
def load_pose_sequence(self, frames, target_index):
"""Returns a sequence of pose vectors for frames around the target frame."""
target_drive, _, target_frame_id = frames[target_index].split(' ')
target_pose = self.load_pose_raw(target_drive, target_frame_id)
    start_index, end_index = get_seq_start_end(target_index, self.seq_length)
    pose_seq = []
    for index in range(start_index, end_index + 1):
      if index == target_index:
        continue
drive, _, frame_id = frames[index].split(' ')
pose = self.load_pose_raw(drive, frame_id)
# From target to index.
pose = np.dot(np.linalg.inv(pose), target_pose)
pose_seq.append(pose)
return pose_seq
def load_example(self, frames, target_index):
"""Returns a sequence with requested target frame."""
image_seq, zoom_x, zoom_y = self.load_image_sequence(frames, target_index)
target_drive, target_cam_id, target_frame_id = (
frames[target_index].split(' '))
intrinsics = self.load_intrinsics_raw(target_drive, target_cam_id)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = target_drive + '_' + target_cam_id + '/'
example['file_name'] = target_frame_id
if self.load_pose:
pose_seq = self.load_pose_sequence(frames, target_index)
example['pose_seq'] = pose_seq
return example
def load_pose_raw(self, drive, frame_id):
date = drive[:10]
pose_file = os.path.join(self.dataset_dir, date, drive, 'poses',
frame_id + '.txt')
with open(pose_file, 'r') as f:
pose = f.readline()
pose = np.array(pose.split(' ')).astype(np.float32).reshape(3, 4)
pose = np.vstack((pose, np.array([0, 0, 0, 1]).reshape((1, 4))))
return pose
def load_image_raw(self, drive, cam_id, frame_id):
date = drive[:10]
img_file = os.path.join(self.dataset_dir, date, drive, 'image_' + cam_id,
'data', frame_id + '.png')
img = scipy.misc.imread(img_file)
return img
def load_intrinsics_raw(self, drive, cam_id):
date = drive[:10]
calib_file = os.path.join(self.dataset_dir, date, 'calib_cam_to_cam.txt')
filedata = self.read_raw_calib_file(calib_file)
p_rect = np.reshape(filedata['P_rect_' + cam_id], (3, 4))
intrinsics = p_rect[:3, :3]
return intrinsics
# From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
def read_raw_calib_file(self, filepath):
"""Read in a calibration file and parse into a dictionary."""
data = {}
with open(filepath, 'r') as f:
for line in f:
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which we don't
# care about.
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0, 0] *= sx
out[0, 2] *= sx
out[1, 1] *= sy
out[1, 2] *= sy
return out
class KittiOdom(object):
"""Reads KITTI odometry data files."""
def __init__(self, dataset_dir, img_height=128, img_width=416, seq_length=3):
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.train_seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
self.test_seqs = [9, 10]
self.collect_test_frames()
self.collect_train_frames()
def collect_test_frames(self):
self.test_frames = []
for seq in self.test_seqs:
seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
img_dir = os.path.join(seq_dir, 'image_2')
num_frames = len(glob.glob(os.path.join(img_dir, '*.png')))
for n in range(num_frames):
self.test_frames.append('%.2d %.6d' % (seq, n))
self.num_test = len(self.test_frames)
def collect_train_frames(self):
self.train_frames = []
for seq in self.train_seqs:
seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
img_dir = os.path.join(seq_dir, 'image_2')
num_frames = len(glob.glob(img_dir + '/*.png'))
for n in range(num_frames):
self.train_frames.append('%.2d %.6d' % (seq, n))
self.num_train = len(self.train_frames)
def is_valid_sample(self, frames, target_frame_index):
"""Checks whether we can find a valid sequence around this frame."""
num_frames = len(frames)
target_frame_drive, _ = frames[target_frame_index].split(' ')
start_index, end_index = get_seq_start_end(target_frame_index,
self.seq_length)
if start_index < 0 or end_index >= num_frames:
return False
start_drive, _ = frames[start_index].split(' ')
end_drive, _ = frames[end_index].split(' ')
if target_frame_drive == start_drive and target_frame_drive == end_drive:
return True
return False
def load_image_sequence(self, frames, target_frame_index):
"""Returns a sequence with requested target frame."""
start_index, end_index = get_seq_start_end(target_frame_index,
self.seq_length)
image_seq = []
for index in range(start_index, end_index + 1):
drive, frame_id = frames[index].split(' ')
img = self.load_image(drive, frame_id)
if index == target_frame_index:
zoom_y = self.img_height / img.shape[0]
zoom_x = self.img_width / img.shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
image_seq.append(img)
return image_seq, zoom_x, zoom_y
def load_example(self, frames, target_frame_index):
"""Returns a sequence with requested target frame."""
image_seq, zoom_x, zoom_y = self.load_image_sequence(frames,
target_frame_index)
target_frame_drive, target_frame_id = frames[target_frame_index].split(' ')
intrinsics = self.load_intrinsics(target_frame_drive, target_frame_id)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = target_frame_drive
example['file_name'] = target_frame_id
return example
def get_example_with_index(self, target_frame_index):
if not self.is_valid_sample(self.train_frames, target_frame_index):
return False
example = self.load_example(self.train_frames, target_frame_index)
return example
def load_image(self, drive, frame_id):
img_file = os.path.join(self.dataset_dir, 'sequences',
'%s/image_2/%s.png' % (drive, frame_id))
img = scipy.misc.imread(img_file)
return img
def load_intrinsics(self, drive, unused_frame_id):
calib_file = os.path.join(self.dataset_dir, 'sequences',
'%s/calib.txt' % drive)
proj_c2p, _ = self.read_calib_file(calib_file)
intrinsics = proj_c2p[:3, :3]
return intrinsics
def read_calib_file(self, filepath, cam_id=2):
"""Read in a calibration file and parse into a dictionary."""
def parse_line(line, shape):
data = line.split()
data = np.array(data[1:]).reshape(shape).astype(np.float32)
return data
with open(filepath, 'r') as f:
mat = f.readlines()
proj_c2p = parse_line(mat[cam_id], shape=(3, 4))
proj_v2c = parse_line(mat[-1], shape=(3, 4))
filler = np.array([0, 0, 0, 1]).reshape((1, 4))
proj_v2c = np.concatenate((proj_v2c, filler), axis=0)
return proj_c2p, proj_v2c
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0, 0] *= sx
out[0, 2] *= sx
out[1, 1] *= sy
out[1, 2] *= sy
return out
class Cityscapes(object):
"""Reads Cityscapes data files."""
def __init__(self,
dataset_dir,
split='train',
crop_bottom=CITYSCAPES_CROP_BOTTOM, # Crop the car logo.
crop_pct=CITYSCAPES_CROP_PCT,
sample_every=CITYSCAPES_SAMPLE_EVERY,
img_height=128,
img_width=416,
seq_length=3):
self.dataset_dir = dataset_dir
self.split = split
self.crop_bottom = crop_bottom
self.crop_pct = crop_pct
self.sample_every = sample_every
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.frames = self.collect_frames(split)
self.num_frames = len(self.frames)
if split == 'train':
self.num_train = self.num_frames
else:
self.num_test = self.num_frames
logging.info('Total frames collected: %d', self.num_frames)
def collect_frames(self, split):
img_dir = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', split)
city_list = os.listdir(img_dir)
frames = []
for city in city_list:
img_files = glob.glob(os.path.join(img_dir, city, '*.png'))
for f in img_files:
frame_id = os.path.basename(f).split('leftImg8bit')[0]
frames.append(frame_id)
return frames
def get_example_with_index(self, target_index):
target_frame_id = self.frames[target_index]
if not self.is_valid_example(target_frame_id):
return False
example = self.load_example(self.frames[target_index])
return example
def load_intrinsics(self, frame_id, split):
"""Read intrinsics data for frame."""
city, seq, _, _ = frame_id.split('_')
camera_file = os.path.join(self.dataset_dir, 'camera', split, city,
city + '_' + seq + '_*_camera.json')
camera_file = glob.glob(camera_file)[0]
with open(camera_file, 'r') as f:
camera = json.load(f)
fx = camera['intrinsic']['fx']
fy = camera['intrinsic']['fy']
u0 = camera['intrinsic']['u0']
v0 = camera['intrinsic']['v0']
# Cropping the bottom of the image and then resizing it to the same
# (height, width) amounts to stretching the image's height.
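    # E.g. with the default crop_pct of 0.75, the kept 75% of rows is resized
    # back to full height, stretching the image vertically by 1/0.75 ~= 1.33,
    # so fy must grow by the same factor.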
if self.crop_bottom:
fy *= 1.0 / self.crop_pct
intrinsics = np.array([[fx, 0, u0],
[0, fy, v0],
[0, 0, 1]])
return intrinsics
def is_valid_example(self, target_frame_id):
"""Checks whether we can find a valid sequence around this frame."""
city, snippet_id, target_local_frame_id, _ = target_frame_id.split('_')
start_index, end_index = get_seq_start_end(
int(target_local_frame_id), self.seq_length, self.sample_every)
for index in range(start_index, end_index + 1, self.sample_every):
local_frame_id = '%.6d' % index
frame_id = '%s_%s_%s_' % (city, snippet_id, local_frame_id)
image_filepath = os.path.join(self.dataset_dir, 'leftImg8bit_sequence',
self.split, city,
frame_id + 'leftImg8bit.png')
if not os.path.exists(image_filepath):
return False
return True
def load_image_sequence(self, target_frame_id):
"""Returns a sequence with requested target frame."""
city, snippet_id, target_local_frame_id, _ = target_frame_id.split('_')
start_index, end_index = get_seq_start_end(
int(target_local_frame_id), self.seq_length, self.sample_every)
image_seq = []
for index in range(start_index, end_index + 1, self.sample_every):
local_frame_id = '%.6d' % index
frame_id = '%s_%s_%s_' % (city, snippet_id, local_frame_id)
image_filepath = os.path.join(self.dataset_dir, 'leftImg8bit_sequence',
self.split, city,
frame_id + 'leftImg8bit.png')
img = scipy.misc.imread(image_filepath)
if self.crop_bottom:
ymax = int(img.shape[0] * self.crop_pct)
img = img[:ymax]
raw_shape = img.shape
if index == int(target_local_frame_id):
zoom_y = self.img_height / raw_shape[0]
zoom_x = self.img_width / raw_shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
image_seq.append(img)
return image_seq, zoom_x, zoom_y
def load_example(self, target_frame_id):
"""Returns a sequence with requested target frame."""
image_seq, zoom_x, zoom_y = self.load_image_sequence(target_frame_id)
intrinsics = self.load_intrinsics(target_frame_id, self.split)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = target_frame_id.split('_')[0]
example['file_name'] = target_frame_id[:-1]
return example
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0, 0] *= sx
out[0, 2] *= sx
out[1, 1] *= sy
out[1, 2] *= sy
return out
def get_resource_path(relative_path):
return relative_path
def get_seq_start_end(target_index, seq_length, sample_every=1):
"""Returns absolute seq start and end indices for a given target frame."""
half_offset = int((seq_length - 1) / 2) * sample_every
end_index = target_index + half_offset
start_index = end_index - (seq_length - 1) * sample_every
return start_index, end_index
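# Example (illustrative): get_seq_start_end(10, seq_length=3) returns (9, 11),
# i.e. frames 9, 10, 11; get_seq_start_end(10, seq_length=3, sample_every=2)
# returns (8, 12), i.e. frames 8, 10, 12 sampled two apart.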
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
| 23,741 | 37.730832 | 124 | py |
models | models-master/research/vid2depth/dataset/__init__.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| 688 | 44.933333 | 80 | py |
models | models-master/research/vid2depth/ops/icp_train_demo.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Verify the op's ability to discover a hidden transformation and residual."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import random
import time
from absl import app
from absl import flags
from absl import logging
import icp_grad # pylint: disable=unused-import
from icp_op import icp
import icp_util
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 4, 'Batch size.')
flags.DEFINE_float('learning_rate', 0.1, 'Learning rate.')
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')
flags.DEFINE_string('train_dir', '/tmp/icp_train_demo',
'Directory to save event files for TensorBoard.')
# Every training step feeds the model two point clouds A, B, such that
# A = random_transform . sample_cloud
# B = (SECRET_EGO_MOTION . A) + cone(CENTER, RADIUS, SECRET_RES_HEIGHT).
# The ICP op gradients should help the program discover the values for
# SECRET_EGO_MOTION and SECRET_RES_HEIGHT to get the best alignment for A, B.
SECRET_EGO_MOTION = [0.0, 0.0, 0.1, 0.0, 0.0, 0.0]
RES_CENTER = [0.103, 1.954, 0]
RES_RADIUS = 10.0
SECRET_RES_HEIGHT = 0.1
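# Concretely, cone(CENTER, RADIUS, HEIGHT) lifts a point p along z by
#   HEIGHT * max(RADIUS - ||p_xy - CENTER_xy||, 0) / RADIUS,
# a linear bump that peaks at the center and decays to zero at RADIUS, as
# implemented in DataProducer.next_batch below.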
class DataProducer(object):
"""Generates training data."""
def __init__(self):
pass
@classmethod
def setup(cls):
"""Open a KITTI video and read its point clouds."""
lidar_cloud_path = os.path.join(FLAGS.test_srcdir,
icp_util.LIDAR_CLOUD_PATH)
cls.sample_cloud = np.load(lidar_cloud_path)
logging.info('sample_cloud: %s', cls.sample_cloud)
x_min = np.min(cls.sample_cloud[:, 0])
x_max = np.max(cls.sample_cloud[:, 0])
y_min = np.min(cls.sample_cloud[:, 1])
y_max = np.max(cls.sample_cloud[:, 1])
z_min = np.min(cls.sample_cloud[:, 2])
z_max = np.max(cls.sample_cloud[:, 2])
logging.info('x: %s - %s', x_min, x_max)
logging.info('y: %s - %s', y_min, y_max)
logging.info('z: %s - %s', z_min, z_max)
@classmethod
def random_transform(cls):
tx = random.uniform(-0.2, 0.2)
ty = random.uniform(-0.2, 0.2)
tz = random.uniform(-0.9, 0.9)
rx = random.uniform(-0.2, 0.2) * np.pi
ry = random.uniform(-0.2, 0.2) * np.pi
rz = random.uniform(-0.2, 0.2) * np.pi
transform = [tx, ty, tz, rx, ry, rz]
return transform
@classmethod
def next_batch(cls, batch_size):
"""Returns a training batch."""
source_items = []
target_items = []
for _ in range(batch_size):
source_cloud = icp_util.np_transform_cloud_xyz(cls.sample_cloud,
cls.random_transform())
source_items.append(source_cloud)
dist_to_center = np.linalg.norm((source_cloud - RES_CENTER)[:, :2],
axis=1, keepdims=True)
res = np.maximum(RES_RADIUS - dist_to_center, 0.0) / RES_RADIUS
res *= SECRET_RES_HEIGHT
# x = 0, y = 0, z = res.
res = np.concatenate((np.zeros_like(res), np.zeros_like(res), res),
axis=1)
target_cloud = icp_util.np_transform_cloud_xyz(source_cloud + res,
SECRET_EGO_MOTION)
target_items.append(target_cloud)
return np.stack(source_items), np.stack(target_items)
def placeholder_inputs(batch_size):
cloud_shape = (batch_size, DataProducer.sample_cloud.shape[0], 3)
source_placeholder = tf.placeholder(tf.float32, shape=cloud_shape)
target_placeholder = tf.placeholder(tf.float32, shape=cloud_shape)
return source_placeholder, target_placeholder
def fill_feed_dict(source_placeholder, target_placeholder):
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
source_feed, target_feed = DataProducer.next_batch(FLAGS.batch_size)
feed_dict = {
source_placeholder: source_feed,
target_placeholder: target_feed,
}
return feed_dict
def run_training():
"""Train model for a number of steps."""
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
DataProducer.setup()
source_placeholder, target_placeholder = placeholder_inputs(
FLAGS.batch_size)
transform, residual = inference(source_placeholder, target_placeholder)
loss = loss_func(transform, residual)
train_op = training(loss, FLAGS.learning_rate)
summary_op = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
sess.run(init)
# Start the training loop.
for step in range(FLAGS.max_steps):
start_time = time.time()
feed_dict = fill_feed_dict(source_placeholder, target_placeholder)
_, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
duration = time.time() - start_time
# Print status to stdout.
print('Step %d: loss = %f (%.2f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
def inference(source, target):
"""Builds model."""
ego_motion = tf.Variable(tf.zeros([6]), name='ego_motion')
res_height = tf.Variable(tf.fill([1], 0.0), name='res_height')
tf.summary.scalar('tx', ego_motion[0])
tf.summary.scalar('ty', ego_motion[1])
tf.summary.scalar('tz', ego_motion[2])
tf.summary.scalar('rx', ego_motion[3])
tf.summary.scalar('ry', ego_motion[4])
tf.summary.scalar('rz', ego_motion[5])
tf.summary.scalar('res_height', res_height[0])
dist_to_center = tf.norm((source - RES_CENTER)[:, :, :2], axis=2,
keep_dims=True)
res = tf.maximum(RES_RADIUS - dist_to_center, 0.0) / RES_RADIUS
res *= res_height
res = tf.concat([tf.zeros_like(res), tf.zeros_like(res), res], axis=2)
shifted_source = source + res
ego_motion = tf.stack([ego_motion] * FLAGS.batch_size)
transform, residual = icp(shifted_source, ego_motion, target)
return transform, residual
def loss_func(transform, residual):
return (tf.reduce_mean(tf.square(transform), name='transform_mean') +
tf.reduce_mean(tf.square(residual), name='residual_mean'))
def training(loss, learning_rate):
tf.summary.scalar('loss', loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def main(_):
run_training()
if __name__ == '__main__':
app.run(main)
| 7,596 | 36.423645 | 80 | py |
models | models-master/research/vid2depth/ops/icp_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for icp op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl import flags
from absl import logging
from icp_op import icp
import icp_util
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
PRINT_CAP = 6
class IcpOpTestBase(tf.test.TestCase):
"""Classed used by IcpOpTest, IcpOpGradTest."""
def setUp(self):
self.small_cloud = tf.constant([[[0.352222, -0.151883, -0.106395],
[-0.397406, -0.473106, 0.292602],
[-0.731898, 0.667105, 0.441304],
[-0.734766, 0.854581, -0.0361733],
[-0.4607, -0.277468, -0.916762]]],
dtype=tf.float32)
self.random_cloud = self._generate_random_cloud()
self.organized_cloud = self._generate_organized_cloud()
self.lidar_cloud = self._load_lidar_cloud()
self.identity_transform = tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype=tf.float32)
self.index_translation = 0
self.index_rotation = 3
def _run_icp(self, cloud_source, ego_motion, cloud_target):
transform, residual = icp(cloud_source, ego_motion, cloud_target)
logging.info('Running ICP:')
logging.info('ego_motion: %s\n%s', ego_motion, ego_motion.eval())
logging.info('transform: %s\n%s', transform, transform.eval())
logging.info('residual: %s\n%s', residual,
residual[0, :PRINT_CAP, :].eval())
return transform, residual
def _generate_random_cloud(self):
self.random_cloud_size = 50
tf.set_random_seed(11)
return tf.truncated_normal(
[1, self.random_cloud_size, 3], mean=0.0, stddev=1.0, dtype=tf.float32)
def _generate_organized_cloud(self):
res = 10
scale = 7
# [B, 10, 10, 3]
cloud = np.zeros(shape=(1, res, res, 3))
for i in range(res):
for j in range(res):
# For scale == 1.0, x and y range from -0.5 to 0.4.
y = scale / 2 - scale * (res - i) / res
x = scale / 2 - scale * (res - j) / res
z = math.sin(x * x + y * y)
cloud[0, i, j, :] = (x, y, z)
return tf.constant(cloud, dtype=tf.float32)
def _load_lidar_cloud(self):
lidar_cloud_path = os.path.join(FLAGS.test_srcdir,
icp_util.LIDAR_CLOUD_PATH)
lidar_cloud = np.load(lidar_cloud_path)
lidar_cloud = tf.expand_dims(lidar_cloud, axis=0) # Add batch.
logging.info('lidar_cloud.shape: %s', lidar_cloud.shape)
return lidar_cloud
class IcpOpTest(IcpOpTestBase):
def test_translate_small_cloud(self):
with self.test_session():
tx = 0.1
cloud_source = self.small_cloud
cloud_target = cloud_source + [tx, 0, 0]
transform, residual = self._run_icp(cloud_source, self.identity_transform,
cloud_target)
self.assertAlmostEqual(transform.eval()[0, self.index_translation], tx,
places=6)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_translate_random_cloud(self):
with self.test_session():
tx = 0.1
cloud_source = self.random_cloud
cloud_target = cloud_source + [tx, 0, 0]
transform, residual = self._run_icp(cloud_source, self.identity_transform,
cloud_target)
self.assertAlmostEqual(transform.eval()[0, self.index_translation], tx,
places=4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_rotate_random_cloud(self):
with self.test_session():
ego_motion = tf.constant([[0.0, 0.0, 0.0,
np.pi / 32, np.pi / 64, np.pi / 24]],
dtype=tf.float32)
cloud_source = self.random_cloud
cloud_target = icp_util.batch_transform_cloud_xyz(cloud_source,
ego_motion)
unused_transform, residual = self._run_icp(cloud_source,
self.identity_transform,
cloud_target)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_translate_organized_cloud(self):
with self.test_session():
tx = 0.1
cloud_source = self.organized_cloud
cloud_target = cloud_source + [tx, 0, 0]
transform, residual = self._run_icp(cloud_source, self.identity_transform,
cloud_target)
self.assertAlmostEqual(transform.eval()[0, self.index_translation], tx,
places=4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_rotate_organized_cloud(self):
with self.test_session():
ego_motion = tf.constant([[0.0, 0.0, 0.0,
np.pi / 16, np.pi / 32, np.pi / 12]],
dtype=tf.float32)
cloud_source = self.organized_cloud
cloud_shape = cloud_source.shape.as_list()
flat_shape = (cloud_shape[0],
cloud_shape[1] * cloud_shape[2],
cloud_shape[3])
cloud_source = tf.reshape(cloud_source, shape=flat_shape)
cloud_target = icp_util.batch_transform_cloud_xyz(cloud_source,
ego_motion)
cloud_source = tf.reshape(cloud_source, cloud_shape)
cloud_target = tf.reshape(cloud_target, cloud_shape)
unused_transform, residual = self._run_icp(cloud_source,
self.identity_transform,
cloud_target)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_translate_lidar_cloud(self):
with self.test_session():
tx = 0.1
cloud_source = self.lidar_cloud
cloud_target = cloud_source + [tx, 0, 0]
transform, residual = self._run_icp(cloud_source, self.identity_transform,
cloud_target)
self.assertAlmostEqual(transform.eval()[0, self.index_translation], tx,
places=4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_translate_lidar_cloud_ego_motion(self):
with self.test_session():
tx = 0.2
ego_motion = tf.constant([[tx, 0.0, 0.0,
0.0, 0.0, 0.0]], dtype=tf.float32)
cloud_source = self.lidar_cloud
cloud_target = cloud_source + [tx, 0, 0]
transform, residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
self.assertAllClose(transform.eval(), tf.zeros_like(transform).eval(),
atol=1e-4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_rotate_lidar_cloud_ego_motion(self):
with self.test_session():
transform = [0.0, 0.0, 0.0, np.pi / 16, np.pi / 32, np.pi / 12]
ego_motion = tf.constant([transform], dtype=tf.float32)
cloud_source = self.lidar_cloud
cloud_target = icp_util.batch_transform_cloud_xyz(cloud_source,
ego_motion)
transform, residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
self.assertAllClose(transform.eval(), tf.zeros_like(transform).eval(),
atol=1e-4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-3)
def test_no_change_lidar_cloud(self):
with self.test_session():
cloud_source = self.lidar_cloud
transform, residual = self._run_icp(cloud_source, self.identity_transform,
cloud_source)
self.assertAlmostEqual(transform.eval()[0, self.index_translation], 0,
places=4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
def test_translate_lidar_cloud_batch_size_2(self):
with self.test_session():
batch_size = 2
tx = 0.1
self.assertEqual(len(self.lidar_cloud.shape), 3)
cloud_source = tf.tile(self.lidar_cloud, [batch_size, 1, 1])
cloud_target = cloud_source + [tx, 0, 0]
self.assertEqual(len(self.identity_transform.shape), 2)
ego_motion = tf.tile(self.identity_transform, [batch_size, 1])
logging.info('cloud_source.shape: %s', cloud_source.shape)
logging.info('cloud_target.shape: %s', cloud_target.shape)
transform, residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
for b in range(batch_size):
self.assertAlmostEqual(transform.eval()[b, self.index_translation], tx,
places=4)
self.assertAllClose(residual.eval(), tf.zeros_like(residual).eval(),
atol=1e-4)
if __name__ == '__main__':
tf.test.main()
| 10,192 | 41.648536 | 80 | py |
models | models-master/research/vid2depth/ops/icp_grad.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The gradient of the icp op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
@ops.RegisterGradient('Icp')
def _icp_grad(op, grad_transform, grad_residual):
"""The gradients for `icp`.
Args:
op: The `icp` `Operation` that we are differentiating, which we can use
to find the inputs and outputs of the original op.
grad_transform: Gradient with respect to `transform` output of the `icp` op.
grad_residual: Gradient with respect to `residual` output of the
`icp` op.
Returns:
Gradients with respect to the inputs of `icp`.
"""
unused_transform = op.outputs[0]
unused_residual = op.outputs[1]
unused_source = op.inputs[0]
unused_ego_motion = op.inputs[1]
unused_target = op.inputs[2]
grad_p = -grad_residual
grad_ego_motion = -grad_transform
return [grad_p, grad_ego_motion, None]
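# Usage sketch (illustrative): once this module is imported, TensorFlow routes
# gradients through the approximation above, e.g.
#   transform, residual = icp(source, ego_motion, target)
#   d_source, = tf.gradients(tf.reduce_sum(residual), [source])
# Here d_source is the negated upstream gradient of `residual`, the gradient
# w.r.t. `ego_motion` is the negated upstream gradient of `transform`, and no
# gradient flows to `target`.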
| 1,637 | 32.428571 | 80 | py |
models | models-master/research/vid2depth/ops/icp_util.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Sample pointcloud with shape (1568, 3).
LIDAR_CLOUD_PATH = 'ops/testdata/pointcloud.npy'
def get_transformation_matrix(transform):
"""Converts [tx, ty, tz, rx, ry, rz] to a transform matrix."""
rx = transform[3]
ry = transform[4]
rz = transform[5]
rz = tf.clip_by_value(rz, -np.pi, np.pi)
ry = tf.clip_by_value(ry, -np.pi, np.pi)
rx = tf.clip_by_value(rx, -np.pi, np.pi)
cos_rx = tf.cos(rx)
sin_rx = tf.sin(rx)
rotx_1 = tf.stack([1.0, 0.0, 0.0])
rotx_2 = tf.stack([0.0, cos_rx, -sin_rx])
rotx_3 = tf.stack([0.0, sin_rx, cos_rx])
xmat = tf.stack([rotx_1, rotx_2, rotx_3])
cos_ry = tf.cos(ry)
sin_ry = tf.sin(ry)
roty_1 = tf.stack([cos_ry, 0.0, sin_ry])
roty_2 = tf.stack([0.0, 1.0, 0.0])
roty_3 = tf.stack([-sin_ry, 0.0, cos_ry])
ymat = tf.stack([roty_1, roty_2, roty_3])
cos_rz = tf.cos(rz)
sin_rz = tf.sin(rz)
rotz_1 = tf.stack([cos_rz, -sin_rz, 0.0])
rotz_2 = tf.stack([sin_rz, cos_rz, 0.0])
rotz_3 = tf.stack([0.0, 0.0, 1.0])
zmat = tf.stack([rotz_1, rotz_2, rotz_3])
rotate = tf.matmul(tf.matmul(xmat, ymat), zmat)
translate = transform[:3]
mat = tf.concat([rotate, tf.expand_dims(translate, 1)], axis=1)
hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 4], dtype=tf.float32)
mat = tf.concat([mat, hom_filler], axis=0)
return mat
def np_get_transformation_matrix(transform):
"""Converts [tx, ty, tz, rx, ry, rz] to a transform matrix."""
rx = transform[3]
ry = transform[4]
rz = transform[5]
rz = np.clip(rz, -np.pi, np.pi)
ry = np.clip(ry, -np.pi, np.pi)
rx = np.clip(rx, -np.pi, np.pi)
cos_rx = np.cos(rx)
sin_rx = np.sin(rx)
rotx_1 = np.stack([1.0, 0.0, 0.0])
rotx_2 = np.stack([0.0, cos_rx, -sin_rx])
rotx_3 = np.stack([0.0, sin_rx, cos_rx])
xmat = np.stack([rotx_1, rotx_2, rotx_3])
cos_ry = np.cos(ry)
sin_ry = np.sin(ry)
roty_1 = np.stack([cos_ry, 0.0, sin_ry])
roty_2 = np.stack([0.0, 1.0, 0.0])
roty_3 = np.stack([-sin_ry, 0.0, cos_ry])
ymat = np.stack([roty_1, roty_2, roty_3])
cos_rz = np.cos(rz)
sin_rz = np.sin(rz)
rotz_1 = np.stack([cos_rz, -sin_rz, 0.0])
rotz_2 = np.stack([sin_rz, cos_rz, 0.0])
rotz_3 = np.stack([0.0, 0.0, 1.0])
zmat = np.stack([rotz_1, rotz_2, rotz_3])
rotate = np.dot(np.dot(xmat, ymat), zmat)
translate = transform[:3]
mat = np.concatenate((rotate, np.expand_dims(translate, 1)), axis=1)
hom_filler = np.array([[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
mat = np.concatenate((mat, hom_filler), axis=0)
return mat
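# Sanity check (illustrative): the zero transform maps to the 4x4 identity,
#   np.allclose(np_get_transformation_matrix(np.zeros(6)), np.eye(4))  # True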
def transform_cloud_xyz(cloud, transform):
num_points = cloud.shape.as_list()[0]
ones = tf.ones(shape=[num_points, 1], dtype=tf.float32)
hom_cloud = tf.concat([cloud, ones], axis=1)
hom_cloud_t = tf.transpose(hom_cloud)
mat = get_transformation_matrix(transform)
transformed_cloud = tf.matmul(mat, hom_cloud_t)
transformed_cloud = tf.transpose(transformed_cloud)
transformed_cloud = transformed_cloud[:, :3]
return transformed_cloud
def np_transform_cloud_xyz(cloud, transform):
num_points = cloud.shape[0]
ones = np.ones(shape=[num_points, 1], dtype=np.float32)
hom_cloud = np.concatenate((cloud, ones), axis=1)
hom_cloud_t = np.transpose(hom_cloud)
mat = np_get_transformation_matrix(transform)
transformed_cloud = np.dot(mat, hom_cloud_t)
transformed_cloud = np.transpose(transformed_cloud)
transformed_cloud = transformed_cloud[:, :3]
return transformed_cloud
def batch_transform_cloud_xyz(cloud, transform):
results = []
cloud_items = tf.unstack(cloud)
if len(transform.shape.as_list()) == 2:
transform_items = tf.unstack(transform)
else:
transform_items = [transform] * len(cloud_items)
for cloud_item, transform_item in zip(cloud_items, transform_items):
results.append(transform_cloud_xyz(cloud_item, transform_item))
return tf.stack(results)
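# Shape contract (illustrative): `cloud` is [B, N, 3]; `transform` is either
# [B, 6] (one transform per batch element) or a single [6] vector shared by
# the whole batch. The result is again [B, N, 3].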
| 4,710 | 31.489655 | 80 | py |
models | models-master/research/vid2depth/ops/icp_op.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads icp op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
try:
icp_op_module = tf.load_op_library('./ops/icp_op.so')
icp = icp_op_module.icp
except Exception: # pylint: disable=broad-except
logging.error('Could not load object file for ICP op.')
icp = None
| 1,078 | 33.806452 | 80 | py |
models | models-master/research/vid2depth/ops/__init__.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| 688 | 44.933333 | 80 | py |
models | models-master/research/vid2depth/ops/icp_grad_test.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for icp grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import icp_grad # pylint: disable=unused-import
import icp_test
import tensorflow as tf
from tensorflow.python.ops import gradient_checker
class IcpOpGradTest(icp_test.IcpOpTestBase):
def test_grad_transform(self):
with self.test_session():
cloud_source = self.small_cloud
cloud_target = cloud_source + [0.05, 0, 0]
ego_motion = self.identity_transform
transform, unused_residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
err = gradient_checker.compute_gradient_error(ego_motion,
ego_motion.shape.as_list(),
transform,
transform.shape.as_list())
# Since our gradient is an approximation, it doesn't pass a numerical check.
# Nonetheless, this test verifies that icp_grad computes a gradient.
self.assertGreater(err, 1e-3)
def test_grad_transform_same_ego_motion(self):
with self.test_session():
cloud_source = self.small_cloud
cloud_target = cloud_source + [0.1, 0, 0]
ego_motion = tf.constant([[0.1, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype=tf.float32)
transform, unused_residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
err = gradient_checker.compute_gradient_error(ego_motion,
ego_motion.shape.as_list(),
transform,
transform.shape.as_list())
# Since our gradient is an approximation, it doesn't pass a numerical check.
# Nonetheless, this test verifies that icp_grad computes a gradient.
self.assertGreater(err, 1e-3)
def test_grad_residual(self):
with self.test_session():
cloud_source = self.small_cloud
cloud_target = cloud_source + [0.05, 0, 0]
ego_motion = self.identity_transform
unused_transform, residual = self._run_icp(cloud_source, ego_motion,
cloud_target)
err = gradient_checker.compute_gradient_error(
cloud_source, cloud_source.shape.as_list(), residual,
residual.shape.as_list())
# Since our gradient is an approximation, it doesn't pass a numerical check.
# Nonetheless, this test verifies that icp_grad computes a gradient.
self.assertGreater(err, 1e-3)
if __name__ == '__main__':
tf.test.main()
| 3,429 | 42.974359 | 80 | py |
models | models-master/research/pcl_rl/objective.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objectives to compute loss and value targets.
Implements Actor Critic, PCL (vanilla PCL, Unified PCL, Trust PCL), and TRPO.
"""
import tensorflow as tf
import numpy as np
class Objective(object):
def __init__(self, learning_rate, clip_norm):
self.learning_rate = learning_rate
self.clip_norm = clip_norm
def get_optimizer(self, learning_rate):
"""Optimizer for gradient descent ops."""
return tf.train.AdamOptimizer(learning_rate=learning_rate,
epsilon=2e-4)
def training_ops(self, loss, learning_rate=None):
"""Gradient ops."""
opt = self.get_optimizer(learning_rate)
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if self.clip_norm:
grads, global_norm = tf.clip_by_global_norm(grads, self.clip_norm)
tf.summary.scalar('grad_global_norm', global_norm)
return opt.apply_gradients(zip(grads, params))
def get(self, rewards, pads, values, final_values,
log_probs, prev_log_probs, target_log_probs,
entropies, logits,
target_values, final_target_values):
"""Get objective calculations."""
raise NotImplementedError()
def discounted_future_sum(values, discount, rollout):
"""Discounted future sum of time-major values."""
discount_filter = tf.reshape(
discount ** tf.range(float(rollout)), [-1, 1, 1])
expanded_values = tf.concat(
[values, tf.zeros([rollout - 1, tf.shape(values)[1]])], 0)
conv_values = tf.transpose(tf.squeeze(tf.nn.conv1d(
tf.expand_dims(tf.transpose(expanded_values), -1), discount_filter,
stride=1, padding='VALID'), -1))
return conv_values
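# Reference semantics (added comment): for time-major `values` of shape
# [T, B], the convolution above computes
#   out[t, b] = sum_{k=0}^{rollout-1} discount**k * values[t + k, b]
# with zeros past the final step. A plain-numpy cross-check sketch:
#   out = np.zeros_like(vals)
#   for t in range(T):
#     for k in range(min(rollout, T - t)):
#       out[t] += discount**k * vals[t + k]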
def discounted_two_sided_sum(values, discount, rollout):
"""Discounted two-sided sum of time-major values."""
roll = float(rollout)
discount_filter = tf.reshape(
discount ** tf.abs(tf.range(-roll + 1, roll)), [-1, 1, 1])
expanded_values = tf.concat(
[tf.zeros([rollout - 1, tf.shape(values)[1]]), values,
tf.zeros([rollout - 1, tf.shape(values)[1]])], 0)
conv_values = tf.transpose(tf.squeeze(tf.nn.conv1d(
tf.expand_dims(tf.transpose(expanded_values), -1), discount_filter,
stride=1, padding='VALID'), -1))
return conv_values
def shift_values(values, discount, rollout, final_values=0.0):
"""Shift values up by some amount of time.
Those values that shift from a value beyond the last value
are calculated using final_values.
"""
roll_range = tf.cumsum(tf.ones_like(values[:rollout, :]), 0,
exclusive=True, reverse=True)
final_pad = tf.expand_dims(final_values, 0) * discount ** roll_range
return tf.concat([discount ** rollout * values[rollout:, :],
final_pad], 0)
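# Reference semantics (added comment): out[t] = discount**rollout *
# values[t + rollout] wherever that index exists; the final `rollout` rows,
# which would index past the end, decay final_values by decreasing powers
# of discount instead.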
class ActorCritic(Objective):
"""Standard Actor-Critic."""
def __init__(self, learning_rate, clip_norm=5,
policy_weight=1.0, critic_weight=0.1,
tau=0.1, gamma=1.0, rollout=10,
eps_lambda=0.0, clip_adv=None,
use_target_values=False):
super(ActorCritic, self).__init__(learning_rate, clip_norm=clip_norm)
self.policy_weight = policy_weight
self.critic_weight = critic_weight
self.tau = tau
self.gamma = gamma
self.rollout = rollout
self.clip_adv = clip_adv
self.eps_lambda = tf.get_variable( # TODO: need a better way
'eps_lambda', [], initializer=tf.constant_initializer(eps_lambda),
trainable=False)
self.new_eps_lambda = tf.placeholder(tf.float32, [])
self.assign_eps_lambda = self.eps_lambda.assign(
0.99 * self.eps_lambda + 0.01 * self.new_eps_lambda)
self.use_target_values = use_target_values
def get(self, rewards, pads, values, final_values,
log_probs, prev_log_probs, target_log_probs,
entropies, logits,
target_values, final_target_values):
not_pad = 1 - pads
batch_size = tf.shape(rewards)[1]
entropy = not_pad * sum(entropies)
rewards = not_pad * rewards
value_estimates = not_pad * values
log_probs = not_pad * sum(log_probs)
target_values = not_pad * tf.stop_gradient(target_values)
final_target_values = tf.stop_gradient(final_target_values)
sum_rewards = discounted_future_sum(rewards, self.gamma, self.rollout)
if self.use_target_values:
last_values = shift_values(
target_values, self.gamma, self.rollout,
final_target_values)
else:
last_values = shift_values(value_estimates, self.gamma, self.rollout,
final_values)
future_values = sum_rewards + last_values
baseline_values = value_estimates
adv = tf.stop_gradient(-baseline_values + future_values)
if self.clip_adv:
adv = tf.minimum(self.clip_adv, tf.maximum(-self.clip_adv, adv))
policy_loss = -adv * log_probs
critic_loss = -adv * baseline_values
regularizer = -self.tau * entropy
policy_loss = tf.reduce_mean(
tf.reduce_sum(policy_loss * not_pad, 0))
critic_loss = tf.reduce_mean(
tf.reduce_sum(critic_loss * not_pad, 0))
regularizer = tf.reduce_mean(
tf.reduce_sum(regularizer * not_pad, 0))
# loss for gradient calculation
loss = (self.policy_weight * policy_loss +
self.critic_weight * critic_loss + regularizer)
raw_loss = tf.reduce_mean( # TODO
tf.reduce_sum(not_pad * policy_loss, 0))
gradient_ops = self.training_ops(
loss, learning_rate=self.learning_rate)
tf.summary.histogram('log_probs', tf.reduce_sum(log_probs, 0))
tf.summary.histogram('rewards', tf.reduce_sum(rewards, 0))
tf.summary.scalar('avg_rewards',
tf.reduce_mean(tf.reduce_sum(rewards, 0)))
tf.summary.scalar('policy_loss',
tf.reduce_mean(tf.reduce_sum(not_pad * policy_loss)))
    tf.summary.scalar('critic_loss',
                      tf.reduce_mean(tf.reduce_sum(not_pad * critic_loss)))
tf.summary.scalar('loss', loss)
tf.summary.scalar('raw_loss', raw_loss)
return (loss, raw_loss, future_values,
gradient_ops, tf.summary.merge_all())
class PCL(ActorCritic):
"""PCL implementation.
Implements vanilla PCL, Unified PCL, and Trust PCL depending
on provided inputs.
"""
def get(self, rewards, pads, values, final_values,
log_probs, prev_log_probs, target_log_probs,
entropies, logits,
target_values, final_target_values):
not_pad = 1 - pads
batch_size = tf.shape(rewards)[1]
rewards = not_pad * rewards
value_estimates = not_pad * values
log_probs = not_pad * sum(log_probs)
target_log_probs = not_pad * tf.stop_gradient(sum(target_log_probs))
relative_log_probs = not_pad * (log_probs - target_log_probs)
target_values = not_pad * tf.stop_gradient(target_values)
final_target_values = tf.stop_gradient(final_target_values)
# Prepend.
not_pad = tf.concat([tf.ones([self.rollout - 1, batch_size]),
not_pad], 0)
rewards = tf.concat([tf.zeros([self.rollout - 1, batch_size]),
rewards], 0)
value_estimates = tf.concat(
[self.gamma ** tf.expand_dims(
tf.range(float(self.rollout - 1), 0, -1), 1) *
tf.ones([self.rollout - 1, batch_size]) *
value_estimates[0:1, :],
value_estimates], 0)
log_probs = tf.concat([tf.zeros([self.rollout - 1, batch_size]),
log_probs], 0)
prev_log_probs = tf.concat([tf.zeros([self.rollout - 1, batch_size]),
prev_log_probs], 0)
relative_log_probs = tf.concat([tf.zeros([self.rollout - 1, batch_size]),
relative_log_probs], 0)
target_values = tf.concat(
[self.gamma ** tf.expand_dims(
tf.range(float(self.rollout - 1), 0, -1), 1) *
tf.ones([self.rollout - 1, batch_size]) *
target_values[0:1, :],
target_values], 0)
sum_rewards = discounted_future_sum(rewards, self.gamma, self.rollout)
sum_log_probs = discounted_future_sum(log_probs, self.gamma, self.rollout)
    sum_prev_log_probs = discounted_future_sum(
        prev_log_probs, self.gamma, self.rollout)
sum_relative_log_probs = discounted_future_sum(
relative_log_probs, self.gamma, self.rollout)
if self.use_target_values:
last_values = shift_values(
target_values, self.gamma, self.rollout,
final_target_values)
else:
last_values = shift_values(value_estimates, self.gamma, self.rollout,
final_values)
future_values = (
- self.tau * sum_log_probs
- self.eps_lambda * sum_relative_log_probs
+ sum_rewards + last_values)
baseline_values = value_estimates
adv = tf.stop_gradient(-baseline_values + future_values)
if self.clip_adv:
adv = tf.minimum(self.clip_adv, tf.maximum(-self.clip_adv, adv))
policy_loss = -adv * sum_log_probs
critic_loss = -adv * (baseline_values - last_values)
policy_loss = tf.reduce_mean(
tf.reduce_sum(policy_loss * not_pad, 0))
critic_loss = tf.reduce_mean(
tf.reduce_sum(critic_loss * not_pad, 0))
# loss for gradient calculation
loss = (self.policy_weight * policy_loss +
self.critic_weight * critic_loss)
# actual quantity we're trying to minimize
raw_loss = tf.reduce_mean(
tf.reduce_sum(not_pad * adv * (-baseline_values + future_values), 0))
gradient_ops = self.training_ops(
loss, learning_rate=self.learning_rate)
tf.summary.histogram('log_probs', tf.reduce_sum(log_probs, 0))
tf.summary.histogram('rewards', tf.reduce_sum(rewards, 0))
tf.summary.histogram('future_values', future_values)
tf.summary.histogram('baseline_values', baseline_values)
tf.summary.histogram('advantages', adv)
tf.summary.scalar('avg_rewards',
tf.reduce_mean(tf.reduce_sum(rewards, 0)))
tf.summary.scalar('policy_loss',
tf.reduce_mean(tf.reduce_sum(not_pad * policy_loss)))
    tf.summary.scalar('critic_loss',
                      tf.reduce_mean(tf.reduce_sum(not_pad * critic_loss)))
tf.summary.scalar('loss', loss)
tf.summary.scalar('raw_loss', tf.reduce_mean(raw_loss))
tf.summary.scalar('eps_lambda', self.eps_lambda)
return (loss, raw_loss,
future_values[self.rollout - 1:, :],
gradient_ops, tf.summary.merge_all())
class TRPO(ActorCritic):
"""TRPO."""
def get(self, rewards, pads, values, final_values,
log_probs, prev_log_probs, target_log_probs,
entropies, logits,
target_values, final_target_values):
not_pad = 1 - pads
batch_size = tf.shape(rewards)[1]
rewards = not_pad * rewards
value_estimates = not_pad * values
log_probs = not_pad * sum(log_probs)
prev_log_probs = not_pad * prev_log_probs
target_values = not_pad * tf.stop_gradient(target_values)
final_target_values = tf.stop_gradient(final_target_values)
sum_rewards = discounted_future_sum(rewards, self.gamma, self.rollout)
if self.use_target_values:
last_values = shift_values(
target_values, self.gamma, self.rollout,
final_target_values)
else:
last_values = shift_values(value_estimates, self.gamma, self.rollout,
final_values)
future_values = sum_rewards + last_values
baseline_values = value_estimates
adv = tf.stop_gradient(-baseline_values + future_values)
if self.clip_adv:
adv = tf.minimum(self.clip_adv, tf.maximum(-self.clip_adv, adv))
policy_loss = -adv * tf.exp(log_probs - prev_log_probs)
critic_loss = -adv * baseline_values
policy_loss = tf.reduce_mean(
tf.reduce_sum(policy_loss * not_pad, 0))
critic_loss = tf.reduce_mean(
tf.reduce_sum(critic_loss * not_pad, 0))
raw_loss = policy_loss
# loss for gradient calculation
if self.policy_weight == 0:
policy_loss = 0.0
elif self.critic_weight == 0:
critic_loss = 0.0
loss = (self.policy_weight * policy_loss +
self.critic_weight * critic_loss)
gradient_ops = self.training_ops(
loss, learning_rate=self.learning_rate)
tf.summary.histogram('log_probs', tf.reduce_sum(log_probs, 0))
tf.summary.histogram('rewards', tf.reduce_sum(rewards, 0))
tf.summary.scalar('avg_rewards',
tf.reduce_mean(tf.reduce_sum(rewards, 0)))
tf.summary.scalar('policy_loss',
tf.reduce_mean(tf.reduce_sum(not_pad * policy_loss)))
    tf.summary.scalar('critic_loss',
                      tf.reduce_mean(tf.reduce_sum(not_pad * critic_loss)))
tf.summary.scalar('loss', loss)
tf.summary.scalar('raw_loss', raw_loss)
return (loss, raw_loss, future_values,
gradient_ops, tf.summary.merge_all())
| 13,589 | 36.232877 | 88 | py |
models | models-master/research/pcl_rl/expert_paths.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expert paths/trajectories.
For producing or loading expert trajectories in environment.
"""
import tensorflow as tf
import random
import os
import numpy as np
from six.moves import xrange
import pickle
gfile = tf.gfile
def sample_expert_paths(num, env_str, env_spec,
load_trajectories_file=None):
"""Sample a number of expert paths randomly."""
if load_trajectories_file is not None:
if not gfile.Exists(load_trajectories_file):
assert False, 'trajectories file %s does not exist' % load_trajectories_file
    with gfile.GFile(load_trajectories_file, 'rb') as f:
      episodes = pickle.load(f)
episodes = random.sample(episodes, num)
return [ep[1:] for ep in episodes]
return [sample_expert_path(env_str, env_spec)
for _ in xrange(num)]
def sample_expert_path(env_str, env_spec):
"""Algorithmic tasks have known distribution of expert paths we sample from."""
t = random.randint(2, 10) # sequence length
observations = []
actions = [env_spec.initial_act(None)]
rewards = []
if env_str in ['DuplicatedInput-v0', 'Copy-v0']:
chars = 5
random_ints = [int(random.random() * 1000) for _ in xrange(t)]
for tt in xrange(t):
char_idx = tt // 2 if env_str == 'DuplicatedInput-v0' else tt
char = random_ints[char_idx] % chars
observations.append([char])
actions.append([1, (tt + 1) % 2, char])
rewards.append((tt + 1) % 2)
elif env_str in ['RepeatCopy-v0']:
chars = 5
random_ints = [int(random.random() * 1000) for _ in xrange(t)]
for tt in xrange(3 * t + 2):
char_idx = (tt if tt < t else
2 * t - tt if tt <= 2 * t else
tt - 2 * t - 2)
if tt in [t, 2 * t + 1]:
char = chars
else:
char = random_ints[char_idx] % chars
observations.append([char])
actions.append([1 if tt < t else 0 if tt <= 2 * t else 1,
tt not in [t, 2 * t + 1], char])
rewards.append(actions[-1][-2])
elif env_str in ['Reverse-v0']:
chars = 2
random_ints = [int(random.random() * 1000) for _ in xrange(t)]
for tt in xrange(2 * t + 1):
char_idx = tt if tt < t else 2 * t - tt
if tt != t:
char = random_ints[char_idx] % chars
else:
char = chars
observations.append([char])
actions.append([tt < t, tt > t, char])
rewards.append(tt > t)
elif env_str in ['ReversedAddition-v0']:
chars = 3
random_ints = [int(random.random() * 1000) for _ in xrange(1 + 2 * t)]
carry = 0
char_history = []
move_map = {0: 3, 1: 1, 2: 2, 3: 1}
for tt in xrange(2 * t + 1):
char_idx = tt
if tt >= 2 * t:
char = chars
else:
char = random_ints[char_idx] % chars
char_history.append(char)
if tt % 2 == 1:
tot = char_history[-2] + char_history[-1] + carry
carry = tot // chars
tot = tot % chars
elif tt == 2 * t:
tot = carry
else:
tot = 0
observations.append([char])
actions.append([move_map[tt % len(move_map)],
tt % 2 or tt == 2 * t, tot])
rewards.append(tt % 2 or tt == 2 * t)
elif env_str in ['ReversedAddition3-v0']:
chars = 3
random_ints = [int(random.random() * 1000) for _ in xrange(1 + 3 * t)]
carry = 0
char_history = []
    move_map = {0: 3, 1: 3, 2: 1, 3: 2, 4: 2, 5: 1}
for tt in xrange(3 * t + 1):
char_idx = tt
if tt >= 3 * t:
char = chars
else:
char = random_ints[char_idx] % chars
char_history.append(char)
if tt % 3 == 2:
tot = char_history[-3] + char_history[-2] + char_history[-1] + carry
carry = tot // chars
tot = tot % chars
elif tt == 3 * t:
tot = carry
else:
tot = 0
observations.append([char])
actions.append([move_map[tt % len(move_map)],
tt % 3 == 2 or tt == 3 * t, tot])
rewards.append(tt % 3 == 2 or tt == 3 * t)
else:
assert False, 'No expert trajectories for env %s' % env_str
actions = [
env_spec.convert_env_actions_to_actions(act)
for act in actions]
observations.append([chars])
observations = [np.array(obs) for obs in zip(*observations)]
actions = [np.array(act) for act in zip(*actions)]
rewards = np.array(rewards)
return [observations, actions, rewards, True]
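# Shape note (added comment): each path is [observations, actions, rewards,
# terminated], where observations/actions are lists of time-major arrays
# (one per component; actions include the initial dummy action) and rewards
# holds one scalar per environment step.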
| 5,095 | 32.090909 | 82 | py |
models | models-master/research/pcl_rl/full_episode_objective.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Objectives for full-episode.
Implementations of UREX & REINFORCE. Note that these implementations
use a non-parametric baseline to reduce variance. Thus, multiple
samples with the same seed must be taken from the environment.
"""
import tensorflow as tf
import objective
class Reinforce(objective.Objective):
def __init__(self, learning_rate, clip_norm, num_samples,
tau=0.1, bonus_weight=1.0):
super(Reinforce, self).__init__(learning_rate, clip_norm=clip_norm)
self.num_samples = num_samples
assert self.num_samples > 1
self.tau = tau
self.bonus_weight = bonus_weight
self.eps_lambda = 0.0
def get_bonus(self, total_rewards, total_log_probs):
"""Exploration bonus."""
return -self.tau * total_log_probs
def get(self, rewards, pads, values, final_values,
log_probs, prev_log_probs, target_log_probs,
entropies, logits,
target_values, final_target_values):
seq_length = tf.shape(rewards)[0]
not_pad = tf.reshape(1 - pads, [seq_length, -1, self.num_samples])
rewards = not_pad * tf.reshape(rewards, [seq_length, -1, self.num_samples])
log_probs = not_pad * tf.reshape(sum(log_probs), [seq_length, -1, self.num_samples])
total_rewards = tf.reduce_sum(rewards, 0)
total_log_probs = tf.reduce_sum(log_probs, 0)
rewards_and_bonus = (total_rewards +
self.bonus_weight *
self.get_bonus(total_rewards, total_log_probs))
baseline = tf.reduce_mean(rewards_and_bonus, 1, keep_dims=True)
loss = -tf.stop_gradient(rewards_and_bonus - baseline) * total_log_probs
loss = tf.reduce_mean(loss)
raw_loss = loss # TODO
gradient_ops = self.training_ops(
loss, learning_rate=self.learning_rate)
tf.summary.histogram('log_probs', total_log_probs)
tf.summary.histogram('rewards', total_rewards)
tf.summary.scalar('avg_rewards',
tf.reduce_mean(total_rewards))
tf.summary.scalar('loss', loss)
return loss, raw_loss, baseline, gradient_ops, tf.summary.merge_all()
class UREX(Reinforce):
def get_bonus(self, total_rewards, total_log_probs):
"""Exploration bonus."""
discrepancy = total_rewards / self.tau - total_log_probs
normalized_d = self.num_samples * tf.nn.softmax(discrepancy)
return self.tau * normalized_d
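# UREX bonus sketch (added comment): with per-trajectory discrepancy
#   d_i = total_reward_i / tau - total_log_prob_i,
# the bonus reweights the num_samples same-seed rollouts as
#   bonus_i = tau * num_samples * softmax(d)_i,
# so trajectories whose reward is high relative to their sampling
# probability are pushed up; the weights average to 1 across samples.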
| 3,050 | 35.321429 | 88 | py |
models | models-master/research/pcl_rl/optimizers.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizers mostly for value estimate.
Gradient Descent optimizer
LBFGS optimizer
Best Fit optimizer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
import numpy as np
import scipy.optimize
def var_size(v):
return int(np.prod([int(d) for d in v.shape]))
def gradients(loss, var_list):
grads = tf.gradients(loss, var_list)
return [g if g is not None else tf.zeros(v.shape)
for g, v in zip(grads, var_list)]
def flatgrad(loss, var_list):
grads = gradients(loss, var_list)
return tf.concat([tf.reshape(grad, [-1])
for (v, grad) in zip(var_list, grads)
if grad is not None], 0)
def get_flat(var_list):
return tf.concat([tf.reshape(v, [-1]) for v in var_list], 0)
def set_from_flat(var_list, flat_theta):
  shapes = [v.shape for v in var_list]
  sizes = [var_size(v) for v in var_list]
  start = 0
  assigns = []
for (shape, size, v) in zip(shapes, sizes, var_list):
assigns.append(v.assign(
tf.reshape(flat_theta[start:start + size], shape)))
start += size
assert start == sum(sizes)
return tf.group(*assigns)
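# Round-trip sketch (added comment; TF1-style, not in the original file):
#   v = tf.get_variable('v', [2, 3])
#   theta_ph = tf.placeholder(tf.float32, [None])
#   read_op = get_flat([v])                  # -> [6] flat parameter vector
#   write_op = set_from_flat([v], theta_ph)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     theta = sess.run(read_op)
#     sess.run(write_op, feed_dict={theta_ph: 0.5 * theta})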
class LbfgsOptimization(object):
def __init__(self, max_iter=25, mix_frac=1.0):
self.max_iter = max_iter
self.mix_frac = mix_frac
def setup_placeholders(self):
self.flat_theta = tf.placeholder(tf.float32, [None], 'flat_theta')
self.intended_values = tf.placeholder(tf.float32, [None], 'intended_values')
def setup(self, var_list, values, targets, pads,
inputs, regression_weight):
self.setup_placeholders()
self.values = values
self.targets = targets
self.raw_loss = (tf.reduce_sum((1 - pads) * tf.square(values - self.intended_values))
/ tf.reduce_sum(1 - pads))
self.loss_flat_gradient = flatgrad(self.raw_loss, var_list)
self.flat_vars = get_flat(var_list)
self.set_vars = set_from_flat(var_list, self.flat_theta)
def optimize(self, sess, feed_dict):
old_theta = sess.run(self.flat_vars)
old_values, targets = sess.run([self.values, self.targets], feed_dict=feed_dict)
intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)
feed_dict = dict(feed_dict)
feed_dict[self.intended_values] = intended_values
def calc_loss_and_grad(theta):
sess.run(self.set_vars, feed_dict={self.flat_theta: theta})
loss, grad = sess.run([self.raw_loss, self.loss_flat_gradient],
feed_dict=feed_dict)
grad = grad.astype('float64')
return loss, grad
theta, _, _ = scipy.optimize.fmin_l_bfgs_b(
calc_loss_and_grad, old_theta, maxiter=self.max_iter)
sess.run(self.set_vars, feed_dict={self.flat_theta: theta})
class GradOptimization(object):
def __init__(self, learning_rate=0.001, max_iter=25, mix_frac=1.0):
self.learning_rate = learning_rate
self.max_iter = max_iter
self.mix_frac = mix_frac
def get_optimizer(self):
return tf.train.AdamOptimizer(learning_rate=self.learning_rate,
epsilon=2e-4)
def setup_placeholders(self):
self.flat_theta = tf.placeholder(tf.float32, [None], 'flat_theta')
self.intended_values = tf.placeholder(tf.float32, [None], 'intended_values')
def setup(self, var_list, values, targets, pads,
inputs, regression_weight):
self.setup_placeholders()
self.values = values
self.targets = targets
self.raw_loss = (tf.reduce_sum((1 - pads) * tf.square(values - self.intended_values))
/ tf.reduce_sum(1 - pads))
opt = self.get_optimizer()
params = var_list
grads = tf.gradients(self.raw_loss, params)
self.gradient_ops = opt.apply_gradients(zip(grads, params))
def optimize(self, sess, feed_dict):
old_values, targets = sess.run([self.values, self.targets], feed_dict=feed_dict)
intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)
feed_dict = dict(feed_dict)
feed_dict[self.intended_values] = intended_values
for _ in xrange(self.max_iter):
sess.run(self.gradient_ops, feed_dict=feed_dict)
class BestFitOptimization(object):
def __init__(self, mix_frac=1.0):
self.mix_frac = mix_frac
def setup_placeholders(self):
self.new_regression_weight = tf.placeholder(
tf.float32, self.regression_weight.shape)
def setup(self, var_list, values, targets, pads,
inputs, regression_weight):
self.values = values
self.targets = targets
self.inputs = inputs
self.regression_weight = regression_weight
self.setup_placeholders()
self.update_regression_weight = tf.assign(
self.regression_weight, self.new_regression_weight)
def optimize(self, sess, feed_dict):
reg_input, reg_weight, old_values, targets = sess.run(
[self.inputs, self.regression_weight, self.values, self.targets],
feed_dict=feed_dict)
intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)
# taken from rllab
reg_coeff = 1e-5
for _ in range(5):
best_fit_weight = np.linalg.lstsq(
reg_input.T.dot(reg_input) +
reg_coeff * np.identity(reg_input.shape[1]),
reg_input.T.dot(intended_values))[0]
if not np.any(np.isnan(best_fit_weight)):
break
reg_coeff *= 10
if len(best_fit_weight.shape) == 1:
best_fit_weight = np.expand_dims(best_fit_weight, -1)
sess.run(self.update_regression_weight,
feed_dict={self.new_regression_weight: best_fit_weight})
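# Note on the loop above (added comment): it solves the ridge-regularized
# normal equations (X^T X + c I) w = X^T y for the linear value head,
# multiplying c by 10 until the solution is free of NaNs -- the same
# linear-feature baseline fit used in rllab.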
| 6,346 | 31.055556 | 89 | py |
models | models-master/research/pcl_rl/model.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model is responsible for setting up Tensorflow graph.
Creates policy and value networks. Also sets up all optimization
ops, including gradient ops, trust region ops, and value optimizers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Model(object):
def __init__(self, env_spec, global_step,
target_network_lag=0.95,
sample_from='online',
get_policy=None,
get_baseline=None,
get_objective=None,
get_trust_region_p_opt=None,
get_value_opt=None):
self.env_spec = env_spec
self.global_step = global_step
self.inc_global_step = self.global_step.assign_add(1)
self.target_network_lag = target_network_lag
self.sample_from = sample_from
self.policy = get_policy()
self.baseline = get_baseline()
self.objective = get_objective()
self.baseline.eps_lambda = self.objective.eps_lambda # TODO: do this better
self.trust_region_policy_opt = get_trust_region_p_opt()
self.value_opt = get_value_opt()
def setup_placeholders(self):
"""Create the Tensorflow placeholders."""
# summary placeholder
self.avg_episode_reward = tf.placeholder(
tf.float32, [], 'avg_episode_reward')
self.greedy_episode_reward = tf.placeholder(
tf.float32, [], 'greedy_episode_reward')
# sampling placeholders
self.internal_state = tf.placeholder(tf.float32,
[None, self.policy.rnn_state_dim],
'internal_state')
self.single_observation = []
for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
self.single_observation.append(
tf.placeholder(tf.int32, [None], 'obs%d' % i))
elif self.env_spec.is_box(obs_type):
self.single_observation.append(
tf.placeholder(tf.float32, [None, obs_dim], 'obs%d' % i))
else:
assert False
self.single_action = []
for i, (action_dim, action_type) in \
enumerate(self.env_spec.act_dims_and_types):
if self.env_spec.is_discrete(action_type):
self.single_action.append(
tf.placeholder(tf.int32, [None], 'act%d' % i))
elif self.env_spec.is_box(action_type):
self.single_action.append(
tf.placeholder(tf.float32, [None, action_dim], 'act%d' % i))
else:
assert False
# training placeholders
self.observations = []
for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
self.observations.append(
tf.placeholder(tf.int32, [None, None], 'all_obs%d' % i))
else:
self.observations.append(
tf.placeholder(tf.float32, [None, None, obs_dim], 'all_obs%d' % i))
self.actions = []
self.other_logits = []
for i, (action_dim, action_type) in \
enumerate(self.env_spec.act_dims_and_types):
if self.env_spec.is_discrete(action_type):
self.actions.append(
tf.placeholder(tf.int32, [None, None], 'all_act%d' % i))
      elif self.env_spec.is_box(action_type):
self.actions.append(
tf.placeholder(tf.float32, [None, None, action_dim],
'all_act%d' % i))
self.other_logits.append(
tf.placeholder(tf.float32, [None, None, None],
'other_logits%d' % i))
self.rewards = tf.placeholder(tf.float32, [None, None], 'rewards')
self.terminated = tf.placeholder(tf.float32, [None], 'terminated')
self.pads = tf.placeholder(tf.float32, [None, None], 'pads')
self.prev_log_probs = tf.placeholder(tf.float32, [None, None],
'prev_log_probs')
def setup(self, train=True):
"""Setup Tensorflow Graph."""
self.setup_placeholders()
tf.summary.scalar('avg_episode_reward', self.avg_episode_reward)
tf.summary.scalar('greedy_episode_reward', self.greedy_episode_reward)
with tf.variable_scope('model', reuse=None):
# policy network
with tf.variable_scope('policy_net'):
(self.policy_internal_states, self.logits, self.log_probs,
self.entropies, self.self_kls) = \
self.policy.multi_step(self.observations,
self.internal_state,
self.actions)
self.out_log_probs = sum(self.log_probs)
self.kl = self.policy.calculate_kl(self.other_logits, self.logits)
self.avg_kl = (tf.reduce_sum(sum(self.kl)[:-1] * (1 - self.pads)) /
tf.reduce_sum(1 - self.pads))
# value network
with tf.variable_scope('value_net'):
(self.values,
self.regression_input,
self.regression_weight) = self.baseline.get_values(
self.observations, self.actions,
self.policy_internal_states, self.logits)
# target policy network
with tf.variable_scope('target_policy_net'):
(self.target_policy_internal_states,
self.target_logits, self.target_log_probs,
_, _) = \
self.policy.multi_step(self.observations,
self.internal_state,
self.actions)
# target value network
with tf.variable_scope('target_value_net'):
(self.target_values, _, _) = self.baseline.get_values(
self.observations, self.actions,
self.target_policy_internal_states, self.target_logits)
# construct copy op online --> target
all_vars = tf.trainable_variables()
online_vars = [p for p in all_vars if
'/policy_net' in p.name or '/value_net' in p.name]
target_vars = [p for p in all_vars if
'target_policy_net' in p.name or 'target_value_net' in p.name]
online_vars.sort(key=lambda p: p.name)
target_vars.sort(key=lambda p: p.name)
aa = self.target_network_lag
self.copy_op = tf.group(*[
target_p.assign(aa * target_p + (1 - aa) * online_p)
for online_p, target_p in zip(online_vars, target_vars)])
if train:
# evaluate objective
(self.loss, self.raw_loss, self.regression_target,
self.gradient_ops, self.summary) = self.objective.get(
self.rewards, self.pads,
self.values[:-1, :],
self.values[-1, :] * (1 - self.terminated),
self.log_probs, self.prev_log_probs, self.target_log_probs,
self.entropies, self.logits, self.target_values[:-1, :],
self.target_values[-1, :] * (1 - self.terminated))
self.regression_target = tf.reshape(self.regression_target, [-1])
self.policy_vars = [
v for v in tf.trainable_variables()
if '/policy_net' in v.name]
self.value_vars = [
v for v in tf.trainable_variables()
if '/value_net' in v.name]
# trust region optimizer
if self.trust_region_policy_opt is not None:
with tf.variable_scope('trust_region_policy', reuse=None):
avg_self_kl = (
tf.reduce_sum(sum(self.self_kls) * (1 - self.pads)) /
tf.reduce_sum(1 - self.pads))
self.trust_region_policy_opt.setup(
self.policy_vars, self.raw_loss, avg_self_kl,
self.avg_kl)
# value optimizer
if self.value_opt is not None:
with tf.variable_scope('trust_region_value', reuse=None):
self.value_opt.setup(
self.value_vars,
tf.reshape(self.values[:-1, :], [-1]),
self.regression_target,
tf.reshape(self.pads, [-1]),
self.regression_input, self.regression_weight)
# we re-use variables for the sampling operations
with tf.variable_scope('model', reuse=True):
scope = ('target_policy_net' if self.sample_from == 'target'
else 'policy_net')
with tf.variable_scope(scope):
self.next_internal_state, self.sampled_actions = \
self.policy.sample_step(self.single_observation,
self.internal_state,
self.single_action)
self.greedy_next_internal_state, self.greedy_sampled_actions = \
self.policy.sample_step(self.single_observation,
self.internal_state,
self.single_action,
greedy=True)
def sample_step(self, sess,
single_observation, internal_state, single_action,
greedy=False):
"""Sample batch of steps from policy."""
if greedy:
outputs = [self.greedy_next_internal_state, self.greedy_sampled_actions]
else:
outputs = [self.next_internal_state, self.sampled_actions]
feed_dict = {self.internal_state: internal_state}
for action_place, action in zip(self.single_action, single_action):
feed_dict[action_place] = action
for obs_place, obs in zip(self.single_observation, single_observation):
feed_dict[obs_place] = obs
return sess.run(outputs, feed_dict=feed_dict)
def train_step(self, sess,
observations, internal_state, actions,
rewards, terminated, pads,
avg_episode_reward=0, greedy_episode_reward=0):
"""Train network using standard gradient descent."""
outputs = [self.raw_loss, self.gradient_ops, self.summary]
feed_dict = {self.internal_state: internal_state,
self.rewards: rewards,
self.terminated: terminated,
self.pads: pads,
self.avg_episode_reward: avg_episode_reward,
self.greedy_episode_reward: greedy_episode_reward}
time_len = None
for action_place, action in zip(self.actions, actions):
if time_len is None:
time_len = len(action)
assert time_len == len(action)
feed_dict[action_place] = action
for obs_place, obs in zip(self.observations, observations):
assert time_len == len(obs)
feed_dict[obs_place] = obs
assert len(rewards) == time_len - 1
return sess.run(outputs, feed_dict=feed_dict)
def trust_region_step(self, sess,
observations, internal_state, actions,
rewards, terminated, pads,
avg_episode_reward=0,
greedy_episode_reward=0):
"""Train policy using trust region step."""
feed_dict = {self.internal_state: internal_state,
self.rewards: rewards,
self.terminated: terminated,
self.pads: pads,
self.avg_episode_reward: avg_episode_reward,
self.greedy_episode_reward: greedy_episode_reward}
for action_place, action in zip(self.actions, actions):
feed_dict[action_place] = action
for obs_place, obs in zip(self.observations, observations):
feed_dict[obs_place] = obs
(prev_log_probs, prev_logits) = sess.run(
[self.out_log_probs, self.logits], feed_dict=feed_dict)
feed_dict[self.prev_log_probs] = prev_log_probs
for other_logit, prev_logit in zip(self.other_logits, prev_logits):
feed_dict[other_logit] = prev_logit
# fit policy
self.trust_region_policy_opt.optimize(sess, feed_dict)
ret = sess.run([self.raw_loss, self.summary], feed_dict=feed_dict)
ret = [ret[0], None, ret[1]]
return ret
def fit_values(self, sess,
observations, internal_state, actions,
rewards, terminated, pads):
"""Train value network using value-specific optimizer."""
feed_dict = {self.internal_state: internal_state,
self.rewards: rewards,
self.terminated: terminated,
self.pads: pads}
for action_place, action in zip(self.actions, actions):
feed_dict[action_place] = action
for obs_place, obs in zip(self.observations, observations):
feed_dict[obs_place] = obs
# fit values
if self.value_opt is None:
raise ValueError('Specific value optimizer does not exist')
self.value_opt.optimize(sess, feed_dict)
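# Note on the target networks above (added comment): copy_op applies a
# Polyak / exponential-moving-average update,
#   target <- lag * target + (1 - lag) * online,
# so with target_network_lag=0.95 the target policy/value networks trail
# the online ones, stabilizing objectives that rely on target values.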
| 13,144 | 39.198777 | 83 | py |
models | models-master/research/pcl_rl/policy.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Policy neural network.
Implements a network that takes in observations and produces actions
and log probabilities given a sampling distribution parameterization.
"""
import tensorflow as tf
import numpy as np
class Policy(object):
def __init__(self, env_spec, internal_dim,
fixed_std=True, recurrent=True,
input_prev_actions=True):
self.env_spec = env_spec
self.internal_dim = internal_dim
self.rnn_state_dim = self.internal_dim
self.fixed_std = fixed_std
self.recurrent = recurrent
self.input_prev_actions = input_prev_actions
self.matrix_init = tf.truncated_normal_initializer(stddev=0.01)
self.vector_init = tf.constant_initializer(0.0)
@property
def input_dim(self):
return (self.env_spec.total_obs_dim +
self.env_spec.total_sampled_act_dim * self.input_prev_actions)
@property
def output_dim(self):
return self.env_spec.total_sampling_act_dim
def get_cell(self):
"""Get RNN cell."""
self.cell_input_dim = self.internal_dim // 2
cell = tf.contrib.rnn.LSTMCell(self.cell_input_dim,
state_is_tuple=False,
reuse=tf.get_variable_scope().reuse)
cell = tf.contrib.rnn.OutputProjectionWrapper(
cell, self.output_dim,
reuse=tf.get_variable_scope().reuse)
return cell
def core(self, obs, prev_internal_state, prev_actions):
"""Core neural network taking in inputs and outputting sampling
distribution parameters."""
batch_size = tf.shape(obs[0])[0]
if not self.recurrent:
prev_internal_state = tf.zeros([batch_size, self.rnn_state_dim])
cell = self.get_cell()
b = tf.get_variable('input_bias', [self.cell_input_dim],
initializer=self.vector_init)
cell_input = tf.nn.bias_add(tf.zeros([batch_size, self.cell_input_dim]), b)
for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
w = tf.get_variable('w_state%d' % i, [obs_dim, self.cell_input_dim],
initializer=self.matrix_init)
if self.env_spec.is_discrete(obs_type):
cell_input += tf.matmul(tf.one_hot(obs[i], obs_dim), w)
elif self.env_spec.is_box(obs_type):
cell_input += tf.matmul(obs[i], w)
else:
assert False
if self.input_prev_actions:
if self.env_spec.combine_actions: # TODO(ofir): clean this up
prev_action = prev_actions[0]
for i, action_dim in enumerate(self.env_spec.orig_act_dims):
act = tf.mod(prev_action, action_dim)
w = tf.get_variable('w_prev_action%d' % i, [action_dim, self.cell_input_dim],
initializer=self.matrix_init)
cell_input += tf.matmul(tf.one_hot(act, action_dim), w)
prev_action = tf.to_int32(prev_action / action_dim)
else:
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
w = tf.get_variable('w_prev_action%d' % i, [act_dim, self.cell_input_dim],
initializer=self.matrix_init)
if self.env_spec.is_discrete(act_type):
cell_input += tf.matmul(tf.one_hot(prev_actions[i], act_dim), w)
elif self.env_spec.is_box(act_type):
cell_input += tf.matmul(prev_actions[i], w)
else:
assert False
output, next_state = cell(cell_input, prev_internal_state)
return output, next_state
def sample_action(self, logits, sampling_dim,
act_dim, act_type, greedy=False):
"""Sample an action from a distribution."""
if self.env_spec.is_discrete(act_type):
if greedy:
act = tf.argmax(logits, 1)
else:
act = tf.reshape(tf.multinomial(logits, 1), [-1])
elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
if greedy:
act = means
else:
batch_size = tf.shape(logits)[0]
act = means + std * tf.random_normal([batch_size, act_dim])
else:
assert False
return act
def entropy(self, logits,
sampling_dim, act_dim, act_type):
"""Calculate entropy of distribution."""
if self.env_spec.is_discrete(act_type):
entropy = tf.reduce_sum(
-tf.nn.softmax(logits) * tf.nn.log_softmax(logits), -1)
elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
entropy = tf.reduce_sum(
0.5 * (1 + tf.log(2 * np.pi * tf.square(std))), -1)
else:
assert False
return entropy
def self_kl(self, logits,
sampling_dim, act_dim, act_type):
"""Calculate KL of distribution with itself.
    Used only for the gradients.
"""
if self.env_spec.is_discrete(act_type):
probs = tf.nn.softmax(logits)
log_probs = tf.nn.log_softmax(logits)
self_kl = tf.reduce_sum(
tf.stop_gradient(probs) *
(tf.stop_gradient(log_probs) - log_probs), -1)
elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
my_means = tf.stop_gradient(means)
my_std = tf.stop_gradient(std)
self_kl = tf.reduce_sum(
tf.log(std / my_std) +
(tf.square(my_std) + tf.square(my_means - means)) /
(2.0 * tf.square(std)) - 0.5,
-1)
else:
assert False
return self_kl
def log_prob_action(self, action, logits,
sampling_dim, act_dim, act_type):
"""Calculate log-prob of action sampled from distribution."""
if self.env_spec.is_discrete(act_type):
act_log_prob = tf.reduce_sum(
tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
elif self.env_spec.is_box(act_type):
      means = logits[:, :sampling_dim // 2]
      std = logits[:, sampling_dim // 2:]
act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
- 0.5 * tf.square(action - means) / tf.square(std))
act_log_prob = tf.reduce_sum(act_log_prob, -1)
else:
assert False
return act_log_prob
def sample_actions(self, output, actions=None, greedy=False):
"""Sample all actions given output of core network."""
sampled_actions = []
logits = []
log_probs = []
entropy = []
self_kl = []
start_idx = 0
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
if self.fixed_std and self.env_spec.is_box(act_type):
act_logits = output[:, start_idx:start_idx + act_dim]
log_std = tf.get_variable('std%d' % i, [1, sampling_dim // 2])
# fix standard deviations to variable
act_logits = tf.concat(
[act_logits,
1e-6 + tf.exp(log_std) + 0 * act_logits], 1)
else:
act_logits = output[:, start_idx:start_idx + sampling_dim]
if actions is None:
act = self.sample_action(act_logits, sampling_dim,
act_dim, act_type,
greedy=greedy)
else:
act = actions[i]
ent = self.entropy(act_logits, sampling_dim, act_dim, act_type)
kl = self.self_kl(act_logits, sampling_dim, act_dim, act_type)
act_log_prob = self.log_prob_action(
act, act_logits,
sampling_dim, act_dim, act_type)
sampled_actions.append(act)
logits.append(act_logits)
log_probs.append(act_log_prob)
entropy.append(ent)
self_kl.append(kl)
start_idx += sampling_dim
assert start_idx == self.env_spec.total_sampling_act_dim
return sampled_actions, logits, log_probs, entropy, self_kl
def get_kl(self, my_logits, other_logits):
"""Calculate KL between one policy output and another."""
kl = []
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
single_my_logits = my_logits[i]
single_other_logits = other_logits[i]
if self.env_spec.is_discrete(act_type):
my_probs = tf.nn.softmax(single_my_logits)
my_log_probs = tf.nn.log_softmax(single_my_logits)
other_log_probs = tf.nn.log_softmax(single_other_logits)
my_kl = tf.reduce_sum(my_probs * (my_log_probs - other_log_probs), -1)
elif self.env_spec.is_box(act_type):
        my_means = single_my_logits[:, :sampling_dim // 2]
        my_std = single_my_logits[:, sampling_dim // 2:]
        other_means = single_other_logits[:, :sampling_dim // 2]
        other_std = single_other_logits[:, sampling_dim // 2:]
my_kl = tf.reduce_sum(
tf.log(other_std / my_std) +
(tf.square(my_std) + tf.square(my_means - other_means)) /
(2.0 * tf.square(other_std)) - 0.5,
-1)
else:
assert False
kl.append(my_kl)
return kl
def single_step(self, prev, cur, greedy=False):
"""Single RNN step. Equivalently, single-time-step sampled actions."""
prev_internal_state, prev_actions, _, _, _, _ = prev
obs, actions = cur # state observed and action taken at this time step
# feed into RNN cell
output, next_state = self.core(
obs, prev_internal_state, prev_actions)
# sample actions with values and log-probs
(actions, logits, log_probs,
entropy, self_kl) = self.sample_actions(
output, actions=actions, greedy=greedy)
return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
tuple(entropy), tuple(self_kl))
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
"""Sample single step from policy."""
(next_state, sampled_actions, logits, log_probs,
entropies, self_kls) = self.single_step(
(prev_internal_state, prev_actions, None, None, None, None),
(obs, None), greedy=greedy)
return next_state, sampled_actions
def multi_step(self, all_obs, initial_state, all_actions):
"""Calculate log-probs and other calculations on batch of episodes."""
batch_size = tf.shape(initial_state)[0]
time_length = tf.shape(all_obs[0])[0]
initial_actions = [act[0] for act in all_actions]
all_actions = [tf.concat([act[1:], act[0:1]], 0)
for act in all_actions] # "final" action is dummy
(internal_states, _, logits, log_probs,
entropies, self_kls) = tf.scan(
self.single_step,
(all_obs, all_actions),
initializer=self.get_initializer(
batch_size, initial_state, initial_actions))
# remove "final" computations
log_probs = [log_prob[:-1] for log_prob in log_probs]
entropies = [entropy[:-1] for entropy in entropies]
self_kls = [self_kl[:-1] for self_kl in self_kls]
return internal_states, logits, log_probs, entropies, self_kls
def get_initializer(self, batch_size, initial_state, initial_actions):
"""Get initializer for RNN."""
logits_init = []
log_probs_init = []
for act_dim, act_type in self.env_spec.act_dims_and_types:
sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
logits_init.append(tf.zeros([batch_size, sampling_dim]))
log_probs_init.append(tf.zeros([batch_size]))
entropy_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]
self_kl_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]
return (initial_state,
tuple(initial_actions),
tuple(logits_init), tuple(log_probs_init),
tuple(entropy_init),
tuple(self_kl_init))
def calculate_kl(self, my_logits, other_logits):
"""Calculate KL between one policy and another on batch of episodes."""
batch_size = tf.shape(my_logits[0])[1]
time_length = tf.shape(my_logits[0])[0]
reshaped_my_logits = [
tf.reshape(my_logit, [batch_size * time_length, -1])
for my_logit in my_logits]
reshaped_other_logits = [
tf.reshape(other_logit, [batch_size * time_length, -1])
for other_logit in other_logits]
kl = self.get_kl(reshaped_my_logits, reshaped_other_logits)
kl = [tf.reshape(kkl, [time_length, batch_size])
for kkl in kl]
return kl
class MLPPolicy(Policy):
"""Non-recurrent policy."""
def get_cell(self):
self.cell_input_dim = self.internal_dim
def mlp(cell_input, prev_internal_state):
w1 = tf.get_variable('w1', [self.cell_input_dim, self.internal_dim])
b1 = tf.get_variable('b1', [self.internal_dim])
w2 = tf.get_variable('w2', [self.internal_dim, self.internal_dim])
b2 = tf.get_variable('b2', [self.internal_dim])
w3 = tf.get_variable('w3', [self.internal_dim, self.internal_dim])
b3 = tf.get_variable('b3', [self.internal_dim])
proj = tf.get_variable(
'proj', [self.internal_dim, self.output_dim])
hidden = cell_input
hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w1), b1))
hidden = tf.tanh(tf.nn.bias_add(tf.matmul(hidden, w2), b2))
output = tf.matmul(hidden, proj)
return output, hidden
return mlp
def single_step(self, obs, actions, prev_actions, greedy=False):
"""Single step."""
batch_size = tf.shape(obs[0])[0]
prev_internal_state = tf.zeros([batch_size, self.internal_dim])
output, next_state = self.core(
obs, prev_internal_state, prev_actions)
# sample actions with values and log-probs
(actions, logits, log_probs,
entropy, self_kl) = self.sample_actions(
output, actions=actions, greedy=greedy)
return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
tuple(entropy), tuple(self_kl))
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
"""Sample single step from policy."""
(next_state, sampled_actions, logits, log_probs,
entropies, self_kls) = self.single_step(obs, None, prev_actions,
greedy=greedy)
return next_state, sampled_actions
def multi_step(self, all_obs, initial_state, all_actions):
"""Calculate log-probs and other calculations on batch of episodes."""
batch_size = tf.shape(initial_state)[0]
time_length = tf.shape(all_obs[0])[0]
# first reshape inputs as a single batch
reshaped_obs = []
for obs, (obs_dim, obs_type) in zip(all_obs, self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size]))
elif self.env_spec.is_box(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size, obs_dim]))
reshaped_act = []
reshaped_prev_act = []
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
act = tf.concat([all_actions[i][1:], all_actions[i][0:1]], 0)
prev_act = all_actions[i]
if self.env_spec.is_discrete(act_type):
reshaped_act.append(tf.reshape(act, [time_length * batch_size]))
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size]))
elif self.env_spec.is_box(act_type):
reshaped_act.append(
tf.reshape(act, [time_length * batch_size, act_dim]))
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size, act_dim]))
# now inputs go into single step as one large batch
(internal_states, _, logits, log_probs,
entropies, self_kls) = self.single_step(
reshaped_obs, reshaped_act, reshaped_prev_act)
# reshape the outputs back to original time-major format
internal_states = tf.reshape(internal_states, [time_length, batch_size, -1])
logits = [tf.reshape(logit, [time_length, batch_size, -1])
for logit in logits]
log_probs = [tf.reshape(log_prob, [time_length, batch_size])[:-1]
for log_prob in log_probs]
entropies = [tf.reshape(ent, [time_length, batch_size])[:-1]
for ent in entropies]
self_kls = [tf.reshape(self_kl, [time_length, batch_size])[:-1]
for self_kl in self_kls]
return internal_states, logits, log_probs, entropies, self_kls
| 16,946 | 36.912752 | 87 | py |
models | models-master/research/pcl_rl/trust_region.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trust region optimization.
A lot of this is adapted from others' code.
See Schulman's Modular RL, wojzaremba's TRPO, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
import numpy as np
def var_size(v):
return int(np.prod([int(d) for d in v.shape]))
def gradients(loss, var_list):
grads = tf.gradients(loss, var_list)
return [g if g is not None else tf.zeros(v.shape)
for g, v in zip(grads, var_list)]
def flatgrad(loss, var_list):
grads = gradients(loss, var_list)
return tf.concat([tf.reshape(grad, [-1])
for (v, grad) in zip(var_list, grads)
if grad is not None], 0)
def get_flat(var_list):
return tf.concat([tf.reshape(v, [-1]) for v in var_list], 0)
def set_from_flat(var_list, flat_theta):
  shapes = [v.shape for v in var_list]
  sizes = [var_size(v) for v in var_list]
  start = 0
  assigns = []
for (shape, size, v) in zip(shapes, sizes, var_list):
assigns.append(v.assign(
tf.reshape(flat_theta[start:start + size], shape)))
start += size
assert start == sum(sizes)
return tf.group(*assigns)
class TrustRegionOptimization(object):
def __init__(self, max_divergence=0.1, cg_damping=0.1):
self.max_divergence = max_divergence
self.cg_damping = cg_damping
def setup_placeholders(self):
self.flat_tangent = tf.placeholder(tf.float32, [None], 'flat_tangent')
self.flat_theta = tf.placeholder(tf.float32, [None], 'flat_theta')
def setup(self, var_list, raw_loss, self_divergence,
divergence=None):
self.setup_placeholders()
self.raw_loss = raw_loss
self.divergence = divergence
self.loss_flat_gradient = flatgrad(raw_loss, var_list)
self.divergence_gradient = gradients(self_divergence, var_list)
shapes = [var.shape for var in var_list]
sizes = [var_size(var) for var in var_list]
start = 0
tangents = []
for shape, size in zip(shapes, sizes):
param = tf.reshape(self.flat_tangent[start:start + size], shape)
tangents.append(param)
start += size
assert start == sum(sizes)
self.grad_vector_product = sum(
tf.reduce_sum(g * t) for (g, t) in zip(self.divergence_gradient, tangents))
self.fisher_vector_product = flatgrad(self.grad_vector_product, var_list)
self.flat_vars = get_flat(var_list)
self.set_vars = set_from_flat(var_list, self.flat_theta)
def optimize(self, sess, feed_dict):
old_theta = sess.run(self.flat_vars)
loss_flat_grad = sess.run(self.loss_flat_gradient,
feed_dict=feed_dict)
def calc_fisher_vector_product(tangent):
feed_dict[self.flat_tangent] = tangent
fvp = sess.run(self.fisher_vector_product,
feed_dict=feed_dict)
fvp += self.cg_damping * tangent
return fvp
step_dir = conjugate_gradient(calc_fisher_vector_product, -loss_flat_grad)
shs = 0.5 * step_dir.dot(calc_fisher_vector_product(step_dir))
lm = np.sqrt(shs / self.max_divergence)
fullstep = step_dir / lm
neggdotstepdir = -loss_flat_grad.dot(step_dir)
def calc_loss(theta):
sess.run(self.set_vars, feed_dict={self.flat_theta: theta})
if self.divergence is None:
return sess.run(self.raw_loss, feed_dict=feed_dict), True
else:
raw_loss, divergence = sess.run(
[self.raw_loss, self.divergence], feed_dict=feed_dict)
return raw_loss, divergence < self.max_divergence
# find optimal theta
theta = linesearch(calc_loss, old_theta, fullstep, neggdotstepdir / lm)
if self.divergence is not None:
final_divergence = sess.run(self.divergence, feed_dict=feed_dict)
else:
final_divergence = None
# set vars accordingly
if final_divergence is None or final_divergence < self.max_divergence:
sess.run(self.set_vars, feed_dict={self.flat_theta: theta})
else:
sess.run(self.set_vars, feed_dict={self.flat_theta: old_theta})
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
for i in xrange(cg_iters):
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
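# Quick check (added comment; plain numpy, not in the original file): for a
# symmetric positive-definite A, conjugate_gradient solves A x = b from
# matrix-vector products alone:
#   A = np.array([[4., 1.], [1., 3.]])
#   b = np.array([1., 2.])
#   x = conjugate_gradient(lambda v: A.dot(v), b)
#   # x is close to np.linalg.solve(A, b)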
def linesearch(f, x, fullstep, expected_improve_rate):
accept_ratio = 0.1
max_backtracks = 10
fval, _ = f(x)
for (_n_backtracks, stepfrac) in enumerate(.5 ** np.arange(max_backtracks)):
xnew = x + stepfrac * fullstep
newfval, valid = f(xnew)
if not valid:
continue
actual_improve = fval - newfval
expected_improve = expected_improve_rate * stepfrac
ratio = actual_improve / expected_improve
if ratio > accept_ratio and actual_improve > 0:
return xnew
return x
| 5,673 | 30.005464 | 83 | py |
models | models-master/research/pcl_rl/gym_wrapper.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around gym env.
Allows for using batches of possibly identically seeded environments.
"""
import gym
import numpy as np
import random
from six.moves import xrange
import env_spec
def get_env(env_str):
return gym.make(env_str)
class GymWrapper(object):
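  """Runs `distinct * count` gym environments; each group of `count` copies
  shares a seed, so multiple action samples can be compared on identically
  seeded episodes."""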
def __init__(self, env_str, distinct=1, count=1, seeds=None):
self.distinct = distinct
self.count = count
self.total = self.distinct * self.count
    self.seeds = seeds or [random.randint(0, 10**12)
                           for _ in xrange(self.distinct)]
self.envs = []
for seed in self.seeds:
for _ in xrange(self.count):
env = get_env(env_str)
env.seed(seed)
if hasattr(env, 'last'):
env.last = 100 # for algorithmic envs
self.envs.append(env)
self.dones = [True] * self.total
self.num_episodes_played = 0
one_env = self.get_one()
self.use_action_list = hasattr(one_env.action_space, 'spaces')
self.env_spec = env_spec.EnvSpec(self.get_one())
def get_seeds(self):
return self.seeds
def reset(self):
self.dones = [False] * self.total
self.num_episodes_played += len(self.envs)
# reset seeds to be synchronized
    self.seeds = [random.randint(0, 10**12) for _ in xrange(self.distinct)]
counter = 0
for seed in self.seeds:
for _ in xrange(self.count):
self.envs[counter].seed(seed)
counter += 1
return [self.env_spec.convert_obs_to_list(env.reset())
for env in self.envs]
def reset_if(self, predicate=None):
if predicate is None:
predicate = self.dones
if self.count != 1:
assert np.all(predicate)
return self.reset()
self.num_episodes_played += sum(predicate)
output = [self.env_spec.convert_obs_to_list(env.reset())
if pred else None
for env, pred in zip(self.envs, predicate)]
for i, pred in enumerate(predicate):
if pred:
self.dones[i] = False
return output
def all_done(self):
return all(self.dones)
def step(self, actions):
def env_step(env, action):
action = self.env_spec.convert_action_to_gym(action)
obs, reward, done, tt = env.step(action)
obs = self.env_spec.convert_obs_to_list(obs)
return obs, reward, done, tt
actions = zip(*actions)
outputs = [env_step(env, action)
if not done else (self.env_spec.initial_obs(None), 0, True, None)
for action, env, done in zip(actions, self.envs, self.dones)]
for i, (_, _, done, _) in enumerate(outputs):
self.dones[i] = self.dones[i] or done
obs, reward, done, tt = zip(*outputs)
obs = [list(oo) for oo in zip(*obs)]
return [obs, reward, done, tt]
def get_one(self):
return random.choice(self.envs)
def __len__(self):
return len(self.envs)
| 3,520 | 29.094017 | 80 | py |
models | models-master/research/pcl_rl/controller.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller coordinates sampling and training model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
import numpy as np
import pickle
import random
flags = tf.flags
gfile = tf.gfile
FLAGS = flags.FLAGS
def find_best_eps_lambda(rewards, lengths):
"""Find the best lambda given a desired epsilon = FLAGS.max_divergence."""
# perhaps not the best way to do this
desired_div = FLAGS.max_divergence * np.mean(lengths)
def calc_divergence(eps_lambda):
max_reward = np.max(rewards)
logz = (max_reward / eps_lambda +
np.log(np.mean(np.exp((rewards - max_reward) / eps_lambda))))
exprr = np.mean(np.exp(rewards / eps_lambda - logz) *
rewards / eps_lambda)
return exprr - logz
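  # calc_divergence decreases monotonically in eps_lambda, so a simple
  # bisection over [left, right] homes in on the lambda matching desired_div.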
left = 0.0
right = 1000.0
if len(rewards) <= 8:
return (left + right) / 2
num_iter = max(4, 1 + int(np.log((right - left) / 0.1) / np.log(2.0)))
for _ in xrange(num_iter):
mid = (left + right) / 2
cur_div = calc_divergence(mid)
if cur_div > desired_div:
left = mid
else:
right = mid
return (left + right) / 2
class Controller(object):
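  """Coordinates sampling episodes, maintaining the replay buffer, and
  training the model on online and/or replayed batches."""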
def __init__(self, env, env_spec, internal_dim,
use_online_batch=True,
batch_by_steps=False,
unify_episodes=False,
replay_batch_size=None,
max_step=None,
cutoff_agent=1,
save_trajectories_file=None,
use_trust_region=False,
use_value_opt=False,
update_eps_lambda=False,
prioritize_by='rewards',
get_model=None,
get_replay_buffer=None,
get_buffer_seeds=None):
self.env = env
self.env_spec = env_spec
self.internal_dim = internal_dim
self.use_online_batch = use_online_batch
self.batch_by_steps = batch_by_steps
self.unify_episodes = unify_episodes
self.replay_batch_size = replay_batch_size
self.max_step = max_step
self.cutoff_agent = cutoff_agent
self.save_trajectories_file = save_trajectories_file
self.use_trust_region = use_trust_region
self.use_value_opt = use_value_opt
self.update_eps_lambda = update_eps_lambda
self.prioritize_by = prioritize_by
self.model = get_model()
self.replay_buffer = get_replay_buffer()
self.seed_replay_buffer(get_buffer_seeds())
self.internal_state = np.array([self.initial_internal_state()] *
len(self.env))
self.last_obs = self.env_spec.initial_obs(len(self.env))
self.last_act = self.env_spec.initial_act(len(self.env))
self.last_pad = np.zeros(len(self.env))
self.start_episode = np.array([True] * len(self.env))
self.step_count = np.array([0] * len(self.env))
self.episode_running_rewards = np.zeros(len(self.env))
self.episode_running_lengths = np.zeros(len(self.env))
self.episode_rewards = []
self.greedy_episode_rewards = []
self.episode_lengths = []
self.total_rewards = []
self.best_batch_rewards = None
def setup(self, train=True):
self.model.setup(train=train)
def initial_internal_state(self):
return np.zeros(self.model.policy.rnn_state_dim)
def _sample_episodes(self, sess, greedy=False):
"""Sample episodes from environment using model."""
# reset environments as necessary
obs_after_reset = self.env.reset_if(self.start_episode)
for i, obs in enumerate(obs_after_reset):
if obs is not None:
self.step_count[i] = 0
self.internal_state[i] = self.initial_internal_state()
for j in xrange(len(self.env_spec.obs_dims)):
self.last_obs[j][i] = obs[j]
for j in xrange(len(self.env_spec.act_dims)):
self.last_act[j][i] = -1
self.last_pad[i] = 0
# maintain episode as a single unit if the last sampling
# batch ended before the episode was terminated
if self.unify_episodes:
assert len(obs_after_reset) == 1
new_ep = obs_after_reset[0] is not None
else:
new_ep = True
self.start_id = 0 if new_ep else len(self.all_obs[:])
initial_state = self.internal_state
all_obs = [] if new_ep else self.all_obs[:]
all_act = ([self.last_act] if new_ep else self.all_act[:])
all_pad = [] if new_ep else self.all_pad[:]
rewards = [] if new_ep else self.rewards[:]
# start stepping in the environments
step = 0
while not self.env.all_done():
self.step_count += 1 - np.array(self.env.dones)
next_internal_state, sampled_actions = self.model.sample_step(
sess, self.last_obs, self.internal_state, self.last_act,
greedy=greedy)
env_actions = self.env_spec.convert_actions_to_env(sampled_actions)
next_obs, reward, next_dones, _ = self.env.step(env_actions)
all_obs.append(self.last_obs)
all_act.append(sampled_actions)
all_pad.append(self.last_pad)
rewards.append(reward)
self.internal_state = next_internal_state
self.last_obs = next_obs
self.last_act = sampled_actions
self.last_pad = np.array(next_dones).astype('float32')
step += 1
if self.max_step and step >= self.max_step:
break
self.all_obs = all_obs[:]
self.all_act = all_act[:]
self.all_pad = all_pad[:]
self.rewards = rewards[:]
# append final observation
all_obs.append(self.last_obs)
return initial_state, all_obs, all_act, rewards, all_pad
def sample_episodes(self, sess, greedy=False):
"""Sample steps from the environment until we have enough for a batch."""
# check if last batch ended with episode that was not terminated
if self.unify_episodes:
self.all_new_ep = self.start_episode[0]
# sample episodes until we either have enough episodes or enough steps
episodes = []
total_steps = 0
while total_steps < self.max_step * len(self.env):
(initial_state,
observations, actions, rewards,
pads) = self._sample_episodes(sess, greedy=greedy)
observations = list(zip(*observations))
actions = list(zip(*actions))
terminated = np.array(self.env.dones)
self.total_rewards = np.sum(np.array(rewards[self.start_id:]) *
(1 - np.array(pads[self.start_id:])), axis=0)
self.episode_running_rewards *= 1 - self.start_episode
self.episode_running_lengths *= 1 - self.start_episode
self.episode_running_rewards += self.total_rewards
self.episode_running_lengths += np.sum(1 - np.array(pads[self.start_id:]), axis=0)
episodes.extend(self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads))
total_steps += np.sum(1 - np.array(pads))
# set next starting episodes
self.start_episode = np.logical_or(terminated,
self.step_count >= self.cutoff_agent)
episode_rewards = self.episode_running_rewards[self.start_episode].tolist()
self.episode_rewards.extend(episode_rewards)
self.episode_lengths.extend(self.episode_running_lengths[self.start_episode].tolist())
self.episode_rewards = self.episode_rewards[-100:]
self.episode_lengths = self.episode_lengths[-100:]
if (self.save_trajectories_file is not None and
(self.best_batch_rewards is None or
np.mean(self.total_rewards) > self.best_batch_rewards)):
self.best_batch_rewards = np.mean(self.total_rewards)
my_episodes = self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads)
with gfile.GFile(self.save_trajectories_file, 'w') as f:
pickle.dump(my_episodes, f)
if not self.batch_by_steps:
return (initial_state,
observations, actions, rewards,
terminated, pads)
return self.convert_to_batched_episodes(episodes)
def _train(self, sess,
observations, initial_state, actions,
rewards, terminated, pads):
"""Train model using batch."""
avg_episode_reward = np.mean(self.episode_rewards)
greedy_episode_reward = (np.mean(self.greedy_episode_rewards)
if self.greedy_episode_rewards else
avg_episode_reward)
loss, summary = None, None
if self.use_trust_region:
# use trust region to optimize policy
loss, _, summary = self.model.trust_region_step(
sess,
observations, initial_state, actions,
rewards, terminated, pads,
avg_episode_reward=avg_episode_reward,
greedy_episode_reward=greedy_episode_reward)
else: # otherwise use simple gradient descent on policy
loss, _, summary = self.model.train_step(
sess,
observations, initial_state, actions,
rewards, terminated, pads,
avg_episode_reward=avg_episode_reward,
greedy_episode_reward=greedy_episode_reward)
if self.use_value_opt: # optionally perform specific value optimization
self.model.fit_values(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
return loss, summary
def train(self, sess):
"""Sample some episodes and train on some episodes."""
cur_step = sess.run(self.model.inc_global_step)
self.cur_step = cur_step
# on the first iteration, set target network close to online network
if self.cur_step == 0:
for _ in xrange(100):
sess.run(self.model.copy_op)
# on other iterations, just perform single target <-- online operation
sess.run(self.model.copy_op)
# sample from env
(initial_state,
observations, actions, rewards,
terminated, pads) = self.sample_episodes(sess)
# add to replay buffer
self.add_to_replay_buffer(
initial_state, observations, actions,
rewards, terminated, pads)
loss, summary = 0, None
# train on online batch
if self.use_online_batch:
loss, summary = self._train(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
# update relative entropy coefficient
if self.update_eps_lambda:
episode_rewards = np.array(self.episode_rewards)
episode_lengths = np.array(self.episode_lengths)
eps_lambda = find_best_eps_lambda(
episode_rewards[-20:], episode_lengths[-20:])
sess.run(self.model.objective.assign_eps_lambda,
feed_dict={self.model.objective.new_eps_lambda: eps_lambda})
# train on replay batch
replay_batch, replay_probs = self.get_from_replay_buffer(
self.replay_batch_size)
if replay_batch:
(initial_state,
observations, actions, rewards,
terminated, pads) = replay_batch
loss, summary = self._train(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
return loss, summary, self.total_rewards, self.episode_rewards
def eval(self, sess):
"""Use greedy sampling."""
(initial_state,
observations, actions, rewards,
     terminated, pads) = self.sample_episodes(sess, greedy=True)
total_rewards = np.sum(np.array(rewards) * (1 - np.array(pads)), axis=0)
return total_rewards, self.episode_rewards
def convert_from_batched_episodes(
self, initial_state, observations, actions, rewards,
terminated, pads):
"""Convert time-major batch of episodes to batch-major list of episodes."""
rewards = np.array(rewards)
pads = np.array(pads)
observations = [np.array(obs) for obs in observations]
actions = [np.array(act) for act in actions]
total_rewards = np.sum(rewards * (1 - pads), axis=0)
total_length = np.sum(1 - pads, axis=0).astype('int32')
episodes = []
num_episodes = rewards.shape[1]
for i in xrange(num_episodes):
length = total_length[i]
ep_initial = initial_state[i]
ep_obs = [obs[:length + 1, i, ...] for obs in observations]
ep_act = [act[:length + 1, i, ...] for act in actions]
ep_rewards = rewards[:length, i]
episodes.append(
[ep_initial, ep_obs, ep_act, ep_rewards, terminated[i]])
return episodes
def convert_to_batched_episodes(self, episodes, max_length=None):
"""Convert batch-major list of episodes to time-major batch of episodes."""
lengths = [len(ep[-2]) for ep in episodes]
max_length = max_length or max(lengths)
new_episodes = []
for ep, length in zip(episodes, lengths):
initial, observations, actions, rewards, terminated = ep
observations = [np.resize(obs, [max_length + 1] + list(obs.shape)[1:])
for obs in observations]
actions = [np.resize(act, [max_length + 1] + list(act.shape)[1:])
for act in actions]
pads = np.array([0] * length + [1] * (max_length - length))
rewards = np.resize(rewards, [max_length]) * (1 - pads)
new_episodes.append([initial, observations, actions, rewards,
terminated, pads])
(initial, observations, actions, rewards,
terminated, pads) = zip(*new_episodes)
observations = [np.swapaxes(obs, 0, 1)
for obs in zip(*observations)]
actions = [np.swapaxes(act, 0, 1)
for act in zip(*actions)]
rewards = np.transpose(rewards)
pads = np.transpose(pads)
return (initial, observations, actions, rewards, terminated, pads)
def add_to_replay_buffer(self, initial_state,
observations, actions, rewards,
terminated, pads):
"""Add batch of episodes to replay buffer."""
if self.replay_buffer is None:
return
rewards = np.array(rewards)
pads = np.array(pads)
total_rewards = np.sum(rewards * (1 - pads), axis=0)
episodes = self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads)
    priorities = (total_rewards if self.prioritize_by == 'rewards'
else self.cur_step)
if not self.unify_episodes or self.all_new_ep:
self.last_idxs = self.replay_buffer.add(
episodes, priorities)
else:
# If we are unifying episodes, we attempt to
# keep them unified in the replay buffer.
# The first episode sampled in the current batch is a
# continuation of the last episode from the previous batch
self.replay_buffer.add(episodes[:1], priorities, self.last_idxs[-1:])
if len(episodes) > 1:
self.replay_buffer.add(episodes[1:], priorities)
def get_from_replay_buffer(self, batch_size):
"""Sample a batch of episodes from the replay buffer."""
if self.replay_buffer is None or len(self.replay_buffer) < 1 * batch_size:
return None, None
desired_count = batch_size * self.max_step
# in the case of batch_by_steps, we sample larger and larger
# amounts from the replay buffer until we have enough steps.
while True:
if batch_size > len(self.replay_buffer):
batch_size = len(self.replay_buffer)
episodes, probs = self.replay_buffer.get_batch(batch_size)
count = sum(len(ep[-2]) for ep in episodes)
if count >= desired_count or not self.batch_by_steps:
break
if batch_size == len(self.replay_buffer):
return None, None
batch_size *= 1.2
return (self.convert_to_batched_episodes(episodes), probs)
def seed_replay_buffer(self, episodes):
"""Seed the replay buffer with some episodes."""
if self.replay_buffer is None:
return
# just need to add initial state
for i in xrange(len(episodes)):
episodes[i] = [self.initial_internal_state()] + episodes[i]
self.replay_buffer.seed_buffer(episodes)
| 16,614 | 34.963203 | 92 | py |
models | models-master/research/pcl_rl/replay_buffer.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Replay buffer.
Implements replay buffer in Python.
"""
import random
import numpy as np
from six.moves import xrange
class ReplayBuffer(object):
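  """Replay buffer with uniform sampling; eviction is random but never
  touches the first `init_length` seed episodes."""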
def __init__(self, max_size):
self.max_size = max_size
self.cur_size = 0
self.buffer = {}
self.init_length = 0
def __len__(self):
return self.cur_size
def seed_buffer(self, episodes):
self.init_length = len(episodes)
self.add(episodes, np.ones(self.init_length))
def add(self, episodes, *args):
"""Add episodes to buffer."""
idx = 0
while self.cur_size < self.max_size and idx < len(episodes):
self.buffer[self.cur_size] = episodes[idx]
self.cur_size += 1
idx += 1
if idx < len(episodes):
remove_idxs = self.remove_n(len(episodes) - idx)
for remove_idx in remove_idxs:
self.buffer[remove_idx] = episodes[idx]
idx += 1
assert len(self.buffer) == self.cur_size
def remove_n(self, n):
"""Get n items for removal."""
# random removal
idxs = random.sample(xrange(self.init_length, self.cur_size), n)
return idxs
def get_batch(self, n):
"""Get batch of episodes to train on."""
# random batch
idxs = random.sample(xrange(self.cur_size), n)
return [self.buffer[idx] for idx in idxs], None
def update_last_batch(self, delta):
pass
class PrioritizedReplayBuffer(ReplayBuffer):
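  """Replay buffer sampling episodes with probability proportional to
  exp(alpha * priority), with rand/fifo/rank eviction strategies."""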
def __init__(self, max_size, alpha=0.2,
eviction_strategy='rand'):
self.max_size = max_size
self.alpha = alpha
self.eviction_strategy = eviction_strategy
assert self.eviction_strategy in ['rand', 'fifo', 'rank']
self.remove_idx = 0
self.cur_size = 0
self.buffer = {}
self.priorities = np.zeros(self.max_size)
self.init_length = 0
def __len__(self):
return self.cur_size
def add(self, episodes, priorities, new_idxs=None):
"""Add episodes to buffer."""
if new_idxs is None:
idx = 0
new_idxs = []
while self.cur_size < self.max_size and idx < len(episodes):
self.buffer[self.cur_size] = episodes[idx]
new_idxs.append(self.cur_size)
self.cur_size += 1
idx += 1
if idx < len(episodes):
remove_idxs = self.remove_n(len(episodes) - idx)
for remove_idx in remove_idxs:
self.buffer[remove_idx] = episodes[idx]
new_idxs.append(remove_idx)
idx += 1
else:
assert len(new_idxs) == len(episodes)
for new_idx, ep in zip(new_idxs, episodes):
self.buffer[new_idx] = ep
self.priorities[new_idxs] = priorities
self.priorities[0:self.init_length] = np.max(
self.priorities[self.init_length:])
assert len(self.buffer) == self.cur_size
return new_idxs
def remove_n(self, n):
"""Get n items for removal."""
assert self.init_length + n <= self.cur_size
if self.eviction_strategy == 'rand':
# random removal
idxs = random.sample(xrange(self.init_length, self.cur_size), n)
elif self.eviction_strategy == 'fifo':
# overwrite elements in cyclical fashion
idxs = [
self.init_length +
(self.remove_idx + i) % (self.max_size - self.init_length)
for i in xrange(n)]
self.remove_idx = idxs[-1] + 1 - self.init_length
elif self.eviction_strategy == 'rank':
# remove lowest-priority indices
idxs = np.argpartition(self.priorities, n-1)[:n]
return idxs
def sampling_distribution(self):
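    # Softmax over alpha-scaled priorities (max-shifted for stability);
    # falls back to uniform sampling if the normalizer underflows to zero.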
p = self.priorities[:self.cur_size]
p = np.exp(self.alpha * (p - np.max(p)))
norm = np.sum(p)
if norm > 0:
uniform = 0.0
p = p / norm * (1 - uniform) + 1.0 / self.cur_size * uniform
else:
p = np.ones(self.cur_size) / self.cur_size
return p
def get_batch(self, n):
"""Get batch of episodes to train on."""
p = self.sampling_distribution()
idxs = np.random.choice(self.cur_size, size=int(n), replace=False, p=p)
self.last_batch = idxs
return [self.buffer[idx] for idx in idxs], p[idxs]
def update_last_batch(self, delta):
"""Update last batch idxs with new priority."""
self.priorities[self.last_batch] = np.abs(delta)
self.priorities[0:self.init_length] = np.max(
self.priorities[self.init_length:])
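# Illustrative usage sketch (not part of the original module); the episode
# objects and priority values below are placeholders:
#   buf = PrioritizedReplayBuffer(max_size=100, alpha=0.5)
#   buf.add(episodes, priorities=np.array([1.0, 2.0]))
#   batch, probs = buf.get_batch(2)
#   buf.update_last_batch(delta=np.array([0.5, 0.1]))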
| 4,933 | 29.269939 | 80 | py |
models | models-master/research/pcl_rl/baseline.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Baseline model for value estimates.
Implements the value component of the neural network.
In some cases this is just an additional linear layer on the policy.
In other cases, it is a completely separate neural network.
"""
from six.moves import xrange
import tensorflow as tf
import numpy as np
class Baseline(object):
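  """Value-estimate network over observations, previous actions, policy
  state, and (optionally) time step, with configurable hidden layers."""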
def __init__(self, env_spec, internal_policy_dim,
input_prev_actions=True,
input_time_step=False,
input_policy_state=True,
n_hidden_layers=0,
hidden_dim=64,
tau=0.0):
self.env_spec = env_spec
self.internal_policy_dim = internal_policy_dim
self.input_prev_actions = input_prev_actions
self.input_time_step = input_time_step
self.input_policy_state = input_policy_state
self.n_hidden_layers = n_hidden_layers
self.hidden_dim = hidden_dim
self.tau = tau
self.matrix_init = tf.truncated_normal_initializer(stddev=0.01)
def get_inputs(self, time_step, obs, prev_actions,
internal_policy_states):
"""Get inputs to network as single tensor."""
inputs = [tf.ones_like(time_step)]
input_dim = 1
if not self.input_policy_state:
for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
inputs.append(
tf.one_hot(obs[i], obs_dim))
input_dim += obs_dim
elif self.env_spec.is_box(obs_type):
cur_obs = obs[i]
inputs.append(cur_obs)
inputs.append(cur_obs ** 2)
input_dim += obs_dim * 2
else:
assert False
if self.input_prev_actions:
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
if self.env_spec.is_discrete(act_type):
inputs.append(
tf.one_hot(prev_actions[i], act_dim))
input_dim += act_dim
elif self.env_spec.is_box(act_type):
inputs.append(prev_actions[i])
input_dim += act_dim
else:
assert False
if self.input_policy_state:
inputs.append(internal_policy_states)
input_dim += self.internal_policy_dim
if self.input_time_step:
scaled_time = 0.01 * time_step
inputs.extend([scaled_time, scaled_time ** 2, scaled_time ** 3])
input_dim += 3
return input_dim, tf.concat(inputs, 1)
def reshape_batched_inputs(self, all_obs, all_actions,
internal_policy_states, policy_logits):
"""Reshape inputs from [time_length, batch_size, ...] to
[time_length * batch_size, ...].
This allows for computing the value estimate in one go.
"""
batch_size = tf.shape(all_obs[0])[1]
time_length = tf.shape(all_obs[0])[0]
reshaped_obs = []
for obs, (obs_dim, obs_type) in zip(all_obs, self.env_spec.obs_dims_and_types):
if self.env_spec.is_discrete(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size]))
elif self.env_spec.is_box(obs_type):
reshaped_obs.append(tf.reshape(obs, [time_length * batch_size, obs_dim]))
reshaped_prev_act = []
reshaped_policy_logits = []
for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
prev_act = all_actions[i]
if self.env_spec.is_discrete(act_type):
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size]))
elif self.env_spec.is_box(act_type):
reshaped_prev_act.append(
tf.reshape(prev_act, [time_length * batch_size, act_dim]))
reshaped_policy_logits.append(
tf.reshape(policy_logits[i], [time_length * batch_size, -1]))
reshaped_internal_policy_states = tf.reshape(
internal_policy_states,
[time_length * batch_size, self.internal_policy_dim])
time_step = (float(self.input_time_step) *
tf.expand_dims(
tf.to_float(tf.range(time_length * batch_size) /
batch_size), -1))
return (time_step, reshaped_obs, reshaped_prev_act,
reshaped_internal_policy_states,
reshaped_policy_logits)
def get_values(self, all_obs, all_actions, internal_policy_states,
policy_logits):
"""Get value estimates given input."""
batch_size = tf.shape(all_obs[0])[1]
time_length = tf.shape(all_obs[0])[0]
(time_step, reshaped_obs, reshaped_prev_act,
reshaped_internal_policy_states,
reshaped_policy_logits) = self.reshape_batched_inputs(
all_obs, all_actions, internal_policy_states, policy_logits)
input_dim, inputs = self.get_inputs(
time_step, reshaped_obs, reshaped_prev_act,
reshaped_internal_policy_states)
for depth in xrange(self.n_hidden_layers):
with tf.variable_scope('value_layer%d' % depth):
w = tf.get_variable('w', [input_dim, self.hidden_dim])
inputs = tf.nn.tanh(tf.matmul(inputs, w))
input_dim = self.hidden_dim
w_v = tf.get_variable('w_v', [input_dim, 1],
initializer=self.matrix_init)
values = tf.matmul(inputs, w_v)
values = tf.reshape(values, [time_length, batch_size])
inputs = inputs[:-batch_size] # remove "final vals"
return values, inputs, w_v
class UnifiedBaseline(Baseline):
"""Baseline for Unified PCL."""
def get_values(self, all_obs, all_actions, internal_policy_states,
policy_logits):
batch_size = tf.shape(all_obs[0])[1]
time_length = tf.shape(all_obs[0])[0]
(time_step, reshaped_obs, reshaped_prev_act,
reshaped_internal_policy_states,
reshaped_policy_logits) = self.reshape_batched_inputs(
all_obs, all_actions, internal_policy_states, policy_logits)
def f_transform(q_values, tau):
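      # Soft-max (log-sum-exp) of q_values at temperature tau, using the
      # max-shift trick to keep the exponentials numerically stable.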
max_q = tf.reduce_max(q_values, -1, keep_dims=True)
return tf.squeeze(max_q, [-1]) + tau * tf.log(
tf.reduce_sum(tf.exp((q_values - max_q) / tau), -1))
assert len(reshaped_policy_logits) == 1
values = f_transform((self.tau + self.eps_lambda) * reshaped_policy_logits[0],
(self.tau + self.eps_lambda))
values = tf.reshape(values, [time_length, batch_size])
# not used
input_dim, inputs = self.get_inputs(
time_step, reshaped_obs, reshaped_prev_act,
reshaped_internal_policy_states)
w_v = tf.get_variable('w_v', [input_dim, 1],
initializer=self.matrix_init)
inputs = inputs[:-batch_size] # remove "final vals"
return values, inputs, w_v
| 7,292 | 35.833333 | 83 | py |
models | models-master/research/pcl_rl/env_spec.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for environment interface with agent / tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
class spaces(object):
discrete = 0
box = 1
def get_space(space):
if hasattr(space, 'n'):
return space.n, spaces.discrete, None
elif hasattr(space, 'shape'):
return np.prod(space.shape), spaces.box, (space.low, space.high)
def get_spaces(spaces):
if hasattr(spaces, 'spaces'):
return zip(*[get_space(space) for space in spaces.spaces])
else:
return [(ret,) for ret in get_space(spaces)]
class EnvSpec(object):
def __init__(self, env, try_combining_actions=True,
discretize_actions=None):
self.discretize_actions = discretize_actions
# figure out observation space
self.obs_space = env.observation_space
self.obs_dims, self.obs_types, self.obs_info = get_spaces(self.obs_space)
# figure out action space
self.act_space = env.action_space
self.act_dims, self.act_types, self.act_info = get_spaces(self.act_space)
if self.discretize_actions:
self._act_dims = self.act_dims[:]
self._act_types = self.act_types[:]
self.act_dims = []
self.act_types = []
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
self.act_dims.append(dim)
self.act_types.append(spaces.discrete)
elif typ == spaces.box:
for _ in xrange(dim):
self.act_dims.append(self.discretize_actions)
self.act_types.append(spaces.discrete)
else:
self._act_dims = None
self._act_types = None
if (try_combining_actions and
all(typ == spaces.discrete for typ in self.act_types)):
self.combine_actions = True
self.orig_act_dims = self.act_dims[:]
self.orig_act_types = self.act_types[:]
total_act_dim = 1
for dim in self.act_dims:
total_act_dim *= dim
self.act_dims = [total_act_dim]
self.act_types = [spaces.discrete]
else:
self.combine_actions = False
self.obs_dims_and_types = tuple(zip(self.obs_dims, self.obs_types))
self.act_dims_and_types = tuple(zip(self.act_dims, self.act_types))
self.total_obs_dim = sum(self.obs_dims)
self.total_sampling_act_dim = sum(self.sampling_dim(dim, typ)
for dim, typ in self.act_dims_and_types)
self.total_sampled_act_dim = sum(self.act_dims)
def sampling_dim(self, dim, typ):
if typ == spaces.discrete:
return dim
elif typ == spaces.box:
return 2 * dim # Gaussian mean and std
else:
assert False
def convert_actions_to_env(self, actions):
if self.combine_actions:
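      # Mixed-radix decode: split the single combined discrete action back
      # into per-dimension actions (inverse of convert_env_actions_to_actions).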
new_actions = []
actions = actions[0]
for dim in self.orig_act_dims:
new_actions.append(np.mod(actions, dim))
actions = (actions / dim).astype('int32')
actions = new_actions
if self.discretize_actions:
new_actions = []
idx = 0
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
new_actions.append(actions[idx])
idx += 1
elif typ == spaces.box:
low, high = self.act_info[i]
cur_action = []
for j in xrange(dim):
cur_action.append(
low[j] + (high[j] - low[j]) * actions[idx] /
float(self.discretize_actions))
idx += 1
new_actions.append(np.hstack(cur_action))
actions = new_actions
return actions
def convert_env_actions_to_actions(self, actions):
if not self.combine_actions:
return actions
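    # Mixed-radix encode: fold per-dimension discrete actions into one integer.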
new_actions = 0
base = 1
for act, dim in zip(actions, self.orig_act_dims):
new_actions = new_actions + base * act
base *= dim
return [new_actions]
def convert_obs_to_list(self, obs):
if len(self.obs_dims) == 1:
return [obs]
else:
return list(obs)
def convert_action_to_gym(self, action):
if len(action) == 1:
return action[0]
else:
return list(action)
def initial_obs(self, batch_size):
batched = batch_size is not None
batch_size = batch_size or 1
obs = []
for dim, typ in self.obs_dims_and_types:
if typ == spaces.discrete:
obs.append(np.zeros(batch_size))
elif typ == spaces.box:
obs.append(np.zeros([batch_size, dim]))
if batched:
return obs
else:
return list(zip(*obs))[0]
def initial_act(self, batch_size=None):
batched = batch_size is not None
batch_size = batch_size or 1
act = []
for dim, typ in self.act_dims_and_types:
if typ == spaces.discrete:
act.append(-np.ones(batch_size))
elif typ == spaces.box:
act.append(-np.ones([batch_size, dim]))
if batched:
return act
else:
return list(zip(*act))[0]
def is_discrete(self, typ):
return typ == spaces.discrete
def is_box(self, typ):
return typ == spaces.box
| 6,024 | 28.975124 | 80 | py |
models | models-master/research/pcl_rl/trainer.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trainer for coordinating single or multi-replica training.
Main point of entry for running models. Specifies most of
the parameters used by different algorithms.
"""
import tensorflow as tf
import numpy as np
import random
import os
import pickle
from six.moves import xrange
import controller
import model
import policy
import baseline
import objective
import full_episode_objective
import trust_region
import optimizers
import replay_buffer
import expert_paths
import gym_wrapper
import env_spec
app = tf.app
flags = tf.flags
logging = tf.logging
gfile = tf.gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('env', 'Copy-v0', 'environment name')
flags.DEFINE_integer('batch_size', 100, 'batch size')
flags.DEFINE_integer('replay_batch_size', None, 'replay batch size; defaults to batch_size')
flags.DEFINE_integer('num_samples', 1,
'number of samples from each random seed initialization')
flags.DEFINE_integer('max_step', 200, 'max number of steps to train on')
flags.DEFINE_integer('cutoff_agent', 0,
                     'number of steps at which to cut off the agent. '
                     'Defaults to always cutoff')
flags.DEFINE_integer('num_steps', 100000, 'number of training steps')
flags.DEFINE_integer('validation_frequency', 100,
'every so many steps, output some stats')
flags.DEFINE_float('target_network_lag', 0.95,
'This exponential decay on online network yields target '
'network')
flags.DEFINE_string('sample_from', 'online',
'Sample actions from "online" network or "target" network')
flags.DEFINE_string('objective', 'pcl',
'pcl/upcl/a3c/trpo/reinforce/urex')
flags.DEFINE_bool('trust_region_p', False,
'use trust region for policy optimization')
flags.DEFINE_string('value_opt', None,
'leave as None to optimize it along with policy '
'(using critic_weight). Otherwise set to '
'"best_fit" (least squares regression), "lbfgs", or "grad"')
flags.DEFINE_float('max_divergence', 0.01,
'max divergence (i.e. KL) to allow during '
'trust region optimization')
flags.DEFINE_float('learning_rate', 0.01, 'learning rate')
flags.DEFINE_float('clip_norm', 5.0, 'clip norm')
flags.DEFINE_float('clip_adv', 0.0, 'Clip advantages at this value. '
'Leave as 0 to not clip at all.')
flags.DEFINE_float('critic_weight', 0.1, 'critic weight')
flags.DEFINE_float('tau', 0.1, 'entropy regularizer. '
'If using decaying tau, this is the final value.')
flags.DEFINE_float('tau_decay', None,
'decay tau by this much every 100 steps')
flags.DEFINE_float('tau_start', 0.1,
'start tau at this value')
flags.DEFINE_float('eps_lambda', 0.0, 'relative entropy regularizer.')
flags.DEFINE_bool('update_eps_lambda', False,
'Update lambda automatically based on last 100 episodes.')
flags.DEFINE_float('gamma', 1.0, 'discount')
flags.DEFINE_integer('rollout', 10, 'rollout')
flags.DEFINE_bool('use_target_values', False,
'use target network for value estimates')
flags.DEFINE_bool('fixed_std', True,
'fix the std in Gaussian distributions')
flags.DEFINE_bool('input_prev_actions', True,
'input previous actions to policy network')
flags.DEFINE_bool('recurrent', True,
'use recurrent connections')
flags.DEFINE_bool('input_time_step', False,
                  'input time step into value calculations')
flags.DEFINE_bool('use_online_batch', True, 'train on batches as they are sampled')
flags.DEFINE_bool('batch_by_steps', False,
'ensure each training batch has batch_size * max_step steps')
flags.DEFINE_bool('unify_episodes', False,
'Make sure replay buffer holds entire episodes, '
'even across distinct sampling steps')
flags.DEFINE_integer('replay_buffer_size', 5000, 'replay buffer size')
flags.DEFINE_float('replay_buffer_alpha', 0.5, 'replay buffer alpha param')
flags.DEFINE_integer('replay_buffer_freq', 0,
'replay buffer frequency (only supports -1/0/1)')
flags.DEFINE_string('eviction', 'rand',
'how to evict from replay buffer: rand/rank/fifo')
flags.DEFINE_string('prioritize_by', 'rewards',
'Prioritize replay buffer by "rewards" or "step"')
flags.DEFINE_integer('num_expert_paths', 0,
'number of expert paths to seed replay buffer with')
flags.DEFINE_integer('internal_dim', 256, 'RNN internal dim')
flags.DEFINE_integer('value_hidden_layers', 0,
'number of hidden layers in value estimate')
flags.DEFINE_integer('tf_seed', 42, 'random seed for tensorflow')
flags.DEFINE_string('save_trajectories_dir', None,
'directory to save trajectories to, if desired')
flags.DEFINE_string('load_trajectories_file', None,
'file to load expert trajectories from')
# supervisor flags
flags.DEFINE_bool('supervisor', False, 'use supervisor training')
flags.DEFINE_integer('task_id', 0, 'task id')
flags.DEFINE_integer('ps_tasks', 0, 'number of ps tasks')
flags.DEFINE_integer('num_replicas', 1, 'number of replicas used')
flags.DEFINE_string('master', 'local', 'name of master')
flags.DEFINE_string('save_dir', '', 'directory to save model to')
flags.DEFINE_string('load_path', '', 'path of saved model to load (if none in save_dir)')
class Trainer(object):
"""Coordinates single or multi-replica training."""
def __init__(self):
self.batch_size = FLAGS.batch_size
self.replay_batch_size = FLAGS.replay_batch_size
if self.replay_batch_size is None:
self.replay_batch_size = self.batch_size
self.num_samples = FLAGS.num_samples
self.env_str = FLAGS.env
self.env = gym_wrapper.GymWrapper(self.env_str,
distinct=FLAGS.batch_size // self.num_samples,
count=self.num_samples)
self.eval_env = gym_wrapper.GymWrapper(
self.env_str,
distinct=FLAGS.batch_size // self.num_samples,
count=self.num_samples)
self.env_spec = env_spec.EnvSpec(self.env.get_one())
self.max_step = FLAGS.max_step
self.cutoff_agent = FLAGS.cutoff_agent
self.num_steps = FLAGS.num_steps
self.validation_frequency = FLAGS.validation_frequency
self.target_network_lag = FLAGS.target_network_lag
self.sample_from = FLAGS.sample_from
assert self.sample_from in ['online', 'target']
self.critic_weight = FLAGS.critic_weight
self.objective = FLAGS.objective
self.trust_region_p = FLAGS.trust_region_p
self.value_opt = FLAGS.value_opt
assert not self.trust_region_p or self.objective in ['pcl', 'trpo']
assert self.objective != 'trpo' or self.trust_region_p
assert self.value_opt is None or self.value_opt == 'None' or \
self.critic_weight == 0.0
self.max_divergence = FLAGS.max_divergence
self.learning_rate = FLAGS.learning_rate
self.clip_norm = FLAGS.clip_norm
self.clip_adv = FLAGS.clip_adv
self.tau = FLAGS.tau
self.tau_decay = FLAGS.tau_decay
self.tau_start = FLAGS.tau_start
self.eps_lambda = FLAGS.eps_lambda
self.update_eps_lambda = FLAGS.update_eps_lambda
self.gamma = FLAGS.gamma
self.rollout = FLAGS.rollout
self.use_target_values = FLAGS.use_target_values
self.fixed_std = FLAGS.fixed_std
self.input_prev_actions = FLAGS.input_prev_actions
self.recurrent = FLAGS.recurrent
assert not self.trust_region_p or not self.recurrent
self.input_time_step = FLAGS.input_time_step
assert not self.input_time_step or (self.cutoff_agent <= self.max_step)
self.use_online_batch = FLAGS.use_online_batch
self.batch_by_steps = FLAGS.batch_by_steps
self.unify_episodes = FLAGS.unify_episodes
if self.unify_episodes:
assert self.batch_size == 1
self.replay_buffer_size = FLAGS.replay_buffer_size
self.replay_buffer_alpha = FLAGS.replay_buffer_alpha
self.replay_buffer_freq = FLAGS.replay_buffer_freq
assert self.replay_buffer_freq in [-1, 0, 1]
self.eviction = FLAGS.eviction
self.prioritize_by = FLAGS.prioritize_by
assert self.prioritize_by in ['rewards', 'step']
self.num_expert_paths = FLAGS.num_expert_paths
self.internal_dim = FLAGS.internal_dim
self.value_hidden_layers = FLAGS.value_hidden_layers
self.tf_seed = FLAGS.tf_seed
self.save_trajectories_dir = FLAGS.save_trajectories_dir
self.save_trajectories_file = (
os.path.join(
self.save_trajectories_dir, self.env_str.replace('-', '_'))
if self.save_trajectories_dir else None)
self.load_trajectories_file = FLAGS.load_trajectories_file
self.hparams = dict((attr, getattr(self, attr))
for attr in dir(self)
if not attr.startswith('__') and
not callable(getattr(self, attr)))
def hparams_string(self):
return '\n'.join('%s: %s' % item for item in sorted(self.hparams.items()))
def get_objective(self):
tau = self.tau
if self.tau_decay is not None:
assert self.tau_start >= self.tau
tau = tf.maximum(
tf.train.exponential_decay(
self.tau_start, self.global_step, 100, self.tau_decay),
self.tau)
if self.objective in ['pcl', 'a3c', 'trpo', 'upcl']:
cls = (objective.PCL if self.objective in ['pcl', 'upcl'] else
objective.TRPO if self.objective == 'trpo' else
objective.ActorCritic)
policy_weight = 1.0
return cls(self.learning_rate,
clip_norm=self.clip_norm,
policy_weight=policy_weight,
critic_weight=self.critic_weight,
tau=tau, gamma=self.gamma, rollout=self.rollout,
eps_lambda=self.eps_lambda, clip_adv=self.clip_adv,
use_target_values=self.use_target_values)
elif self.objective in ['reinforce', 'urex']:
cls = (full_episode_objective.Reinforce
if self.objective == 'reinforce' else
full_episode_objective.UREX)
return cls(self.learning_rate,
clip_norm=self.clip_norm,
num_samples=self.num_samples,
tau=tau, bonus_weight=1.0) # TODO: bonus weight?
else:
assert False, 'Unknown objective %s' % self.objective
def get_policy(self):
if self.recurrent:
cls = policy.Policy
else:
cls = policy.MLPPolicy
return cls(self.env_spec, self.internal_dim,
fixed_std=self.fixed_std,
recurrent=self.recurrent,
input_prev_actions=self.input_prev_actions)
def get_baseline(self):
cls = (baseline.UnifiedBaseline if self.objective == 'upcl' else
baseline.Baseline)
return cls(self.env_spec, self.internal_dim,
input_prev_actions=self.input_prev_actions,
input_time_step=self.input_time_step,
input_policy_state=self.recurrent, # may want to change this
n_hidden_layers=self.value_hidden_layers,
hidden_dim=self.internal_dim,
tau=self.tau)
def get_trust_region_p_opt(self):
if self.trust_region_p:
return trust_region.TrustRegionOptimization(
max_divergence=self.max_divergence)
else:
return None
def get_value_opt(self):
if self.value_opt == 'grad':
return optimizers.GradOptimization(
learning_rate=self.learning_rate, max_iter=5, mix_frac=0.05)
elif self.value_opt == 'lbfgs':
return optimizers.LbfgsOptimization(max_iter=25, mix_frac=0.1)
elif self.value_opt == 'best_fit':
return optimizers.BestFitOptimization(mix_frac=1.0)
else:
return None
def get_model(self):
cls = model.Model
return cls(self.env_spec, self.global_step,
target_network_lag=self.target_network_lag,
sample_from=self.sample_from,
get_policy=self.get_policy,
get_baseline=self.get_baseline,
get_objective=self.get_objective,
get_trust_region_p_opt=self.get_trust_region_p_opt,
get_value_opt=self.get_value_opt)
def get_replay_buffer(self):
if self.replay_buffer_freq <= 0:
return None
else:
assert self.objective in ['pcl', 'upcl'], 'Can\'t use replay buffer with %s' % (
self.objective)
cls = replay_buffer.PrioritizedReplayBuffer
return cls(self.replay_buffer_size,
alpha=self.replay_buffer_alpha,
eviction_strategy=self.eviction)
def get_buffer_seeds(self):
return expert_paths.sample_expert_paths(
self.num_expert_paths, self.env_str, self.env_spec,
load_trajectories_file=self.load_trajectories_file)
def get_controller(self, env):
"""Get controller."""
cls = controller.Controller
return cls(env, self.env_spec, self.internal_dim,
use_online_batch=self.use_online_batch,
batch_by_steps=self.batch_by_steps,
unify_episodes=self.unify_episodes,
replay_batch_size=self.replay_batch_size,
max_step=self.max_step,
cutoff_agent=self.cutoff_agent,
save_trajectories_file=self.save_trajectories_file,
use_trust_region=self.trust_region_p,
use_value_opt=self.value_opt not in [None, 'None'],
update_eps_lambda=self.update_eps_lambda,
prioritize_by=self.prioritize_by,
get_model=self.get_model,
get_replay_buffer=self.get_replay_buffer,
get_buffer_seeds=self.get_buffer_seeds)
def do_before_step(self, step):
pass
def run(self):
"""Run training."""
is_chief = FLAGS.task_id == 0 or not FLAGS.supervisor
sv = None
def init_fn(sess, saver):
ckpt = None
if FLAGS.save_dir and sv is None:
load_dir = FLAGS.save_dir
ckpt = tf.train.get_checkpoint_state(load_dir)
if ckpt and ckpt.model_checkpoint_path:
logging.info('restoring from %s', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
elif FLAGS.load_path:
logging.info('restoring from %s', FLAGS.load_path)
saver.restore(sess, FLAGS.load_path)
if FLAGS.supervisor:
      with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                    merge_devices=True)):
self.global_step = tf.contrib.framework.get_or_create_global_step()
tf.set_random_seed(FLAGS.tf_seed)
self.controller = self.get_controller(self.env)
self.model = self.controller.model
self.controller.setup()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
self.eval_controller = self.get_controller(self.eval_env)
self.eval_controller.setup(train=False)
saver = tf.train.Saver(max_to_keep=10)
step = self.model.global_step
      sv = tf.train.Supervisor(logdir=FLAGS.save_dir,
                               is_chief=is_chief,
                               saver=saver,
                               save_model_secs=600,
                               summary_op=None,  # we define it ourselves
                               save_summaries_secs=60,
                               global_step=step,
                               init_fn=lambda sess: init_fn(sess, saver))
sess = sv.PrepareSession(FLAGS.master)
else:
tf.set_random_seed(FLAGS.tf_seed)
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.controller = self.get_controller(self.env)
self.model = self.controller.model
self.controller.setup()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
self.eval_controller = self.get_controller(self.eval_env)
self.eval_controller.setup(train=False)
saver = tf.train.Saver(max_to_keep=10)
sess = tf.Session()
      sess.run(tf.global_variables_initializer())
init_fn(sess, saver)
self.sv = sv
self.sess = sess
logging.info('hparams:\n%s', self.hparams_string())
model_step = sess.run(self.model.global_step)
if model_step >= self.num_steps:
logging.info('training has reached final step')
return
losses = []
rewards = []
all_ep_rewards = []
for step in xrange(1 + self.num_steps):
if sv is not None and sv.ShouldStop():
logging.info('stopping supervisor')
break
self.do_before_step(step)
(loss, summary,
total_rewards, episode_rewards) = self.controller.train(sess)
_, greedy_episode_rewards = self.eval_controller.eval(sess)
self.controller.greedy_episode_rewards = greedy_episode_rewards
losses.append(loss)
rewards.append(total_rewards)
all_ep_rewards.extend(episode_rewards)
if (random.random() < 0.1 and summary and episode_rewards and
is_chief and sv and sv._summary_writer):
sv.summary_computed(sess, summary)
model_step = sess.run(self.model.global_step)
if is_chief and step % self.validation_frequency == 0:
logging.info('at training step %d, model step %d: '
'avg loss %f, avg reward %f, '
'episode rewards: %f, greedy rewards: %f',
step, model_step,
np.mean(losses), np.mean(rewards),
np.mean(all_ep_rewards),
np.mean(greedy_episode_rewards))
losses = []
rewards = []
all_ep_rewards = []
if model_step >= self.num_steps:
logging.info('training has reached final step')
break
if is_chief and sv is not None:
logging.info('saving final model to %s', sv.save_path)
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
def main(unused_argv):
logging.set_verbosity(logging.INFO)
trainer = Trainer()
trainer.run()
if __name__ == '__main__':
app.run()
| 18,809 | 38.683544 | 92 | py |
models | models-master/orbit/controller_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.controller."""
import os
from absl import logging
from absl.testing import parameterized
import numpy as np
from orbit import controller
from orbit import runner
from orbit import standard_runner
import orbit.utils
import tensorflow as tf
def create_model():
x = tf.keras.layers.Input(shape=(3,), name="input")
y = tf.keras.layers.Dense(4, name="dense")(x)
model = tf.keras.Model(x, y)
return model
def summaries_with_matching_keyword(keyword, summary_dir):
"""Returns summary protos matching given keyword from event file."""
matches = []
event_paths = tf.io.gfile.glob(os.path.join(summary_dir, "events*"))
for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
if event.summary is not None:
for value in event.summary.value:
if keyword in value.tag:
matches.append(event.summary)
return matches
def dataset_fn(ctx):
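  """Builds a constant-valued dataset (zero inputs, one targets) for tests."""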
del ctx
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.ones((10, 4), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10, drop_remainder=True)
return dataset
class TestRunner(standard_runner.StandardTrainer,
standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self, return_numpy=False):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)
self.global_step = self.optimizer.iterations
self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
self.eval_loss = tf.keras.metrics.Mean("eval_loss", dtype=tf.float32)
self.return_numpy = return_numpy
train_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
eval_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardTrainer.__init__(self, train_dataset)
standard_runner.StandardEvaluator.__init__(self, eval_dataset)
def train_step(self, iterator):
def _replicated_step(inputs):
"""Replicated training step."""
inputs, targets = inputs
with tf.GradientTape() as tape:
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
grads = tape.gradient(loss, self.model.variables)
self.optimizer.apply_gradients(zip(grads, self.model.variables))
self.train_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
def train_loop_end(self):
train_loss = self.train_loss.result()
return {
"loss": train_loss.numpy() if self.return_numpy else train_loss,
}
def build_eval_dataset(self):
return self.strategy.distribute_datasets_from_function(dataset_fn)
def eval_begin(self):
self.eval_loss.reset_states()
def eval_step(self, iterator):
def _replicated_step(inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
self.eval_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
def eval_end(self):
eval_loss = self.eval_loss.result()
return {
"eval_loss": eval_loss.numpy() if self.return_numpy else eval_loss,
}
class TestEvaluator(standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
eval_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardEvaluator.__init__(self, eval_dataset)
def eval_reduce(self, state, output):
state.append(output)
return state
def eval_begin(self):
return []
def eval_step(self, iterator):
def _replicated_step(inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
return loss
per_replica_losses = self.strategy.run(
_replicated_step, args=(next(iterator),))
mean_loss = self.strategy.reduce(
tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
return mean_loss
def eval_end(self, outputs):
return {
"eval_loss": tf.reduce_mean(outputs),
}
class TestEvaluatorNoOutput(runner.AbstractEvaluator):
def evaluate(self, num_steps):
pass
class TestEvaluatorWithNestedSummary(standard_runner.StandardEvaluator):
"""Implements the training and evaluation APIs for the test model."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
dataset2 = self.strategy.distribute_datasets_from_function(dataset_fn)
self.loss = tf.keras.metrics.Mean("loss", dtype=tf.float32)
self.accuracy = tf.keras.metrics.CategoricalAccuracy(
"accuracy", dtype=tf.float32)
self.loss2 = tf.keras.metrics.Mean("loss", dtype=tf.float32)
self.accuracy2 = tf.keras.metrics.CategoricalAccuracy(
"accuracy", dtype=tf.float32)
standard_runner.StandardEvaluator.__init__(
self, eval_dataset={
"dataset": dataset,
"dataset2": dataset2
})
def eval_step(self, iterator):
def _replicated_step(loss, accuracy, inputs):
"""Replicated evaluation step."""
inputs, targets = inputs
outputs = self.model(inputs)
loss.update_state(tf.keras.losses.MSE(targets, outputs))
accuracy.update_state(targets, outputs)
self.strategy.run(
lambda inputs: _replicated_step(self.loss, self.accuracy, inputs),
args=(next(iterator["dataset"]),))
self.strategy.run(
lambda inputs: _replicated_step(self.loss2, self.accuracy2, inputs),
args=(next(iterator["dataset2"]),))
def eval_end(self):
return {
"dataset": {
"loss": self.loss.result(),
"accuracy": self.accuracy.result()
},
"dataset2": {
"loss": self.loss2.result(),
"accuracy": self.accuracy2.result()
},
}
class TestTrainerWithSummaries(standard_runner.StandardTrainer):
"""A Trainer model with summaries for testing purposes."""
def __init__(self):
self.strategy = tf.distribute.get_strategy()
self.model = create_model()
self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1)
self.global_step = self.optimizer.iterations
self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
train_dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
standard_runner.StandardTrainer.__init__(
self,
train_dataset,
options=standard_runner.StandardTrainerOptions(
use_tpu_summary_optimization=True))
def build_train_dataset(self):
return self.strategy.distribute_datasets_from_function(dataset_fn)
def train_step(self, iterator):
def _replicated_step(inputs):
"""Replicated training step."""
inputs, targets = inputs
with tf.GradientTape() as tape:
outputs = self.model(inputs)
loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
tf.summary.scalar("loss", loss)
grads = tape.gradient(loss, self.model.variables)
self.optimizer.apply_gradients(zip(grads, self.model.variables))
self.train_loss.update_state(loss)
self.strategy.run(_replicated_step, args=(next(iterator),))
class ControllerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self.model_dir = self.get_temp_dir()
def test_no_checkpoint(self):
test_runner = TestRunner()
# No checkpoint manager and no strategy.
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
# No checkpoint, so global step starts from 0.
test_runner.global_step.assign(0)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
def test_no_checkpoint_and_summaries(self):
test_runner = TestRunner()
# No checkpoint + summary directories.
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_has_checkpoint_no_summaries(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
# Has checkpoint, but no summary directories.
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# No summaries are saved.
self.assertEmpty(tf.io.gfile.glob(
os.path.join(checkpoint_manager.directory, "events.*")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_has_checkpoint_eval_summary_only(
self, enable_async_checkpoint_saving
):
test_runner = TestRunner()
    # Has a checkpoint and an eval summary directory, but no train summary
    # directory.
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertEqual(test_runner.global_step, 10)
# Training summaries are not saved.
self.assertEmpty(tf.io.gfile.glob(
os.path.join(checkpoint_manager.directory, "events.*")))
# Evaluation summaries are saved.
self.assertNotEmpty(tf.io.gfile.glob(
os.path.join(self.model_dir, "summaries/eval/events.*")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_restore_from_most_recent_checkpoint(
self, enable_async_checkpoint_saving
):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=5)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
steps_per_loop=5)
test_controller.train(20)
self.assertLen(checkpoint_manager.checkpoints, 4)
restored_path = test_controller.restore_checkpoint()
self.assertEqual(restored_path, checkpoint_manager.checkpoints[-1])
@parameterized.named_parameters(
("return_numpy_sync_checkpoint_saving", True, False),
("return_numpy_async_checkpoint_saving", True, True),
("return_tensor_sync_checkpoint_saving", False, False),
("return_tensor_async_checkpoint_saving", False, True),
)
def test_train_and_evaluate(
self, return_numpy, enable_async_checkpoint_saving
):
test_runner = TestRunner(return_numpy=return_numpy)
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
# Checkpoints are saved.
self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_train_only(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
)
test_controller.train(steps=10)
# Checkpoints are saved.
self.assertNotEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Only train summaries are written.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/eval")))
def test_evaluate_only(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
eval_results = test_controller.evaluate(steps=2)
    # Only eval summaries are written.
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
self.assertIn("eval_loss", eval_results)
# Tests continuous eval with timeout and timeout_fn.
done_file = os.path.join(self.model_dir, "summaries/eval/Done")
def timeout_fn():
with tf.io.gfile.GFile(done_file, "w") as f:
f.write("DONE")
return True
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.evaluate_continuously(
timeout=1, timeout_fn=timeout_fn, steps=2)
self.assertNotEmpty(tf.io.gfile.glob(done_file))
def test_no_eval_steps(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(model=test_runner.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
evaluator=test_runner,
global_step=test_runner.global_step,
checkpoint_manager=checkpoint_manager)
test_controller.evaluate()
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_already_trained_model(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
test_runner.global_step.assign(10)
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving)
# `global_step` is already `train_steps`.
test_controller.train(steps=10)
def test_summaries_inside_train_fn(self):
test_runner = TestTrainerWithSummaries()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
summary_interval=2,
checkpoint_manager=checkpoint_manager
)
test_controller.train(steps=10)
    # Checkpoints are not saved, since no `checkpoint_interval` was set.
self.assertEmpty(tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt*")))
# Only train summaries are written.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/train")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries/train")))
self.assertFalse(
tf.io.gfile.exists(os.path.join(self.model_dir, "summaries/eval")))
def test_train_and_evaluate_with_same_summary_dir(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries"),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
# Loss and accuracy values should be written into summaries.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "summaries")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries")))
def test_early_stop_on_eval_loss(self):
test_runner = TestRunner()
class EarlyStopController(controller.Controller):
"""A subclass of Controller that supports early stopping."""
def train_and_evaluate(self,
train_steps: int = None,
eval_steps: int = None,
eval_interval: int = None):
while self.global_step.numpy() < train_steps:
interval = min(train_steps - self.global_step.numpy(), eval_interval)
num_steps = self.global_step.numpy() + interval
self.train(steps=num_steps, checkpoint_at_completion=False)
self._sync_on_async_checkpointing()
self.evaluate(steps=eval_steps)
# Early stop condition.
if test_runner.eval_loss.result() < 0.1:
logging.info(
"Training early stopped as eval_loss %s is less than 0.1",
test_runner.eval_loss.result())
return
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
test_controller = EarlyStopController(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2,
checkpoint_manager=checkpoint_manager)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=6, eval_interval=2)
self.assertLess(test_runner.global_step, 10)
def test_evaluate_with_loss_output(self):
test_evaluator = TestEvaluator()
checkpoint = tf.train.Checkpoint(model=test_evaluator.model)
checkpoint.save(os.path.join(self.model_dir, "ckpt"))
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, self.model_dir, max_to_keep=None)
test_controller = controller.Controller(
evaluator=test_evaluator,
global_step=tf.Variable(0, dtype=tf.int64),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.evaluate(steps=5)
    # Only eval summaries are written.
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "summaries/eval")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"eval_loss", os.path.join(self.model_dir, "summaries/eval")))
def test_evaluate_with_no_output(self):
test_controller = controller.Controller(
evaluator=TestEvaluatorNoOutput(),
global_step=tf.Variable(0, dtype=tf.int64),
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
self.assertSameElements(["steps_per_second"],
test_controller.evaluate(steps=5).keys())
def test_train_and_evaluate_reset_datasets(self):
test_runner = TestRunner()
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=2)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
train_dataset = (
test_runner.strategy.distribute_datasets_from_function(dataset_fn))
eval_dataset = (
test_runner.strategy.distribute_datasets_from_function(dataset_fn))
test_runner.train_dataset = train_dataset
test_runner.eval_dataset = eval_dataset
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
@parameterized.named_parameters(
("_sync_checkpoint_saving", False),
("_async_checkpoint_saving", True)
)
def test_eval_and_checkpoint_interval(self, enable_async_checkpoint_saving):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=5)
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
global_step=test_runner.global_step,
steps_per_loop=10,
checkpoint_manager=checkpoint_manager,
enable_async_checkpointing=enable_async_checkpoint_saving,
summary_dir=self.model_dir)
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=5)
    # Expect 2 checkpoints to be saved, at steps 5 and 10.
self.assertLen(
tf.io.gfile.glob(os.path.join(self.model_dir, "ckpt-*.data*")), 2)
    # Expect evaluation to be performed 2 times, at steps 5 and 10.
self.assertLen(
summaries_with_matching_keyword("eval_loss", self.model_dir), 2)
@parameterized.named_parameters(("DefaultSummary", False),
("InjectSummary", True))
def test_evaluate_with_nested_summaries(self, inject_summary_manager):
test_evaluator = TestEvaluatorWithNestedSummary()
if inject_summary_manager:
summary_manager = orbit.utils.SummaryManager(
self.model_dir,
tf.summary.scalar,
global_step=tf.Variable(0, dtype=tf.int64))
else:
summary_manager = None
test_controller = controller.Controller(
evaluator=test_evaluator,
global_step=tf.Variable(0, dtype=tf.int64),
eval_summary_dir=self.model_dir,
summary_manager=summary_manager)
test_controller.evaluate(steps=5)
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"accuracy", os.path.join(self.model_dir, "dataset")))
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset2")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"loss", os.path.join(self.model_dir, "dataset2")))
self.assertNotEmpty(
summaries_with_matching_keyword(
"accuracy", os.path.join(self.model_dir, "dataset2")))
def test_actions(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
class OutputRecorderAction:
"""Simple `Action` that just saves the outputs passed to `__call__`."""
def __init__(self):
self.outputs = []
def __call__(self, output):
self.outputs.append(output)
train_output_recorder = OutputRecorderAction()
eval_output_recorder = OutputRecorderAction()
test_controller = controller.Controller(
trainer=test_runner,
evaluator=test_runner,
train_actions=[train_output_recorder],
eval_actions=[eval_output_recorder],
global_step=test_runner.global_step,
steps_per_loop=2,
summary_dir=os.path.join(self.model_dir, "summaries/train"),
checkpoint_manager=checkpoint_manager,
eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"))
test_controller.train_and_evaluate(
train_steps=10, eval_steps=2, eval_interval=6)
self.assertLen(train_output_recorder.outputs, 5)
for output in train_output_recorder.outputs:
self.assertIn("loss", output)
self.assertGreaterEqual(output["loss"], 0)
self.assertLen(eval_output_recorder.outputs, 2)
for output in eval_output_recorder.outputs:
self.assertIn("eval_loss", output)
self.assertGreaterEqual(output["eval_loss"], 0)
def test_step_per_loop_callable(self):
test_runner = TestRunner()
checkpoint = tf.train.Checkpoint(
model=test_runner.model, optimizer=test_runner.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
self.model_dir,
max_to_keep=None,
step_counter=test_runner.global_step,
checkpoint_interval=10)
def steps_per_loop_fn(global_step):
if global_step > 4:
return 4
return 2
test_controller = controller.Controller(
trainer=test_runner,
global_step=test_runner.global_step,
steps_per_loop=steps_per_loop_fn,
checkpoint_manager=checkpoint_manager
)
test_controller.train(steps=10)
self.assertEqual(test_runner.global_step, 10)
if __name__ == "__main__":
tf.test.main()
| 31,588 | 35.903037 | 80 | py |
models | models-master/orbit/standard_runner_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.standard_runner."""
from absl.testing import parameterized
from orbit import standard_runner
from orbit import utils
import tensorflow as tf
def dataset_fn(input_context=None):
del input_context
def dummy_data(_):
return tf.zeros((1, 1), dtype=tf.float32)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class TestTrainer(standard_runner.StandardTrainer):
"""A StandardTrainer subclass for tests."""
def __init__(self, options=None):
self.strategy = tf.distribute.get_strategy()
self.global_step = utils.create_global_step()
dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
super().__init__(train_dataset=dataset, options=options)
def train_loop_begin(self):
self.global_step.assign(0)
def train_step(self, iterator):
def replica_step(_):
self.global_step.assign_add(1)
self.strategy.run(replica_step, args=(next(iterator),))
def train_loop_end(self):
return self.global_step.numpy()
class TestEvaluator(standard_runner.StandardEvaluator):
"""A StandardEvaluator subclass for tests."""
def __init__(self, options=None):
self.strategy = tf.distribute.get_strategy()
self.global_step = utils.create_global_step()
dataset = self.strategy.distribute_datasets_from_function(dataset_fn)
super().__init__(eval_dataset=dataset, options=options)
def eval_begin(self):
self.global_step.assign(0)
def eval_step(self, iterator):
def replica_step(_):
self.global_step.assign_add(1)
self.strategy.run(replica_step, args=(next(iterator),))
def eval_end(self):
return self.global_step.numpy()
class TestEvaluatorWithOutputsAggregation(standard_runner.StandardEvaluator):
"""A StandardEvaluator subclass for tests."""
def __init__(self, options=None):
self.strategy = tf.distribute.get_strategy()
dataset = self.strategy.distribute_datasets_from_function(
lambda _: tf.data.Dataset.range(10))
super().__init__(eval_dataset=dataset, options=options)
def eval_begin(self):
return {"logits": tf.constant((0.0,))}
def eval_reduce(self, state, step_outputs):
state["logits"] = tf.concat([state["logits"], step_outputs], 0)
return state
def eval_step(self, iterator):
def replica_step(x):
x = tf.cast(x, tf.float32)
return tf.reduce_sum(x)
return self.strategy.experimental_local_results(
self.strategy.run(replica_step, args=(next(iterator),)))
def eval_end(self, outputs):
return tf.reduce_sum(outputs["logits"])
class StandardRunnerTest(parameterized.TestCase):
def test_default_trainer(self):
trainer = TestTrainer()
self.assertEqual(trainer.train(tf.constant(10)), 10)
def test_trainer_with_tpu_summary_optimization(self):
options = standard_runner.StandardTrainerOptions(
use_tpu_summary_optimization=True)
trainer = TestTrainer(options)
self.assertEqual(trainer.train(tf.constant(10)), 10)
@parameterized.named_parameters(("use_tf_while_loop", True), ("", False))
def test_default_evaluator(self, use_tf_while_loop):
options = standard_runner.StandardEvaluatorOptions(
use_tf_while_loop=use_tf_while_loop)
evaluator = TestEvaluator(options)
self.assertEqual(evaluator.evaluate(tf.constant(10)), 10)
@parameterized.named_parameters(("use_tf_while_loop", True), ("", False))
def test_evaluator_with_outputs_aggregation(self, use_tf_while_loop):
options = standard_runner.StandardEvaluatorOptions(
use_tf_while_loop=use_tf_while_loop)
evaluator = TestEvaluatorWithOutputsAggregation(options)
self.assertEqual(evaluator.evaluate(tf.constant(10)), 45)
@parameterized.named_parameters(
("recreate_iterator_for_each_eval", True, 10, 10),
("not_recreate_iterator_for_each_eval", False, 10, 35))
def test_evaluator_with_repeat_dataset(self, recreate_iterator_for_each_eval,
sum_for_1st_time, sum_for_2nd_time):
options = standard_runner.StandardEvaluatorOptions(
recreate_iterator_for_each_eval=recreate_iterator_for_each_eval)
evaluator = TestEvaluatorWithOutputsAggregation(options)
self.assertEqual(evaluator.evaluate(tf.constant(5)), sum_for_1st_time)
self.assertEqual(evaluator.evaluate(tf.constant(5)), sum_for_2nd_time)
if __name__ == "__main__":
tf.test.main()
| 5,100 | 32.339869 | 79 | py |
models | models-master/orbit/standard_runner.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AbstractTrainer/Evaluator subclasses with added functionality.
The classes in this module provide some additional structure to the bare
`AbstractTrainer`/`AbstractEvaluator` APIs.
Both `StandardTrainer` and `StandardEvaluator` split the train/eval loops into
"begin", "step", and "end" methods, and provide an implementation of the loop
itself that makes calls to the relevant step method.
`StandardTrainer` supports running the loop using the TF while loop construct
for added performance (particularly on TPUs). It additionally provides some
functionality to make writing summaries from inside a model more performant when
running on TPUs.
These classes are intended to work well in common settings, however there may
be use cases these classes don't support (for instance, `StandardEvaluator` in
particular doesn't support running full evaluations over multiple different eval
datasets). Users are encouraged to simply fall back to custom `AbstractTrainer`
and `AbstractEvaluator` subclasses in these cases.
"""
import abc
from typing import Any, Optional
import dataclasses
from orbit import runner
from orbit.utils import loop_fns
import tensorflow as tf
@dataclasses.dataclass(frozen=True)
class StandardTrainerOptions:
"""Advanced options for `orbit.StandardTrainer`.
Attributes:
use_tf_function: A boolean indicating whether to apply `tf.function` to the
training loop. This will only affect the body of the loop (involving
`train_step`); `train_loop_begin` and `train_loop_end` will always be run
in eager mode.
use_tf_while_loop: A boolean indicating whether to run the training loop
using a `tf.while_loop`. If `True`, `use_tf_function` must also be `True`.
use_tpu_summary_optimization: A boolean indicating whether to enable a
performance optimization for summaries in TPUs. Writing summaries
conditionally with outside compilation on TPUs can be extremely slow. If
`True`, this optimization creates two `tf.function`s with two XLA programs
(one with summary calls, and one without). The program with summaries runs
only for one step when summaries should be recorded.
"""
use_tf_function: bool = True
use_tf_while_loop: bool = True
use_tpu_summary_optimization: bool = False
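# Example usage (a minimal sketch; `MyTrainer` stands in for a user-defined
# `StandardTrainer` subclass and is not part of this module):
#
#   options = StandardTrainerOptions(
#       use_tf_function=True,
#       use_tf_while_loop=True,
#       use_tpu_summary_optimization=True)
#   trainer = MyTrainer(train_dataset, options=options)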
class StandardTrainer(runner.AbstractTrainer, metaclass=abc.ABCMeta):
"""Implements standard functionality on top of the AbstractTrainer API.
This class structures the training "inner loop" roughly as follows:
train_loop_begin()
for _ in range(num_steps):
train_step(train_iterator)
return train_loop_end()
Calls to `train_loop_begin` and `train_loop_end` are always done in eager
mode, while the loop/`train_step` may be implemented using `tf.while` and/or
`tf.function`, as determined by the `options` passed to `__init__`.
"""
def __init__(self,
train_dataset,
options: Optional[StandardTrainerOptions] = None):
"""Initializes the `StandardTrainer` instance.
Args:
train_dataset: A `tf.nest`-compatible structure of `tf.data.Dataset` or
`DistributedDataset`.
options: An `orbit.StandardTrainerOptions` instance.
"""
options = options or StandardTrainerOptions()
if options.use_tf_while_loop and not options.use_tf_function:
raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` "
"is not supported")
if options.use_tpu_summary_optimization and not options.use_tf_while_loop:
raise ValueError("`use_tpu_summary_optimization=True` and "
"`use_tf_while_loop=False` is not supported")
self._train_options = options
self._train_dataset = train_dataset
self._train_iter = None
self._train_loop_fn = None
def create_train_loop_fn(self):
"""Creates a training loop from the current step function and options.
Returns:
      The train loop function, i.e. a wrapper around multiple train steps.
"""
train_step_fn = self.train_step
if self._train_options.use_tf_while_loop:
loop_fn = loop_fns.create_tf_while_loop_fn(train_step_fn)
if self._train_options.use_tpu_summary_optimization:
loop_fn = loop_fns.LoopFnWithSummaries(loop_fn)
else:
loop_fn = tf.function(loop_fn)
else:
if self._train_options.use_tf_function:
train_step_fn = tf.function(train_step_fn)
loop_fn = loop_fns.create_loop_fn(train_step_fn)
return loop_fn
def train(self, num_steps: tf.Tensor) -> Optional[runner.Output]:
"""Implements `num_steps` steps of training.
Args:
num_steps: The number of training steps to run. This corresponds directly
to the number of calls made to `train_step`.
Returns:
The output of `train_loop_end`.
"""
self.train_loop_begin()
if self._train_loop_fn is None:
self._train_loop_fn = self.create_train_loop_fn()
if self._train_iter is None:
self._train_iter = tf.nest.map_structure(iter, self.train_dataset)
self._train_loop_fn(self._train_iter, num_steps)
return self.train_loop_end()
def train_loop_begin(self):
"""Called once at the beginning of the training loop.
This method is always called in eager mode, and is a good place to reset
metrics that accumulate values over multiple steps of training.
Note that this method is called before dataset iterator creation.
"""
pass
@abc.abstractmethod
def train_step(self, iterator):
"""Implements one step of training.
What a "step" consists of is up to the implementer. When using distribution
strategies, the call to this method takes place in the "cross-replica
context" for generality, to allow e.g. multiple iterator dequeues and calls
to `strategy.run`.
Note that if `use_tf_function=True`, all the code inside `train_step` should
be compatible with `tf.function` tracing (and in particular, any state
modifications involving `self` should be avoided). In some cases, non-
`tf.function` compatible code can be moved to `train_loop_begin` or
`train_loop_end`, which always execute eagerly.
Args:
iterator: A `tf.nest`-compatible structure of `tf.data.Iterator` or
`DistributedIterator`. The structure of this input matches the structure
of `train_dataset` as passed to `__init__`.
"""
pass
def train_loop_end(self) -> Optional[runner.Output]:
"""Called once at the end of the training loop.
This method is always called in eager mode, and is a good place to get
metric results. The value returned from this function will be returned as-is
from the `train` method implementation provided by `StandardTrainer`.
Returns:
The function may return a dictionary of `Tensors`, which will be
written to logs and as TensorBoard summaries. It can also be a
nested dictionary, yielding a hierarchy of summary directories.
"""
pass
@property
def train_dataset(self):
"""The current training dataset."""
return self._train_dataset
@train_dataset.setter
def train_dataset(self, train_dataset):
"""Sets a new training dataset, replacing the current one.
Any unprocessed examples in the current dataset are discarded.
Args:
train_dataset: A `tf.nest`-compatible structure of `tf.data.Dataset` or
`DistributedDataset`.
"""
self._train_dataset = train_dataset
self._train_iter = None
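# A minimal sketch of a `StandardTrainer` subclass, assuming a single-replica
# setup and a caller-supplied Keras model, optimizer, and dataset (all names
# below are illustrative, not part of this module):
#
#   class SketchTrainer(StandardTrainer):
#
#     def __init__(self, model, optimizer, dataset):
#       self.model = model
#       self.optimizer = optimizer
#       self.train_loss = tf.keras.metrics.Mean("train_loss", dtype=tf.float32)
#       super().__init__(train_dataset=dataset)
#
#     def train_loop_begin(self):
#       self.train_loss.reset_states()
#
#     def train_step(self, iterator):
#       inputs, targets = next(iterator)
#       with tf.GradientTape() as tape:
#         outputs = self.model(inputs)
#         loss = tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
#       grads = tape.gradient(loss, self.model.trainable_variables)
#       self.optimizer.apply_gradients(
#           zip(grads, self.model.trainable_variables))
#       self.train_loss.update_state(loss)
#
#     def train_loop_end(self):
#       return {"train_loss": self.train_loss.result()}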
@dataclasses.dataclass(frozen=True)
class StandardEvaluatorOptions:
"""Advanced options for the `orbit.StandardEvaluator`.
Attributes:
use_tf_function: A boolean indicating whether to apply `tf.function` to the
evaluation loop. This will only affect the body of the loop (involving
`eval_step`); `eval_loop_begin` and `eval_loop_end` will always be run
in eager mode.
use_tf_while_loop: A boolean indicating whether to run the evaluation loop
using a `tf.while_loop`. If `True`, `use_tf_function` must also be `True`.
    recreate_iterator_for_each_eval: A boolean indicating whether to create a
      new iterator for the evaluation dataset before each round of evaluation,
which implies each round of evaluation starts from the beginning of
the evaluation dataset. For example, the evaluation dataset is
`[1, 2, 3, 4]`, batch size is 1 and evaluation steps is 2. If `True`, the
data to be evaluated is [1, 2] every time. If `False`, the iterator
state is maintained between calls to `StandardEvaluator.evaluate()`.
"""
use_tf_function: bool = True
use_tf_while_loop: bool = False
recreate_iterator_for_each_eval: bool = True
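# Example (a minimal sketch): with an evaluation dataset of [1, 2, 3, 4],
# batch size 1, and two calls to `evaluate(2)`, the default
# `recreate_iterator_for_each_eval=True` evaluates [1, 2] both times, whereas
# `False` evaluates [1, 2] and then [3, 4]:
#
#   options = StandardEvaluatorOptions(recreate_iterator_for_each_eval=False)
#   evaluator = MyEvaluator(eval_dataset, options=options)  # Illustrative.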
class StandardEvaluator(runner.AbstractEvaluator, metaclass=abc.ABCMeta):
"""Implements the standard functionality of AbstractEvaluator APIs.
This class structures evaluation roughly as follows:
state = eval_begin()
for _ in range(num_steps):
step_outputs = eval_step(eval_iterator)
state = eval_reduce(state, step_outputs)
return eval_end(state)
Calls to `eval_begin` and `eval_end` are always done in eager
mode, while `eval_step` may be compiled with `tf.function` as determined by
the `options` passed to `__init__`. `eval_reduce` is in eager mode if
`use_tf_while_loop=False` in `StandardEvaluatorOptions`, but in graph mode if
`use_tf_while_loop=True`.
This class does not support completely evaluating multiple different datasets
(i.e., where every example of each dataset should be processed, as opposed to
running for a fixed number of evaluation steps). A custom `AbstractEvaluator`
is recommended in this case.
"""
def __init__(self,
eval_dataset,
options: Optional[StandardEvaluatorOptions] = None):
"""Initializes the `StandardEvaluator` instance.
Args:
eval_dataset: A `tf.nest`-compatible structure of `tf.data.Dataset` or
        `DistributedDataset`. On TPUs, if users want to exhaust the dataset
        without specifying the number of eval steps, it is recommended to set
`drop_remainder=False` when batching the dataset, so the infrastructure
can handle the last partial batch properly.
options: An `orbit.StandardEvaluatorOptions` instance.
"""
options = options or StandardEvaluatorOptions()
if options.use_tf_while_loop and not options.use_tf_function:
raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` "
"is not supported")
self._eval_options = options
self._eval_dataset = eval_dataset
self._eval_iter = None
self._eval_loop_fn = None
def create_eval_loop_fn(self, has_state: bool):
"""Creates an eval loop from the current step function and options.
Args:
has_state: If the step function has state, state will be kept in the loop.
Returns:
      The eval loop function, i.e. a wrapper around multiple eval steps.
"""
eval_step_fn = self.eval_step
if self._eval_options.use_tf_while_loop:
# TODO(b/176126742): tf.while_loop doesn't support `None` as a loop input
      # even when it is not used inside the loop. To work around this
      # limitation, we have to build two tf.functions for it.
if has_state:
loop_fn = loop_fns.create_tf_while_loop_fn_with_state(eval_step_fn)
else:
loop_fn = loop_fns.create_tf_while_loop_fn(eval_step_fn)
loop_fn = tf.function(loop_fn)
else:
if self._eval_options.use_tf_function:
eval_step_fn = tf.function(eval_step_fn)
loop_fn = loop_fns.create_loop_fn(eval_step_fn)
return loop_fn
def evaluate(self, num_steps: tf.Tensor) -> Optional[runner.Output]:
"""Implements `num_steps` steps of evaluation.
Args:
num_steps: The number of evaluation steps to run. When this is -1,
evaluation proceeds until a call to `eval_step` raises a `StopIteration`
or `tf.errors.OutOfRangeError`.
Returns:
The output of `self.eval_end()`.
Raises:
ValueError: If `options.use_tf_while_loop` is `True` and `num_steps` is
unspecified.
"""
if self._eval_options.use_tf_while_loop and num_steps == -1:
raise ValueError("Looping until exhausted is not supported if "
"`options.use_tf_while_loop` is `True`")
outputs = self.eval_begin() # pylint: disable=assignment-from-no-return
has_state = outputs is not None
if self._eval_loop_fn is None:
self._eval_loop_fn = self.create_eval_loop_fn(has_state)
# If `recreate_iterator_for_each_eval` is `True`, `self._eval_iter` is
# always None.
if self._eval_iter is None:
eval_iter = tf.nest.map_structure(iter, self.eval_dataset)
if not self._eval_options.recreate_iterator_for_each_eval:
self._eval_iter = eval_iter
else:
eval_iter = self._eval_iter
if self._eval_options.use_tf_while_loop and not has_state:
self._eval_loop_fn(eval_iter, num_steps)
else:
outputs = self._eval_loop_fn(
eval_iter, num_steps, state=outputs, reduce_fn=self.eval_reduce)
if outputs is None:
return self.eval_end()
else:
return self.eval_end(outputs)
def eval_begin(self) -> Any:
"""Called once at the beginning of the evaluation.
This method is always called in eager mode, and is a good place to reset
metrics that accumulate values over the course of evaluation.
Note that this method is called before dataset iterator creation.
Returns:
A value to pass as the `state` argument to `eval_reduce`.
"""
pass
@abc.abstractmethod
def eval_step(self, iterator) -> Any:
"""Implements one step of evaluation.
What a "step" consists of is up to the implementer. When using distribution
strategies, the call to this method takes place in the "cross-replica
context" for generality, to allow e.g. multiple iterator dequeues and calls
to `strategy.run`.
Note that if `use_tf_function=True`, all the code inside `eval_step` should
be compatible with `tf.function` tracing (and in particular, any state
modifications involving `self` should be avoided). In some cases, non-
`tf.function` compatible code can be moved to `eval_loop_begin`,
`eval_reduce`, or `eval_loop_end`, which always execute eagerly.
Args:
iterator: A `tf.nest`-compatible structure of `tf.data.Iterator` or
`DistributedIterator`.
Returns:
An output which is passed as `step_outputs` argument into `eval_reduce`
function.
"""
pass
def eval_end(self, *args) -> Optional[runner.Output]:
"""Called at the end of the evaluation.
Called once at the end of evaluation.
This method is always called in eager mode, and is a good place to get
metric results. The value returned from this function will be returned as-is
from the `evaluate` method implementation provided by `StandardEvaluator`.
Args:
*args: The outputs from `eval_reduce` for the last eval step, if they are
non-`None` (if they are `None`, nothing is passed).
Returns:
The function may return a dictionary of `Tensors`, which will be
written to logs and as TensorBoard summaries. It can also be a
nested dictionary, yielding a hierarchy of summary directories.
"""
pass
def eval_reduce(self,
state: Optional[Any] = None,
step_outputs: Optional[runner.Output] = None) -> Any:
"""A function to perform per-step reduction on the evaluation outputs.
This is useful for passing state throughout evaluation, especially in cases
where maintaining or accumulating state is hard to accomplish using
`tf.metrics.Metric` or other `tf.Variable`-based approaches. For instance,
it can be used to easily accumulate all per-example losses from the full
evaluation for subsequent processing in `eval_end()`.
Args:
state: A state being maintained throughout the evaluation.
step_outputs: Outputs from the current evaluation step.
Returns:
An output which is passed as the `state` argument to this function for the
      next step. After evaluation is finished, the output from the last step
      will be passed to `eval_end`.
"""
pass
@property
def eval_dataset(self):
"""The current evaluation dataset."""
return self._eval_dataset
@eval_dataset.setter
def eval_dataset(self, eval_dataset):
"""Sets a new eval dataset, replacing the current one.
Any unprocessed examples in the current dataset are discarded.
Args:
eval_dataset: A `tf.nest`-compatible structure of `tf.data.Dataset` or
`DistributedDataset`.
"""
self._eval_dataset = eval_dataset
self._eval_iter = None
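# A minimal sketch of a `StandardEvaluator` subclass that accumulates per-step
# losses via `eval_reduce` (illustrative only; the model and MSE loss are
# assumptions, not part of this module):
#
#   class SketchEvaluator(StandardEvaluator):
#
#     def __init__(self, model, dataset):
#       self.model = model
#       super().__init__(eval_dataset=dataset)
#
#     def eval_begin(self):
#       return []  # The initial `state` passed to `eval_reduce`.
#
#     def eval_step(self, iterator):
#       inputs, targets = next(iterator)
#       outputs = self.model(inputs)
#       return tf.reduce_mean(tf.keras.losses.MSE(targets, outputs))
#
#     def eval_reduce(self, state, step_outputs):
#       state.append(step_outputs)
#       return state
#
#     def eval_end(self, outputs):
#       return {"eval_loss": tf.reduce_mean(outputs)}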
| 17,402 | 37.587583 | 80 | py |
models | models-master/orbit/runner.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides AbstractTrainer/Evaluator base classes, defining train/eval APIs."""
import abc
from typing import Dict, Optional, Union
import numpy as np
import tensorflow as tf
Output = Dict[str, Union[tf.Tensor, float, np.number, np.ndarray, 'Output']] # pytype: disable=not-supported-yet
class AbstractTrainer(tf.Module, metaclass=abc.ABCMeta):
"""An abstract class defining the API required for training."""
@abc.abstractmethod
def train(self, num_steps: tf.Tensor) -> Optional[Output]:
"""Implements `num_steps` steps of training.
This method will be called by the `Controller` to perform the "inner loop"
of training. This inner loop amortizes the cost of bookkeeping associated
with checkpointing, evaluation, and writing summaries. Additionally, the
inner loop can be implemented (if desired) using TensorFlow's looping
constructs (e.g. a `for` loop over a `tf.range` inside a `tf.function`),
which can be necessary for getting optimal performance when running on TPU.
For cases that don't require peak performance, a simple Python loop can be
used instead for simplicity.
Args:
num_steps: The number of training steps to run. Note that it is up to the
model what constitutes a "step", which may involve more than one update
to model parameters (e.g., if training a GAN).
Returns:
Either `None`, or a dictionary mapping names to `Tensor`s or NumPy values.
If a dictionary is returned, it will be written to logs and as TensorBoard
summaries. The dictionary may also be nested, which will generate a
hierarchy of summary directories.
"""
pass
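  # A sketch of the TF-looped inner loop described above (illustrative only;
  # `self._step_fn` and `self._iterator` are assumed to be created by the
  # subclass):
  #
  #   @tf.function
  #   def train(self, num_steps):
  #     for _ in tf.range(num_steps):
  #       self._step_fn(next(self._iterator))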
class AbstractEvaluator(tf.Module, metaclass=abc.ABCMeta):
"""An abstract class defining the API required for evaluation."""
@abc.abstractmethod
def evaluate(self, num_steps: tf.Tensor) -> Optional[Output]:
"""Implements `num_steps` steps of evaluation.
    This method will be called by the `Controller` to perform an evaluation.
    The `num_steps` parameter specifies the number of evaluation steps to run,
    as requested by the user when calling one of the `Controller`'s
evaluation methods. A special sentinel value of `-1` is reserved to indicate
evaluation should run until the underlying data source is exhausted.
Args:
num_steps: The number of evaluation steps to run. Note that it is up to
the model what constitutes a "step". Evaluations may also want to
support "complete" evaluations when `num_steps == -1`, running until a
given data source is exhausted.
Returns:
Either `None`, or a dictionary mapping names to `Tensor`s or NumPy values.
If a dictionary is returned, it will be written to logs and as TensorBoard
summaries. The dictionary may also be nested, which will generate a
hierarchy of summary directories.
"""
pass
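  # A sketch of honoring the `-1` sentinel described above (illustrative only;
  # `self._eval_step` and `self._dataset` are assumed to exist):
  #
  #   def evaluate(self, num_steps):
  #     iterator = iter(self._dataset)
  #     step = 0
  #     while num_steps == -1 or step < num_steps:
  #       try:
  #         self._eval_step(next(iterator))
  #       except (StopIteration, tf.errors.OutOfRangeError):
  #         break
  #       step += 1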
| 3,509 | 40.785714 | 113 | py |
models | models-master/orbit/controller.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a `Controller` class for managing the outer training loop."""
import pprint
import time
from typing import Callable, Iterable, Optional, Union
from absl import logging
from orbit import runner
from orbit import utils
import tensorflow as tf
def _log(message: str):
"""Logs `message` to the `info` log, and also prints to stdout."""
logging.info(message)
print(message)
logging.ABSLLogger.register_frame_to_skip(__file__, _log.__name__)
def _format_output(output, indent=4):
"""Formats `output`, either on one line, or indented across multiple lines."""
formatted = pprint.pformat(output)
lines = formatted.splitlines()
if len(lines) == 1:
return formatted
lines = [" " * indent + line for line in lines]
return "\n" + "\n".join(lines)
Action = Callable[[runner.Output], None]
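# Example `Action` (a minimal sketch): log one value from the output passed to
# each action. The "eval_loss" key is illustrative; the available keys depend
# on the trainer/evaluator in use.
#
#   def log_eval_loss(output: runner.Output) -> None:
#     logging.info("eval_loss: %s", output.get("eval_loss"))
#
#   controller = Controller(..., eval_actions=[log_eval_loss])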
class Controller:
"""Class that controls the outer loop of model training and evaluation.
Orbit divides training and evaluation into "inner" and "outer" loops. Inner
loops are implemented by users in the form of `AbstractTrainer` and
`AbstractEvaluator` subclasses, and define how to run a given number of
training or evaluation steps. The outer loop is provided by this `Controller`,
and interleaves calls to the user-provided inner loops with additional actions
such as saving checkpoints, running evaluations, writing summaries, as well as
(optionally) user-provided `Action`s (see below).
There are four top-level "outer loops" provided:
- `train`, which trains until a specified number of global steps is reached;
- `evaluate`, for one-off model evaluation;
- `train_and_evaluate`, for interleaved training and evaluation;
- `evaluate_continuously`, for monitoring a given directory and running
evaluations on new model checkpoints.
While this class attempts to provide out-of-the-box solutions for common
training and evaluation use cases, the internal details and method
implementations are also intended to be simple enough to make subclassing or
other custom outer loop implementations easy to achieve.
Some additional customization can be achieved by supplying `train_actions` or
  `eval_actions` when constructing the `Controller`. Actions are arbitrary
  callables that are applied by the `Controller` to the output of train steps
  (after each inner loop of `steps_per_loop` steps) or an evaluation. This
  provides a hook mechanism, enabling things like reporting metrics to Vizier,
  model exporting, additional logging, etc. See the `orbit.actions` package
  for a small handful
of predefined actions and some utility classes that may be useful in defining
your own.
"""
def __init__(
self,
*, # Makes all args keyword only.
global_step: tf.Variable,
trainer: Optional[runner.AbstractTrainer] = None,
evaluator: Optional[runner.AbstractEvaluator] = None,
strategy: Optional[tf.distribute.Strategy] = None,
# Actions
train_actions: Optional[Iterable[Action]] = None,
eval_actions: Optional[Iterable[Action]] = None,
# Train related
steps_per_loop: Optional[Union[int, Callable[[int], int]]] = None,
checkpoint_manager: Optional[tf.train.CheckpointManager] = None,
enable_async_checkpointing: bool = False,
# Summary related
summary_interval: Optional[int] = None,
summary_dir: Optional[str] = None,
# Evaluation related
eval_summary_dir: Optional[str] = None,
summary_manager: Optional[utils.SummaryManagerInterface] = None,
eval_summary_manager: Optional[utils.SummaryManagerInterface] = None):
"""Initializes a `Controller` instance.
Note that if `checkpoint_manager` is provided and there are checkpoints in
the associated model directory, the model will be restored from the most
recent checkpoint during this `__init__` method.
Args:
global_step: An integer `tf.Variable` storing the global training step
number. Usually this can be obtained from the `iterations` property of
the model's optimizer (e.g. `trainer.optimizer.iterations`). In cases
where multiple optimizers are used, or if one model "step" corresponds
to more than one update to model parameters, users can create and
increment their own global step variable as well. In this case it is
recommended to create the `tf.Variable` inside the distribution strategy
scope, with `aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA` (see
also `orbit.utils.create_global_step()`).
trainer: An instance of `orbit.AbstractTrainer`, which implements the
inner training loop.
evaluator: An instance of `orbit.AbstractEvaluator`, which implements
evaluation.
strategy: An instance of `tf.distribute.Strategy`. If not provided, the
strategy will be initialized from the current in-scope strategy using
`tf.distribute.get_strategy()`.
train_actions: Optional `orbit.Action`s to call after each block of
`steps_per_loop` training steps are run. These will be called with the
output of `trainer.train`.
eval_actions: Optional `orbit.Action`s to call after each evaluation.
These will be called with the output of `evaluator.evaluate`.
steps_per_loop: Optional integer to indicate the number of steps to run in
each inner loop of training (passed as the `num_steps` parameter of
`trainer.train`). It can be also a callable which takes the current
global step value as input and returns the number of steps to run as
output.
checkpoint_manager: An instance of `tf.train.CheckpointManager`. If
provided and there are checkpoints in the associated model directory,
the model will be restored from the most recent checkpoint inside this
`__init__` method. If not provided, the `Controller` will not
automatically save to or restore from checkpoints.
enable_async_checkpointing: Optional bool indicating whether to enable
async checkpoint saving.
summary_interval: Step interval for training summaries. Note that this
argument only applies to `tf.summary` calls inside the `trainer.train`
function. Summaries written by the `Controller` (specifically
"steps_per_second" and output from the `trainer.train` method) will
always be enabled unless the `summary_dir` parameter is `None`. If set,
the value must be divisible by `steps_per_loop`.
summary_dir: The directory to write summaries to. To use the same
directory as for checkpointing, pass `checkpoint_manager.directory`. If
`None`, no training summaries will be written.
eval_summary_dir: The directory to write eval summaries to. If `None`, it
will be set to `summary_dir`. If both `summary_dir` and
`eval_summary_dir` are `None`, no eval summaries will be written.
summary_manager: Instance of the summary manager. If set, the
`summary_dir` will be ignored. Otherwise the summary manager will be
created internally for TensorBoard summaries by default from the
`summary_dir`.
eval_summary_manager: Instance of the eval summary manager. If set, the
`eval_summary_dir` will be ignored. Otherwise the eval summary manager
will be created internally for TensorBoard summaries by default from the
`eval_summary_dir`.
Raises:
ValueError: If both `trainer` and `evaluator` are `None`.
ValueError: If `steps_per_loop` is not a positive integer or a callable.
ValueError: If `summary_interval` is not a positive integer or is not
divisible by `steps_per_loop`.
"""
if trainer is None and evaluator is None:
raise ValueError("`trainer` and `evaluator` should not both be `None`.")
if trainer is not None:
if steps_per_loop is None:
raise ValueError(
"`steps_per_loop` is required when `trainer` is provided.")
elif not callable(steps_per_loop) and (
not isinstance(steps_per_loop, int) or steps_per_loop < 1):
raise ValueError(
f"`steps_per_loop` ({steps_per_loop}) must be a positive integer "
"or a callable.")
if summary_interval is not None:
if summary_interval <= 0:
raise ValueError(
f"`summary_interval` ({summary_interval}) must be larger than 0.")
elif not callable(steps_per_loop) and (summary_interval % steps_per_loop
!= 0):
raise ValueError(
f"`summary interval` ({summary_interval}) must be a multiple "
f"of `steps_per_loop` ({steps_per_loop}).")
if not isinstance(global_step, tf.Variable):
raise ValueError("`global_step` must be a `tf.Variable`.")
self.trainer = trainer
self.evaluator = evaluator
self.strategy = strategy or tf.distribute.get_strategy()
self.train_actions = () if train_actions is None else tuple(train_actions)
self.eval_actions = () if eval_actions is None else tuple(eval_actions)
self.global_step = global_step
self.checkpoint_manager = checkpoint_manager
self._enable_async_checkpoint_saving = enable_async_checkpointing
self._checkpoint_options = tf.train.CheckpointOptions(
enable_async=enable_async_checkpointing
)
if self.trainer is not None:
self.step_timer = None
self.summary_interval = summary_interval
if summary_manager:
self.summary_manager = summary_manager
else:
self.summary_manager = utils.SummaryManager(
summary_dir, tf.summary.scalar, global_step=self.global_step)
self._steps_per_loop = steps_per_loop
if self.evaluator is not None:
eval_summary_dir = eval_summary_dir or summary_dir
if eval_summary_dir == summary_dir and self.trainer is not None:
# Reuse the summary writer if train and evaluation summary directory
# are the same.
self.eval_summary_manager = self.summary_manager
else:
if eval_summary_manager:
self.eval_summary_manager = eval_summary_manager
else:
self.eval_summary_manager = utils.SummaryManager(
eval_summary_dir, tf.summary.scalar, global_step=self.global_step)
tf.summary.experimental.set_step(self.global_step)
# Restores the model if needed.
if self.checkpoint_manager is not None:
restored_path = self.restore_checkpoint()
if restored_path:
_log(f"restored from checkpoint: {restored_path}")
def train(self, steps: int, checkpoint_at_completion: bool = True):
"""Runs training until the specified global step count has been reached.
This method makes calls to `self.trainer.train()` until the global step
count is equal to `steps`. It will additionally save checkpoints (if a
`CheckpointManager` was passed to `Controller.__init__`) and summarize
training output (if `summary_dir` is set).
When async checkpointing is enabled, a sync is triggered at the end of this
method to make sure any ongoing async checkpoint saving is finished before
returning.
Args:
steps: The global step count to train up to.
checkpoint_at_completion: Whether to save a checkpoint when this method
returns (regardless of the checkpointing interval). Defaults to `True`.
"""
self._require("trainer", for_method="train")
# TODO(momernick): Support steps=None or -1 (training to exhaustion).
current_step = self.global_step.numpy() # Cache, since this is expensive.
_log(f"train | step: {current_step: 6d} | training until step {steps}...")
while current_step < steps:
# Calculates steps to run for the next train loop.
num_steps = min(steps - current_step, self.steps_per_loop)
self._train_n_steps(num_steps)
self._maybe_save_checkpoint()
current_step = self.global_step.numpy()
if checkpoint_at_completion:
self._maybe_save_checkpoint(check_interval=False)
self._sync_on_async_checkpointing()
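  # Example (illustrative only): `steps` is a target global step count, not a
  # number of additional steps to run.
  #
  #   controller.train(steps=1000)  # Trains until global_step == 1000.
  #   controller.train(steps=1500)  # Trains 500 more steps.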
def evaluate(self, steps: int = -1) -> Optional[runner.Output]:
"""Runs evaluation for the given number of steps.
This method calls `self.evaluator.evaluate(steps)`, then writes the returned
summaries (if any).
Args:
steps: The number of evaluation steps to run. The value `-1` is reserved
as a special sentinel to indicate a "complete" evaluation that runs
until the underlying dataset is exhausted. Support for this is dependent
on the specific `evaluator` being used.
Returns:
The evaluation results as a dictionary mapping names to NumPy values.
Raises:
ValueError: If `evaluator` was not provided to `Controller.__init__`.
ValueError: If no checkpoint is present in `checkpoint_manager.directory`.
ValueError: If `steps` is not a positive value or -1.
"""
self._require("evaluator", for_method="evaluate")
if steps > 0:
steps_msg = f"running {steps} steps of evaluation..."
elif steps == -1:
steps_msg = "running complete evaluation..."
else:
raise ValueError(f"`steps` ({steps}) should be > 0, or == -1.")
current_step = self.global_step.numpy()
_log(f" eval | step: {current_step: 6d} | {steps_msg}")
start = time.time()
with self.eval_summary_manager.summary_writer().as_default():
steps_tensor = tf.convert_to_tensor(steps, dtype=tf.int32)
eval_output = self.evaluator.evaluate(steps_tensor)
elapsed = time.time() - start
eval_output = eval_output or {}
for action in self.eval_actions:
action(eval_output)
eval_output = tf.nest.map_structure(utils.get_value, eval_output)
if steps > 0:
# Only log if steps has been specified.
steps_per_second = steps / elapsed
eval_output["steps_per_second"] = steps_per_second
steps_per_second_log = f"steps/sec: {steps_per_second: 6.1f} | "
else:
steps_per_second_log = ""
_log(f" eval | step: {current_step: 6d} | "
f"{steps_per_second_log}"
f"eval time: {elapsed: 6.1f} sec | "
f"output: {_format_output(eval_output)}")
self.eval_summary_manager.write_summaries(eval_output)
self.eval_summary_manager.flush()
return eval_output
def train_and_evaluate(
self,
train_steps: int,
eval_steps: int = -1,
eval_interval: Optional[int] = None,
) -> Optional[runner.Output]:
"""Runs interleaved training and evaluation.
This method interleaves calls to `self.train()` and `self.evaluate()`,
training the model until the global step count equals `train_steps`, and
running an evaluation for `eval_steps` every `eval_interval` training steps.
In addition, this method will run a final evaluation at the end of the
training sequence.
When async checkpointing is enabled, a sync is triggered at the end of this
method to make sure any ongoing async checkpoint saving is finished before
returning.
Args:
train_steps: The global step count to train up to.
eval_steps: The number of steps to run during an evaluation. If -1, this
method will evaluate over the entire evaluation dataset.
      eval_interval: The number of training steps to run between evaluations.
        If set, training will always stop every `eval_interval` steps, even if
        this results in a shorter inner loop than specified by the
        `steps_per_loop` setting. If `None`, evaluation will only be performed
        after training is complete.
Returns:
The evaluation results as a dictionary mapping names to NumPy values.
"""
self._require("trainer", for_method="train_and_evaluate")
self._require("evaluator", for_method="train_and_evaluate")
output = None
current_step = self.global_step.numpy() # Cache, since this is expensive.
eval_interval = eval_interval or (train_steps - current_step)
while current_step < train_steps:
interval = min(train_steps - current_step, eval_interval)
num_steps = current_step + interval
self.train(steps=num_steps, checkpoint_at_completion=False)
output = self.evaluate(steps=eval_steps)
current_step = self.global_step.numpy()
self._maybe_save_checkpoint(check_interval=False)
self._sync_on_async_checkpointing()
return output
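  # Example (hedged sketch): train to 10_000 steps, running a complete
  # evaluation every 1_000 training steps:
  #
  #   controller.train_and_evaluate(
  #       train_steps=10_000, eval_steps=-1, eval_interval=1_000)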
def evaluate_continuously(
self,
steps: int = -1,
timeout: Optional[Union[int, float]] = None,
timeout_fn: Optional[Callable[[], bool]] = None,
) -> Optional[runner.Output]:
"""Continuously monitors a directory and evaluates new checkpoints in it.
This method continuously monitors a directory as specified by this
Controller's CheckpointManager init arg and runs evaluation on the
checkpoints found there.
Args:
steps: The number of steps to run when evaluating. If -1, this method will
evaluate over the entire evaluation dataset.
timeout: The maximum number of seconds to wait between checkpoints. See
tf.train.checkpoints_iterator documentation.
timeout_fn: Optional callable to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated
and the iterator will exit.
Returns:
The evaluation results as a dictionary mapping names to NumPy values.
Raises:
      ValueError: If no checkpoint is found in
        `self.checkpoint_manager.directory`.
ValueError: If `evaluator` was not provided as a controller init arg.
"""
self._require("evaluator", for_method="evaluate_continuously")
self._require("checkpoint_manager", for_method="evaluate_continuously")
output = None
for checkpoint_path in tf.train.checkpoints_iterator(
self.checkpoint_manager.directory,
timeout=timeout,
timeout_fn=timeout_fn):
self.restore_checkpoint(checkpoint_path)
output = self.evaluate(steps)
return output
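  # Example (hedged sketch): evaluate each new checkpoint as it appears, and
  # stop waiting after 30 minutes without a new one (the stop condition here is
  # an illustrative assumption):
  #
  #   controller.evaluate_continuously(
  #       steps=-1, timeout=30 * 60, timeout_fn=lambda: True)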
def restore_checkpoint(self, checkpoint_path: Optional[str] = None):
"""Restores the model from a checkpoint.
Args:
checkpoint_path: An optional string specifying the checkpoint path to
restore from. If `None`, will restore from the most recent checkpoint
(or initialize the model using a custom `init_fn` if no checkpoints can
be found) using `self.checkpoint_manager.restore_or_initialize()`.
Returns:
The path to the restored checkpoint if a restore happened, or `None` if no
restore occurred.
"""
self._require("checkpoint_manager", for_method="restore_checkpoint")
with self.strategy.scope():
# Checkpoint restoring should be inside scope (b/139450638).
if checkpoint_path is not None:
_log(f"restoring model from {checkpoint_path}...")
self.checkpoint_manager.checkpoint.restore(checkpoint_path)
else:
_log("restoring or initializing model...")
checkpoint_path = self.checkpoint_manager.restore_or_initialize()
if checkpoint_path is not None:
_log(f"restored model from {checkpoint_path}.")
return checkpoint_path
def save_checkpoint(self):
"""Saves the model to a checkpoint.
This method will save a checkpoint containing the current state of the
model.
Raises:
ValueError: If no `checkpoint_manager` was provided to
`Controller.__init__`.
"""
self._require("checkpoint_manager", for_method="save_checkpoint")
self._maybe_save_checkpoint(check_interval=False)
@property
def steps_per_loop(self):
"""Returns current steps_per_loop value in a training loop."""
if callable(self._steps_per_loop):
return self._steps_per_loop(self.global_step.numpy())
return self._steps_per_loop
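  # Example (hedged sketch): `steps_per_loop` may be passed to
  # `Controller.__init__` as a callable of the global step, e.g. to run shorter
  # inner loops early in training:
  #
  #   steps_per_loop=lambda step: 10 if step < 100 else 1_000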
def _train_n_steps(self, num_steps: int):
"""Runs training for `num_steps` steps.
Also prints/logs updates about training progress, and summarizes training
output (if output is returned from `self.trainer.train()`, and if
`self.summary_dir` is set).
Args:
num_steps: An integer specifying how many steps of training to run.
Raises:
RuntimeError: If `global_step` is not properly incremented by `num_steps`
after calling `self.trainer.train(num_steps)`.
"""
if not self.step_timer:
self.step_timer = StepTimer(self.global_step)
current_step = self.global_step.numpy()
with self.summary_manager.summary_writer().as_default():
should_record = False # Allows static optimization in no-summary cases.
if self.summary_interval:
# Create a predicate to determine when summaries should be written.
should_record = lambda: (self.global_step % self.summary_interval == 0)
with tf.summary.record_if(should_record):
num_steps_tensor = tf.convert_to_tensor(num_steps, dtype=tf.int32)
train_output = self.trainer.train(num_steps_tensor)
# Verify that global_step was updated properly, then update current_step.
expected_step = current_step + num_steps
if self.global_step.numpy() != expected_step:
message = (
f"`trainer.train({num_steps})` did not update `global_step` by "
f"{num_steps}. Old value was {current_step}, expected updated value "
f"to be {expected_step}, but it was {self.global_step.numpy()}.")
logging.warning(message)
train_output = train_output or {}
for action in self.train_actions:
action(train_output)
train_output = tf.nest.map_structure(utils.get_value, train_output)
current_step = self.global_step.numpy()
steps_per_second = self.step_timer.steps_per_second()
_log(f"train | step: {current_step: 6d} | "
f"steps/sec: {steps_per_second: 6.1f} | "
f"output: {_format_output(train_output)}")
train_output["steps_per_second"] = steps_per_second
self.summary_manager.write_summaries(train_output)
self.summary_manager.flush()
def _maybe_save_checkpoint(self, check_interval: bool = True):
"""Conditionally saves a checkpoint.
A checkpoint is saved if a `CheckpointManager` is available, and if the
required number of steps has elapsed since the last checkpoint was saved
(although this condition can be disabled by setting `check_interval=False`).
Args:
check_interval: Whether to check if the checkpoint interval has fully
elapsed. If `False`, a checkpoint is saved regardless of the elapsed
steps since the most recent checkpoint, unless no `checkpoint_manager`
was provided to `Controller.__init__`.
Returns:
A boolean indicating whether a checkpoint was saved.
"""
if self.checkpoint_manager and self.checkpoint_manager.checkpoint_interval:
ckpt_path = self.checkpoint_manager.save(
checkpoint_number=self.global_step.numpy(),
check_interval=check_interval,
options=self._checkpoint_options)
if ckpt_path is not None:
_log(f"saved checkpoint to {ckpt_path}.")
return True
return False
def _require(self, attribute, for_method):
"""Utility method to raise an error if the given `attribute` is not set."""
if getattr(self, attribute, None) is None:
raise ValueError(
f"`{attribute}` is not set. Pass `{attribute}` to "
f"`Controller.__init__` before calling `{for_method}()`.")
def _sync_on_async_checkpointing(self):
"""Force to wait for the async checkpoint saving (if any) to finish."""
# pylint: disable=protected-access
if self.checkpoint_manager:
logging.info("Sync on async checkpoint saving.")
self.checkpoint_manager.sync()
class StepTimer:
"""Utility class for measuring steps/second."""
def __init__(self, step):
self.step = step
self.start()
def start(self):
self.last_iteration = self.step.numpy()
self.last_time = time.time()
def steps_per_second(self, restart=True):
value = ((self.step.numpy() - self.last_iteration) /
(time.time() - self.last_time))
if restart:
self.start()
return value
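# Example (hedged): a minimal, self-contained sketch of using `StepTimer`
# outside of `Controller`; the assignments below merely stand in for real
# training steps.
def _step_timer_example():
  step = tf.Variable(0, dtype=tf.int64)
  timer = StepTimer(step)
  for _ in range(100):
    step.assign_add(1)  # Stand-in for one training step.
  return timer.steps_per_second()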
| 24,765 | 41.262799 | 80 | py |
models | models-master/orbit/__init__.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines exported symbols for the `orbit` package."""
from orbit import actions
# Internal import orbit.
from orbit import utils
from orbit.controller import Action
from orbit.controller import Controller
from orbit.runner import AbstractEvaluator
from orbit.runner import AbstractTrainer
from orbit.standard_runner import StandardEvaluator
from orbit.standard_runner import StandardEvaluatorOptions
from orbit.standard_runner import StandardTrainer
from orbit.standard_runner import StandardTrainerOptions
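# Example (hedged sketch): the canonical wiring of these symbols, assuming
# user-defined subclasses of the standard runners:
#
#   trainer = MyTrainer(...)      # A `StandardTrainer` subclass.
#   evaluator = MyEvaluator(...)  # A `StandardEvaluator` subclass.
#   controller = Controller(
#       trainer=trainer,
#       evaluator=evaluator,
#       global_step=trainer.optimizer.iterations,
#       steps_per_loop=100)
#   controller.train_and_evaluate(train_steps=1_000, eval_interval=100)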
| 1,117 | 35.064516 | 74 | py |
models | models-master/orbit/actions/new_best_metric_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.actions.new_best_metric."""
import os
from orbit import actions
import tensorflow as tf
class NewBestMetricTest(tf.test.TestCase):
def test_new_best_metric_higher_is_better(self):
new_best_metric = actions.NewBestMetric(
lambda x: x['value'], higher_is_better=True)
self.assertTrue(new_best_metric.test({'value': 0.0}))
self.assertTrue(new_best_metric.commit({'value': 0.0}))
self.assertFalse(new_best_metric.test({'value': 0.0}))
self.assertTrue(new_best_metric.test({'value': 1.0}))
def test_new_best_metric_lower_is_better(self):
new_best_metric = actions.NewBestMetric('value', higher_is_better=False)
self.assertTrue(new_best_metric.test({'value': 0.0}))
self.assertTrue(new_best_metric.commit({'value': 0.0}))
self.assertFalse(new_best_metric.test({'value': 0.0}))
self.assertTrue(new_best_metric.test({'value': -1.0}))
def test_new_best_metric_persistence(self):
backing_file = self.create_tempfile()
new_best_metric = actions.NewBestMetric(
'value',
higher_is_better=True,
filename=backing_file.full_path,
write_metric=False)
self.assertTrue(new_best_metric.test({'value': 0.0}))
self.assertTrue(new_best_metric.commit({'value': 0.0}))
self.assertFalse(new_best_metric.test({'value': 0.0}))
new_best_metric = actions.NewBestMetric(
'value', higher_is_better=True, filename=backing_file.full_path)
self.assertLess(new_best_metric.best_value, 0.0)
self.assertTrue(new_best_metric.commit({'value': 5.0}))
self.assertEqual(new_best_metric.best_value, 5.0)
new_best_metric = actions.NewBestMetric(
'value', higher_is_better=True, filename=backing_file.full_path)
self.assertEqual(new_best_metric.best_value, 5.0)
def test_json_persisted_value(self):
tempfile = self.create_tempfile().full_path
value = {'a': 1, 'b': 2}
persisted_value = actions.JSONPersistedValue(value, tempfile)
    # The initial value is used since tempfile is empty.
self.assertEqual(persisted_value.read(), value)
persisted_value = actions.JSONPersistedValue('ignored', tempfile)
# Initial value of 'ignored' is ignored, since there's a value in tempfile.
self.assertEqual(persisted_value.read(), value)
value = [1, 2, 3]
persisted_value.write(value)
# Now that a new value is written, it gets read on initialization.
persisted_value = actions.JSONPersistedValue(['also ignored'], tempfile)
self.assertEqual(persisted_value.read(), value)
# Writes can be disabled.
persisted_value = actions.JSONPersistedValue(
'ignored', tempfile, write_value=False)
self.assertEqual(persisted_value.read(), value)
persisted_value.write("won't get persisted")
persisted_value = actions.JSONPersistedValue(
'ignored', tempfile, write_value=False)
self.assertEqual(persisted_value.read(), value)
def test_json_persisted_value_create_dirs(self):
tempfile = os.path.join(self.create_tempdir().full_path, 'subdir/value')
value = {'a': 1, 'b': 2}
# The directory is not created if write_value=False.
actions.JSONPersistedValue(value, tempfile, write_value=False)
self.assertFalse(tf.io.gfile.exists(os.path.dirname(tempfile)))
actions.JSONPersistedValue(value, tempfile)
self.assertTrue(tf.io.gfile.exists(tempfile))
if __name__ == '__main__':
tf.test.main()
| 4,014 | 41.263158 | 79 | py |
models | models-master/orbit/actions/export_saved_model_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.actions.export_saved_model."""
import os
from orbit import actions
import tensorflow as tf
def _id_key(name):
_, id_num = name.rsplit('-', maxsplit=1)
return int(id_num)
def _id_sorted_file_base_names(dir_path):
return sorted(tf.io.gfile.listdir(dir_path), key=_id_key)
class TestModel(tf.Module):
def __init__(self):
self.value = tf.Variable(0)
@tf.function(input_signature=[])
def __call__(self):
return self.value
class ExportSavedModelTest(tf.test.TestCase):
def test_export_file_manager_default_ids(self):
directory = self.create_tempdir()
base_name = os.path.join(directory.full_path, 'basename')
manager = actions.ExportFileManager(base_name, max_to_keep=3)
self.assertEmpty(tf.io.gfile.listdir(directory.full_path))
directory.create_file(manager.next_name())
manager.clean_up() # Shouldn't do anything...
self.assertLen(tf.io.gfile.listdir(directory.full_path), 1)
directory.create_file(manager.next_name())
manager.clean_up() # Shouldn't do anything...
self.assertLen(tf.io.gfile.listdir(directory.full_path), 2)
directory.create_file(manager.next_name())
manager.clean_up() # Shouldn't do anything...
self.assertLen(tf.io.gfile.listdir(directory.full_path), 3)
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 4)
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-0', 'basename-1', 'basename-2', 'basename-3'])
manager.clean_up() # Should delete file with lowest ID.
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-1', 'basename-2', 'basename-3'])
manager = actions.ExportFileManager(base_name, max_to_keep=3)
self.assertEqual(os.path.basename(manager.next_name()), 'basename-4')
def test_export_file_manager_custom_ids(self):
directory = self.create_tempdir()
base_name = os.path.join(directory.full_path, 'basename')
id_num = 0
def next_id():
return id_num
manager = actions.ExportFileManager(
base_name, max_to_keep=2, next_id_fn=next_id)
self.assertEmpty(tf.io.gfile.listdir(directory.full_path))
id_num = 30
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 1)
manager.clean_up() # Shouldn't do anything...
self.assertEqual(
_id_sorted_file_base_names(directory.full_path), ['basename-30'])
id_num = 200
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 2)
manager.clean_up() # Shouldn't do anything...
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-30', 'basename-200'])
id_num = 1000
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 3)
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-30', 'basename-200', 'basename-1000'])
manager.clean_up() # Should delete file with lowest ID.
self.assertLen(tf.io.gfile.listdir(directory.full_path), 2)
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-200', 'basename-1000'])
def test_export_file_manager_with_suffix(self):
directory = self.create_tempdir()
base_name = os.path.join(directory.full_path, 'basename')
id_num = 0
def next_id():
return id_num
subdirectory = 'sub'
manager = actions.ExportFileManager(
base_name, max_to_keep=2, next_id_fn=next_id, subdirectory=subdirectory
)
self.assertEmpty(tf.io.gfile.listdir(directory.full_path))
id_num = 30
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 1)
manager.clean_up() # Shouldn't do anything...
self.assertEqual(
_id_sorted_file_base_names(directory.full_path), ['basename-30']
)
id_num = 200
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 2)
manager.clean_up() # Shouldn't do anything...
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-30', 'basename-200'],
)
id_num = 1000
directory.create_file(manager.next_name())
self.assertLen(tf.io.gfile.listdir(directory.full_path), 3)
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-30', 'basename-200', 'basename-1000'],
)
manager.clean_up() # Should delete file with lowest ID.
self.assertLen(tf.io.gfile.listdir(directory.full_path), 3)
# Note that the base folder is intact, only the suffix folder is deleted.
self.assertEqual(
_id_sorted_file_base_names(directory.full_path),
['basename-30', 'basename-200', 'basename-1000'],
)
step_folder = os.path.join(directory.full_path, 'basename-1000')
self.assertIn(subdirectory, tf.io.gfile.listdir(step_folder))
def test_export_file_manager_managed_files(self):
directory = self.create_tempdir()
directory.create_file('basename-5')
directory.create_file('basename-10')
directory.create_file('basename-50')
directory.create_file('basename-1000')
directory.create_file('basename-9')
directory.create_file('basename-10-suffix')
base_name = os.path.join(directory.full_path, 'basename')
manager = actions.ExportFileManager(base_name, max_to_keep=3)
self.assertLen(manager.managed_files, 5)
self.assertEqual(manager.next_name(), f'{base_name}-1001')
manager.clean_up()
self.assertEqual(
manager.managed_files,
[f'{base_name}-10', f'{base_name}-50', f'{base_name}-1000'])
def test_export_file_manager_managed_files_double_slash(self):
directory = self.create_tempdir('foo//bar')
directory.create_file('basename-5')
directory.create_file('basename-10')
directory.create_file('basename-50')
directory.create_file('basename-1000')
directory.create_file('basename-9')
directory.create_file('basename-10-suffix')
base_name = os.path.join(directory.full_path, 'basename')
expected_base_name = os.path.normpath(base_name)
self.assertNotEqual(base_name, expected_base_name)
manager = actions.ExportFileManager(base_name, max_to_keep=3)
self.assertLen(manager.managed_files, 5)
self.assertEqual(manager.next_name(), f'{expected_base_name}-1001')
manager.clean_up()
self.assertEqual(manager.managed_files, [
f'{expected_base_name}-10', f'{expected_base_name}-50',
f'{expected_base_name}-1000'
])
def test_export_saved_model(self):
directory = self.create_tempdir()
base_name = os.path.join(directory.full_path, 'basename')
file_manager = actions.ExportFileManager(base_name, max_to_keep=2)
model = TestModel()
export_action = actions.ExportSavedModel(
model, file_manager=file_manager, signatures=model.__call__)
model.value.assign(3)
self.assertEqual(model(), 3)
self.assertEmpty(file_manager.managed_files)
export_action({})
self.assertLen(file_manager.managed_files, 1)
reloaded_model = tf.saved_model.load(file_manager.managed_files[-1])
self.assertEqual(reloaded_model(), 3)
model.value.assign(5)
self.assertEqual(model(), 5)
export_action({})
self.assertLen(file_manager.managed_files, 2)
reloaded_model = tf.saved_model.load(file_manager.managed_files[-1])
self.assertEqual(reloaded_model(), 5)
model.value.assign(7)
self.assertEqual(model(), 7)
export_action({})
self.assertLen(file_manager.managed_files, 2) # Still 2, due to clean up.
reloaded_model = tf.saved_model.load(file_manager.managed_files[-1])
self.assertEqual(reloaded_model(), 7)
if __name__ == '__main__':
tf.test.main()
| 8,521 | 36.707965 | 79 | py |
models | models-master/orbit/actions/save_checkpoint_if_preempted.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the `SaveCheckpointIfPreempted` action."""
from typing import Optional
import tensorflow as tf
class SaveCheckpointIfPreempted:
"""Action that saves on-demand checkpoints after a preemption."""
def __init__(
self,
cluster_resolver: tf.distribute.cluster_resolver.ClusterResolver,
checkpoint_manager: tf.train.CheckpointManager,
checkpoint_number: Optional[tf.Variable] = None,
keep_running_after_save: Optional[bool] = False,
):
"""Initializes the instance.
Args:
cluster_resolver: A `tf.distribute.cluster_resolver.ClusterResolver`
object.
checkpoint_manager: A `tf.train.CheckpointManager` object.
checkpoint_number: A `tf.Variable` to indicate the checkpoint_number for
checkpoint manager, usually it will be the global step.
      keep_running_after_save: Whether to keep the job running after saving the
        on-demand preemption checkpoint. Only set this to `True` when
        in-process preemption recovery with
        `tf.distribute.experimental.PreemptionWatcher` is enabled.
"""
self._checkpoint_number = checkpoint_number
self._termination_config = None
if keep_running_after_save:
self._termination_config = tf.distribute.experimental.TerminationConfig(
exit_fn=lambda: None
)
self._preemption_handler = (
tf.distribute.experimental.PreemptionCheckpointHandler(
cluster_resolver,
checkpoint_manager,
termination_config=self._termination_config,
)
)
def __call__(self, _) -> None:
self._preemption_handler.save_checkpoint_if_preempted(
checkpoint_number=self._checkpoint_number, check_interval=False
)
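# Example (hedged): a minimal sketch of wiring this action into an Orbit
# controller as a train action. The global step and checkpoint manager are
# assumed to exist in user code, and the resolver choice is illustrative.
def _example_usage(global_step, checkpoint_manager):
  resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
  # Pass the returned action via `train_actions=[...]` to `orbit.Controller`.
  return SaveCheckpointIfPreempted(
      cluster_resolver=resolver,
      checkpoint_manager=checkpoint_manager,
      checkpoint_number=global_step)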
| 2,319 | 35.825397 | 80 | py |
models | models-master/orbit/actions/export_saved_model.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the `ExportSavedModel` action and associated helper classes."""
import os
import re
from typing import Callable, Optional
import tensorflow as tf
def _id_key(filename):
_, id_num = filename.rsplit('-', maxsplit=1)
return int(id_num)
def _find_managed_files(base_name):
r"""Returns all files matching '{base_name}-\d+', in sorted order."""
managed_file_regex = re.compile(rf'{re.escape(base_name)}-\d+$')
filenames = tf.io.gfile.glob(f'{base_name}-*')
filenames = filter(managed_file_regex.match, filenames)
return sorted(filenames, key=_id_key)
class _CounterIdFn:
"""Implements a counter-based ID function for `ExportFileManager`."""
def __init__(self, base_name: str):
managed_files = _find_managed_files(base_name)
self.value = _id_key(managed_files[-1]) + 1 if managed_files else 0
def __call__(self):
output = self.value
self.value += 1
return output
class ExportFileManager:
"""Utility class that manages a group of files with a shared base name.
For actions like SavedModel exporting, there are potentially many different
file naming and cleanup strategies that may be desirable. This class provides
a basic interface allowing SavedModel export to be decoupled from these
details, and a default implementation that should work for many basic
scenarios. Users may subclass this class to alter behavior and define more
customized naming and cleanup strategies.
"""
def __init__(
self,
base_name: str,
max_to_keep: int = 5,
next_id_fn: Optional[Callable[[], int]] = None,
subdirectory: Optional[str] = None,
):
"""Initializes the instance.
Args:
base_name: A shared base name for file names generated by this class.
max_to_keep: The maximum number of files matching `base_name` to keep
after each call to `cleanup`. The most recent (as determined by file
modification time) `max_to_keep` files are preserved; the rest are
deleted. If < 0, all files are preserved.
next_id_fn: An optional callable that returns integer IDs to append to
base name (formatted as `'{base_name}-{id}'`). The order of integers is
used to sort files to determine the oldest ones deleted by `clean_up`.
If not supplied, a default ID based on an incrementing counter is used.
        One common alternative may be to use the current global step count,
for instance passing `next_id_fn=global_step.numpy`.
      subdirectory: An optional subdirectory to append after
        `{base_name}-{id}`. If set, the file manager will manage files at
        `{base_name}-{id}/{subdirectory}`.
"""
self._base_name = os.path.normpath(base_name)
self._max_to_keep = max_to_keep
self._next_id_fn = next_id_fn or _CounterIdFn(self._base_name)
self._subdirectory = subdirectory or ''
@property
def managed_files(self):
"""Returns all files managed by this instance, in sorted order.
Returns:
The list of files matching the `base_name` provided when constructing this
`ExportFileManager` instance, sorted in increasing integer order of the
IDs returned by `next_id_fn`.
"""
files = _find_managed_files(self._base_name)
return [
os.path.normpath(os.path.join(f, self._subdirectory)) for f in files
]
def clean_up(self):
"""Cleans up old files matching `{base_name}-*`.
The most recent `max_to_keep` files are preserved.
"""
if self._max_to_keep < 0:
return
    # Note that the base folder will remain intact; only the folder with the
    # subdirectory suffix is deleted.
for filename in self.managed_files[: -self._max_to_keep]:
tf.io.gfile.rmtree(filename)
def next_name(self) -> str:
"""Returns a new file name based on `base_name` and `next_id_fn()`."""
base_path = f'{self._base_name}-{self._next_id_fn()}'
return os.path.normpath(os.path.join(base_path, self._subdirectory))
class ExportSavedModel:
"""Action that exports the given model as a SavedModel."""
def __init__(self,
model: tf.Module,
file_manager: ExportFileManager,
signatures,
options: Optional[tf.saved_model.SaveOptions] = None):
"""Initializes the instance.
Args:
model: The model to export.
file_manager: An instance of `ExportFileManager` (or a subclass), that
provides file naming and cleanup functionality.
signatures: The signatures to forward to `tf.saved_model.save()`.
options: Optional options to forward to `tf.saved_model.save()`.
"""
self.model = model
self.file_manager = file_manager
self.signatures = signatures
self.options = options
def __call__(self, _):
"""Exports the SavedModel."""
export_dir = self.file_manager.next_name()
tf.saved_model.save(self.model, export_dir, self.signatures, self.options)
self.file_manager.clean_up()
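# Example (hedged): a minimal sketch that exports `model` each time the action
# is invoked, keeping only the three most recent exports. The `model` argument
# and its `__call__` signature are assumptions for illustration.
def _example_usage(model, base_name):
  file_manager = ExportFileManager(base_name, max_to_keep=3)
  action = ExportSavedModel(
      model, file_manager=file_manager, signatures=model.__call__)
  action({})  # Actions receive the train/eval output; it is unused here.
  return file_manager.managed_files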
| 5,540 | 35.453947 | 80 | py |
models | models-master/orbit/actions/conditional_action.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a `ConditionalAction` abstraction."""
from typing import Any, Callable, Sequence, Union
from orbit import controller
from orbit import runner
import tensorflow as tf
Condition = Callable[[runner.Output], Union[bool, tf.Tensor]]
def _as_sequence(maybe_sequence: Union[Any, Sequence[Any]]) -> Sequence[Any]:
if isinstance(maybe_sequence, Sequence):
return maybe_sequence
return [maybe_sequence]
class ConditionalAction:
"""Represents an action that is only taken when a given condition is met.
This class is itself an `Action` (a callable that can be applied to train or
eval outputs), but is intended to make it easier to write modular and reusable
conditions by decoupling "when" something whappens (the condition) from "what"
happens (the action).
"""
def __init__(
self,
condition: Condition,
action: Union[controller.Action, Sequence[controller.Action]],
):
"""Initializes the instance.
Args:
      condition: A callable accepting train or eval outputs and returning a
        bool.
action: The action (or optionally sequence of actions) to perform when
`condition` is met.
"""
self.condition = condition
self.action = action
def __call__(self, output: runner.Output) -> None:
if self.condition(output):
for action in _as_sequence(self.action):
action(output)
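# Example (hedged): a small sketch that fires an action only when the loss in
# the output dictionary drops below a threshold. The 'loss' key is an
# assumption about the structure of the train/eval output.
def _example_usage():
  log_small_loss = ConditionalAction(
      condition=lambda output: output['loss'] < 0.1,
      action=lambda output: print('new small loss:', output['loss']))
  log_small_loss({'loss': 0.05})  # Condition met; the action runs.
  log_small_loss({'loss': 0.50})  # Condition not met; nothing happens.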
| 1,978 | 31.442623 | 80 | py |
models | models-master/orbit/actions/conditional_action_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.actions.conditional_action."""
from orbit import actions
import tensorflow as tf
class ConditionalActionTest(tf.test.TestCase):
def test_conditional_action(self):
# Define a function to raise an AssertionError, since we can't in a lambda.
def raise_assertion(arg):
raise AssertionError(str(arg))
conditional_action = actions.ConditionalAction(
condition=lambda x: x['value'], action=raise_assertion)
conditional_action({'value': False}) # Nothing is raised.
with self.assertRaises(AssertionError) as ctx:
conditional_action({'value': True})
self.assertEqual(ctx.exception.message, "{'value': True}")
if __name__ == '__main__':
tf.test.main()
| 1,325 | 32.15 | 79 | py |
models | models-master/orbit/actions/new_best_metric.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the `NewBestMetric` condition and associated helper classes."""
import json
import os
import sys
from typing import Any, Callable, Optional, Union
import uuid
from orbit import runner
from orbit import utils
import tensorflow as tf
MetricFn = Callable[[runner.Output], Union[float, tf.Tensor]]
class NewBestMetric:
"""Condition that is satisfied when a new best metric is achieved.
This class keeps track of the best metric value seen so far, optionally in a
persistent (preemption-safe) way.
Two methods are provided, which each satisfy the `Action` protocol: `test` for
only testing whether a new best metric is achieved by a given train/eval
output, and `commit`, which both tests and records the new best metric value
if it is achieved. These separate methods enable the same `NewBestMetric`
instance to be reused as a condition multiple times, and can also provide
additional preemption/failure safety. For example, to avoid updating the best
metric if a model export fails or is pre-empted:
new_best_metric = orbit.actions.NewBestMetric(
'accuracy', filename='/model/dir/best_metric')
action = orbit.actions.ConditionalAction(
condition=new_best_metric.test,
action=[
orbit.actions.ExportSavedModel(...),
new_best_metric.commit
])
The default `__call__` implementation is equivalent to `commit`.
This class is safe to use in multi-client settings if all clients can be
  guaranteed to compute the same metric. However, when saving metrics it may be
  helpful to avoid unnecessary writes by setting the `write_metric` parameter
  to `False` for most clients.
Attributes:
metric: The metric passed to __init__ (may be a string key or a callable
that can be applied to train/eval output).
higher_is_better: Whether higher metric values are better.
"""
def __init__(self,
metric: Union[str, MetricFn],
higher_is_better: bool = True,
filename: Optional[str] = None,
write_metric=True):
"""Initializes the instance.
Args:
metric: Either a string key name to use to look up a metric (assuming the
train/eval output is a dictionary), or a callable that accepts the
train/eval output and returns a metric value.
higher_is_better: Whether higher metric values are better. If `True`, a
new best metric is achieved when the metric value is strictly greater
than the previous best metric. If `False`, a new best metric is achieved
when the metric value is strictly less than the previous best metric.
filename: A filename to use for storage of the best metric value seen so
        far, to allow persistence of the value across preemptions. If `None`
(default), values aren't persisted.
write_metric: If `filename` is set, this controls whether this instance
will write new best metric values to the file, or just read from the
file to obtain the initial value. Setting this to `False` for most
clients in some multi-client setups can avoid unnecessary file writes.
Has no effect if `filename` is `None`.
"""
self.metric = metric
self.higher_is_better = higher_is_better
float_max = sys.float_info.max
self._best_value = JSONPersistedValue(
initial_value=-float_max if higher_is_better else float_max,
filename=filename,
write_value=write_metric)
def __call__(self, output: runner.Output) -> bool:
"""Tests `output` and updates the current best value if necessary.
This is equivalent to `commit` below.
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
return self.commit(output)
def metric_value(self, output: runner.Output) -> float:
"""Computes the metric value for the given `output`."""
if callable(self.metric):
value = self.metric(output)
else:
value = output[self.metric]
return float(utils.get_value(value))
@property
def best_value(self) -> float:
"""Returns the best metric value seen so far."""
return self._best_value.read()
def test(self, output: runner.Output) -> bool:
"""Tests `output` to see if it contains a new best metric value.
If `output` does contain a new best metric value, this method does *not*
save it (i.e., calling this method multiple times in a row with the same
`output` will continue to return `True`).
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
metric_value = self.metric_value(output)
if self.higher_is_better:
if metric_value > self.best_value:
return True
else: # Lower is better.
if metric_value < self.best_value:
return True
return False
def commit(self, output: runner.Output) -> bool:
"""Tests `output` and updates the current best value if necessary.
Unlike `test` above, if `output` does contain a new best metric value, this
method *does* save it (i.e., subsequent calls to this method with the same
`output` will return `False`).
Args:
output: The train or eval output to test.
Returns:
`True` if `output` contains a new best metric value, `False` otherwise.
"""
if self.test(output):
self._best_value.write(self.metric_value(output))
return True
return False
class JSONPersistedValue:
"""Represents a value that is persisted via a file-based backing store.
The value must be JSON-serializable. Each time the value is updated, it will
be written to the backing file. It is only read from the file at
initialization.
"""
def __init__(self,
initial_value: Any,
filename: str,
write_value: bool = True):
"""Initializes the instance.
Args:
initial_value: The initial value to use if no backing file exists or was
given. This must be a JSON-serializable value (possibly nested
combination of lists, dicts, and primitive values).
filename: The path to use for persistent storage of the value. This may be
`None`, in which case the value is not stable across preemptions.
write_value: If `True`, new values will be written to `filename` on calls
to `write()`. If `False`, `filename` is only read once to restore any
persisted value, and new values will not be written to it. This can be
useful in certain multi-client settings to avoid race conditions or
excessive file writes. If `filename` is `None`, this parameter has no
effect.
"""
self._value = None
self._filename = filename
self._write_value = write_value
if self._filename is not None:
if tf.io.gfile.exists(self._filename):
if tf.io.gfile.stat(self._filename).length > 0:
with tf.io.gfile.GFile(self._filename, 'r') as f:
self._value = json.load(f)
elif self._write_value:
tf.io.gfile.makedirs(os.path.dirname(self._filename))
if self._value is None:
self.write(initial_value)
def read(self):
"""Returns the value."""
return self._value
def write(self, value):
"""Writes the value, updating the backing store if one was provided."""
self._value = value
if self._filename is not None and self._write_value:
# To achieve atomic writes, we first write to a temporary file, and then
# rename it to `self._filename`.
tmp_filename = f'{self._filename}.tmp.{uuid.uuid4().hex}'
with tf.io.gfile.GFile(tmp_filename, 'w') as f:
json.dump(self._value, f)
tf.io.gfile.rename(tmp_filename, self._filename, overwrite=True)
| 8,479 | 37.026906 | 80 | py |
models | models-master/orbit/actions/__init__.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines an "action" abstraction for use with `orbit.Controller`.
"Actions" are simply arbitrary callables that are applied by the `Controller`
to the output of train steps (after each inner loop of `steps_per_loop` steps)
or an evaluation. This provides a hook mechanism, enabling things like reporting
metrics to Vizier, model exporting, additional logging, etc.
The basic `Action` abstraction (just a type alias) is defined in the
`controller` module. This `actions` module adds a `ConditionalAction` utility
class to make it easy to trigger actions conditionally based on reusable
predicates, as well as a small handful of predefined conditions/actions (in
particular, a `NewBestMetric` condition and an `ExportSavedModel` action).
One example of using actions to do metric-conditional export:
new_best_metric = orbit.actions.NewBestMetric('accuracy')
export_action = orbit.actions.ConditionalAction(
condition=lambda x: x['accuracy'] > 0.9 and new_best_metric(x),
action=orbit.actions.ExportSavedModel(
model,
orbit.actions.ExportFileManager(
base_name=f'{FLAGS.model_dir}/saved_model',
next_id_fn=trainer.global_step.numpy),
signatures=model.infer))
controller = orbit.Controller(
strategy=strategy,
trainer=trainer,
evaluator=evaluator,
eval_actions=[export_action],
global_step=trainer.global_step,
steps_per_loop=FLAGS.steps_per_loop,
checkpoint_manager=checkpoint_manager,
summary_interval=1000)
Note: In multi-client settings where each client runs its own `Controller`
instance, some care should be taken in deciding which clients should run certain
actions. Isolating actions to an individual client (say client 0) can be
achieved using `ConditionalAction` as follows:
client_0_actions = orbit.actions.ConditionalAction(
condition=lambda _: client_id() == 0,
action=[
...
])
In particular, the `NewBestMetric` condition may be used in multi-client
settings if all clients are guaranteed to compute the same metric (ensuring this
is up to client code, not Orbit). However, when saving metrics it may be helpful
to avoid unnecessary writes by setting the `write_value` parameter to `False`
for most clients.
"""
from orbit.actions.conditional_action import ConditionalAction
from orbit.actions.export_saved_model import ExportFileManager
from orbit.actions.export_saved_model import ExportSavedModel
from orbit.actions.new_best_metric import JSONPersistedValue
from orbit.actions.new_best_metric import NewBestMetric
from orbit.actions.save_checkpoint_if_preempted import SaveCheckpointIfPreempted
| 3,327 | 42.220779 | 80 | py |
models | models-master/orbit/examples/__init__.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 604 | 39.333333 | 74 | py |
models | models-master/orbit/examples/single_task/single_task_evaluator.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An evaluator object that can evaluate models with a single output."""
import orbit
import tensorflow as tf
class SingleTaskEvaluator(orbit.StandardEvaluator):
"""Evaluates a single-output model on a given dataset.
This evaluator will handle running a model with one output on a single
dataset, and will apply the output of that model to one or more
`tf.keras.metrics.Metric` objects.
"""
def __init__(self,
eval_dataset,
label_key,
model,
metrics,
evaluator_options=None):
"""Initializes a `SingleTaskEvaluator` instance.
If the `SingleTaskEvaluator` should run its model under a distribution
strategy, it should be created within that strategy's scope.
Arguments:
eval_dataset: A `tf.data.Dataset` or `DistributedDataset` that contains a
string-keyed dict of `Tensor`s.
label_key: The key corresponding to the label value in feature
dictionaries dequeued from `eval_dataset`. This key will be removed from
the dictionary before it is passed to the model.
model: A `tf.Module` or Keras `Model` object to evaluate.
metrics: A single `tf.keras.metrics.Metric` object, or a list of
`tf.keras.metrics.Metric` objects.
evaluator_options: An optional `orbit.StandardEvaluatorOptions` object.
"""
self.label_key = label_key
self.model = model
self.metrics = metrics if isinstance(metrics, list) else [metrics]
# Capture the strategy from the containing scope.
self.strategy = tf.distribute.get_strategy()
super(SingleTaskEvaluator, self).__init__(
eval_dataset=eval_dataset, options=evaluator_options)
def eval_begin(self):
"""Actions to take once before every eval loop."""
for metric in self.metrics:
metric.reset_states()
def eval_step(self, iterator):
"""One eval step. Called multiple times per eval loop by the superclass."""
def step_fn(inputs):
# Extract the target value and delete it from the input dict, so that
# the model never sees it.
target = inputs.pop(self.label_key)
output = self.model(inputs)
for metric in self.metrics:
metric.update_state(target, output)
# This is needed to handle distributed computation.
self.strategy.run(step_fn, args=(next(iterator),))
def eval_end(self):
"""Actions to take once after an eval loop."""
with self.strategy.scope():
# Export the metrics.
metrics = {metric.name: metric.result() for metric in self.metrics}
return metrics
| 3,195 | 35.735632 | 80 | py |
models | models-master/orbit/examples/single_task/__init__.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 604 | 39.333333 | 74 | py |
models | models-master/orbit/examples/single_task/single_task_trainer_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_trainer."""
import orbit
from orbit.examples.single_task import single_task_trainer
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskTrainerTest(tf.test.TestCase):
def test_single_task_training(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32).repeat()
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3),
tf.keras.layers.Softmax(),
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
controller = orbit.Controller(
trainer=trainer,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(1)
start_loss = trainer.train_loss.result().numpy()
controller.train(500)
end_loss = trainer.train_loss.result().numpy()
# Assert that the model has trained 'significantly' - that the loss
# has dropped by over 50%.
self.assertLess(end_loss, start_loss / 2)
if __name__ == '__main__':
tf.test.main()
| 1,961 | 31.163934 | 74 | py |
models | models-master/orbit/examples/single_task/single_task_trainer.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trainer object that can train models with a single output."""
import orbit
import tensorflow as tf
class SingleTaskTrainer(orbit.StandardTrainer):
"""Trains a single-output model on a given dataset.
This trainer will handle running a model with one output on a single
dataset. It will apply the provided loss function to the model's output
to calculate gradients and will apply them via the provided optimizer. It will
also supply the output of that model to one or more `tf.keras.metrics.Metric`
objects.
"""
def __init__(self,
train_dataset,
label_key,
model,
loss_fn,
optimizer,
metrics=None,
trainer_options=None):
"""Initializes a `SingleTaskTrainer` instance.
If the `SingleTaskTrainer` should run its model under a distribution
strategy, it should be created within that strategy's scope.
This trainer will also calculate metrics during training. The loss metric
is calculated by default, but other metrics can be passed to the `metrics`
arg.
Arguments:
train_dataset: A `tf.data.Dataset` or `DistributedDataset` that contains a
string-keyed dict of `Tensor`s.
label_key: The key corresponding to the label value in feature
dictionaries dequeued from `train_dataset`. This key will be removed
from the dictionary before it is passed to the model.
      model: A `tf.Module` or Keras `Model` object to train. It must accept a
`training` kwarg.
loss_fn: A per-element loss function of the form (target, output). The
output of this loss function will be reduced via `tf.reduce_mean` to
create the final loss. We recommend using the functions in the
`tf.keras.losses` package or `tf.keras.losses.Loss` objects with
`reduction=tf.keras.losses.reduction.NONE`.
optimizer: A `tf.keras.optimizers.Optimizer` instance.
metrics: A single `tf.keras.metrics.Metric` object, or a list of
`tf.keras.metrics.Metric` objects.
trainer_options: An optional `orbit.utils.StandardTrainerOptions` object.
"""
self.label_key = label_key
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
# Capture the strategy from the containing scope.
self.strategy = tf.distribute.get_strategy()
# We always want to report training loss.
self.train_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
# We need self.metrics to be an iterable later, so we handle that here.
if metrics is None:
self.metrics = []
elif isinstance(metrics, list):
self.metrics = metrics
else:
self.metrics = [metrics]
super(SingleTaskTrainer, self).__init__(
train_dataset=train_dataset, options=trainer_options)
def train_loop_begin(self):
"""Actions to take once, at the beginning of each train loop."""
self.train_loss.reset_states()
for metric in self.metrics:
metric.reset_states()
def train_step(self, iterator):
"""A train step. Called multiple times per train loop by the superclass."""
def train_fn(inputs):
with tf.GradientTape() as tape:
# Extract the target value and delete it from the input dict, so that
# the model never sees it.
target = inputs.pop(self.label_key)
# Get the outputs of the model.
output = self.model(inputs, training=True)
# Get the average per-batch loss and scale it down by the number of
# replicas. This ensures that we don't end up multiplying our loss by
# the number of workers - gradients are summed, not averaged, across
# replicas during the apply_gradients call.
# Note, the reduction of loss is explicitly handled and scaled by
# num_replicas_in_sync. Recommend to use a plain loss function.
# If you're using tf.keras.losses.Loss object, you may need to set
# reduction argument explicitly.
loss = tf.reduce_mean(self.loss_fn(target, output))
scaled_loss = loss / self.strategy.num_replicas_in_sync
# Get the gradients by applying the loss to the model's trainable
# variables.
gradients = tape.gradient(scaled_loss, self.model.trainable_variables)
# Apply the gradients via the optimizer.
self.optimizer.apply_gradients(
list(zip(gradients, self.model.trainable_variables)))
# Update metrics.
self.train_loss.update_state(loss)
for metric in self.metrics:
metric.update_state(target, output)
# This is needed to handle distributed computation.
self.strategy.run(train_fn, args=(next(iterator),))
def train_loop_end(self):
"""Actions to take once after a training loop."""
with self.strategy.scope():
# Export the metrics.
metrics = {metric.name: metric.result() for metric in self.metrics}
metrics[self.train_loss.name] = self.train_loss.result()
return metrics
| 5,647 | 39.056738 | 80 | py |
models | models-master/orbit/examples/single_task/single_task_evaluator_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_evaluator."""
import orbit
from orbit.examples.single_task import single_task_evaluator
from orbit.examples.single_task import single_task_trainer
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskEvaluatorTest(tf.test.TestCase):
def test_single_task_evaluation(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32)
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01))
evaluator = single_task_evaluator.SingleTaskEvaluator(
train_ds,
label_key='label',
model=model,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
controller = orbit.Controller(
trainer=trainer,
evaluator=evaluator,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(train_ds.cardinality().numpy())
controller.evaluate()
accuracy = evaluator.metrics[0].result().numpy()
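    # Note: this asserts accuracy < 0.925; after only a few training steps the
    # model is not expected to reach high accuracy yet.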
self.assertGreater(0.925, accuracy)
if __name__ == '__main__':
tf.test.main()
| 2,088 | 30.651515 | 80 | py |
models | models-master/orbit/utils/tpu_summaries_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.utils.tpu_summaries."""
import functools
import os
from orbit.utils import common
from orbit.utils import tpu_summaries
import tensorflow as tf
class TrainFunctionWithSummaries(tpu_summaries.OptionalSummariesFunction):
"""Implements a two-program approach for summaries on TPU."""
def __call__(self, num_steps):
if tf.summary.should_record_summaries():
output = self.with_summaries(tf.constant(1))
num_steps -= 1
if num_steps >= 1:
output = self.without_summaries(num_steps)
return output
def train_function_with_summaries(function=None, **kwargs):
if function is not None:
return TrainFunctionWithSummaries(function, **kwargs)
return functools.partial(TrainFunctionWithSummaries, **kwargs)
class DummyTrainer(tf.Module):
def __init__(self):
self.step_counter = common.create_global_step()
@train_function_with_summaries
def train_with_tpu_summary_optimization(self, num_steps):
for _ in tf.range(num_steps):
tf.summary.scalar("step", self.step_counter, step=self.step_counter)
self.step_counter.assign_add(1)
return self.step_counter
@train_function_with_summaries(
input_signature=[tf.TensorSpec((), dtype=tf.int32)])
def train_with_tpu_summary_optimization_and_input_signature(self, num_steps):
for _ in tf.range(num_steps):
tf.summary.scalar("step", self.step_counter, step=self.step_counter)
self.step_counter.assign_add(1)
return self.step_counter
def train_with_tpu_summary_optimization_no_decorator(self, num_steps):
for _ in tf.range(num_steps):
tf.summary.scalar("step", self.step_counter, step=self.step_counter)
self.step_counter.assign_add(1)
return self.step_counter
class TpuSummariesTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.trainer = DummyTrainer()
def _get_events_from_logdir(self, logdir):
event_files = tf.io.gfile.listdir(logdir)
self.assertLen(event_files, 1)
path = os.path.join(logdir, event_files[0])
events = list(tf.compat.v1.train.summary_iterator(path))
return [event for event in events if event.WhichOneof("what") == "summary"]
def _validate_tpu_summary_optimization(self, function, *args, **kwargs):
logdir = self.get_temp_dir()
with tf.summary.create_file_writer(logdir).as_default():
with tf.summary.record_if(lambda: self.trainer.step_counter % 20 == 0):
for _ in range(4):
output = function(tf.constant(10), *args, **kwargs)
events = self._get_events_from_logdir(logdir)
self.assertLen(events, 2)
self.assertEqual(events[0].step, 0)
self.assertEqual(events[1].step, 20)
return output
def test_train_with_tpu_summary_optimization(self):
output = self._validate_tpu_summary_optimization(
self.trainer.train_with_tpu_summary_optimization)
self.assertEqual(output, self.trainer.step_counter.numpy())
def test_train_with_tpu_summary_optimization_no_decorator(self):
optimized = train_function_with_summaries(
self.trainer.train_with_tpu_summary_optimization_no_decorator)
output = self._validate_tpu_summary_optimization(optimized)
self.assertEqual(output, self.trainer.step_counter.numpy())
def test_train_with_tpu_summary_optimization_and_input_signature(self):
output = self._validate_tpu_summary_optimization(
self.trainer.train_with_tpu_summary_optimization_and_input_signature)
self.assertEqual(output, self.trainer.step_counter.numpy())
function = self.trainer.train_with_tpu_summary_optimization_and_input_signature
expected = (tf.TensorSpec((), dtype=tf.int32),)
input_signature = function.with_summaries.input_signature
self.assertEqual(input_signature, expected)
input_signature = function.without_summaries.input_signature
self.assertEqual(input_signature, expected)
if __name__ == "__main__":
tf.test.main()
| 4,513 | 36.305785 | 83 | py |
models | models-master/orbit/utils/epoch_helper.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for training in epochs."""
import tensorflow as tf
class EpochHelper:
"""A helper class handle bookkeeping of epochs in custom training loops."""
def __init__(self, epoch_steps: int, global_step: tf.Variable):
"""Initializes the `EpochHelper` instance.
Args:
epoch_steps: An integer indicating how many steps are in an epoch.
global_step: A `tf.Variable` providing the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch.")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
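# A minimal usage sketch (not part of the module API): `run_train_step` is a
# hypothetical placeholder for one training step of a custom loop.
def _example_epoch_loop(run_train_step, epoch_steps, total_steps):
  global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
  helper = EpochHelper(epoch_steps=epoch_steps, global_step=global_step)
  for _ in range(total_steps):
    if helper.epoch_begin():
      print("Starting epoch", helper.current_epoch)
    run_train_step()
    global_step.assign_add(1)
    if helper.epoch_end():
      print("Finished epoch", helper.current_epoch)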
| 2,136 | 31.378788 | 77 | py |
models | models-master/orbit/utils/tpu_summaries.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities for TPU summary optimization."""
import contextlib
import functools
import tensorflow as tf
@contextlib.contextmanager
def _soft_device_placement():
"""Context manager for soft device placement, allowing summaries on CPU."""
original_setting = tf.config.get_soft_device_placement()
try:
tf.config.set_soft_device_placement(True)
yield
finally:
tf.config.set_soft_device_placement(original_setting)
class OptionalSummariesFunction:
"""Wrapper that provides versions of a function with and without summaries.
This is a utility class for implementing optimized summary recording via a
two-function approach, specifically important for TPUs. Two `tf.function`
versions of a given `function` are created: one with soft device placement
enabled (for use on steps that require summary writing), and one with summary
writing and soft device placement entirely disabled (for use on all other
steps). This removes any performance impact of summaries on steps where they
aren't recorded (b/148418718).
This class can be used as a base class to implement summary optimizations for
a function with a specific signature. For example, to implement efficient TPU
summaries for a standard `train()` method (as in `orbit.AbstractTrainer`):
class TrainFunctionWithSummaries(orbit.utils.OptionalSummariesFunction):
'''Implements a two-program approach for summaries on TPU.'''
def __call__(self, num_steps):
if tf.summary.should_record_summaries():
output = self.with_summaries(tf.constant(1))
num_steps -= 1
if num_steps >= 1:
output = self.without_summaries(num_steps)
return output
This can be used directly or to implement a decorator:
def train_function_with_summaries(function=None, **kwargs):
if function is not None:
return TrainFunctionWithSummaries(function, **kwargs)
return functools.partial(TrainFunctionWithSummaries, **kwargs)
The decorator can be applied directly to `train()` methods:
@train_function_with_summaries
def train(self, num_steps):
...
  A similar approach can be implemented for functions with different
signatures.
Note: The above approach assumes that the frequency of summary writing is
based on a step interval that is divisible by the number of steps executed
in each call to the `train()` function. This is enforced by the
`orbit.Controller`.
This wrapper properly handles instance methods (see `__get__`).
Attributes:
with_summaries: A wrapped version of the underlying function with summaries
enabled (using whatever the active predicate is for
`tf.summary.record_if`), and placed inside a "soft device placement"
context to enable summary recording on TPU.
without_summaries: A wrapped version of the underlying function with all
summary recording disabled.
"""
def __init__(self, function, **tf_function_kwargs):
"""Constructs an instance wrapping the given `function`.
The given `function` is wrapped twice: Once in a "soft device placement"
context (allowing summaries to also run on TPU), and once with summary
recording entirely disabled.
Both of these versions are compiled via `tf.function` (optionally using any
supplied `tf.function` settings), and made available as attributes.
Args:
function: The underlying function to wrap.
**tf_function_kwargs: Additional arguments to pass to `tf.function`.
"""
@tf.function(**tf_function_kwargs)
@functools.wraps(function)
def with_summaries(*args, **kwargs):
with _soft_device_placement():
return function(*args, **kwargs)
@tf.function(**tf_function_kwargs)
@functools.wraps(function)
def without_summaries(*args, **kwargs):
with tf.summary.record_if(False):
return function(*args, **kwargs)
self.with_summaries = with_summaries
self.without_summaries = without_summaries
def __get__(self, instance, owner):
"""Allows this class to be used to wrap methods as well as free functions.
For `tf.function` to work properly in all cases (e.g., when an
input_signature is specified), any `tf.function`-converted methods must be
properly bound to an instance if they are called as an instance method.
This is done by implementing this `__get__` method of the descriptor
protocol, and forwarding to the `__get__` method on the underlying
`tf.function`s.
Args:
instance: The instance to bind to.
owner: The class type of the instance.
Returns:
      A new bound instance of `OptionalSummariesFunction`.
"""
new = object.__new__(self.__class__)
# pytype: disable=attribute-error # See b/162476201.
new.with_summaries = self.with_summaries.__get__(instance, owner)
new.without_summaries = self.without_summaries.__get__(instance, owner)
# pytype: enable=attribute-error
return new
| 5,631 | 37.575342 | 79 | py |
models | models-master/orbit/utils/common_test.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.utils.common."""
from orbit.utils import common
import tensorflow as tf
class UtilsTest(tf.test.TestCase):
def test_create_global_step(self):
step = common.create_global_step()
self.assertEqual(step.name, "global_step:0")
self.assertEqual(step.dtype, tf.int64)
self.assertEqual(step, 0)
step.assign_add(1)
self.assertEqual(step, 1)
if __name__ == "__main__":
tf.test.main()
| 1,032 | 28.514286 | 74 | py |
models | models-master/orbit/utils/summary_manager_interface.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for managing summary writing."""
import abc
class SummaryManagerInterface(abc.ABC):
"""A utility interface for managing summary writing."""
@abc.abstractmethod
def flush(self):
"""Flushes the the recorded summaries."""
raise NotImplementedError
@abc.abstractmethod
def summary_writer(self, relative_path=""):
"""Returns the underlying summary writer for scoped writers."""
raise NotImplementedError
@abc.abstractmethod
def write_summaries(self, summary_dict):
"""Writes summaries for the given dictionary of values.
The summary_dict can be any nested dict. The SummaryManager should
    recursively create summaries, yielding a hierarchy of summaries which will
then be reflected in the corresponding UIs.
For example, users may evaluate on multiple datasets and return
`summary_dict` as a nested dictionary:
{
"dataset1": {
"loss": loss1,
"accuracy": accuracy1
},
"dataset2": {
"loss": loss2,
"accuracy": accuracy2
},
}
    This will create two sets of summaries, "dataset1" and "dataset2". Each
summary dict will contain summaries including both "loss" and "accuracy".
Args:
summary_dict: A dictionary of values. If any value in `summary_dict` is
itself a dictionary, then the function will create a new summary_dict
with name given by the corresponding key. This is performed recursively.
Leaf values are then summarized using the parent relative path.
"""
raise NotImplementedError
| 2,253 | 33.676923 | 80 | py |
models | models-master/orbit/utils/common.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some layered modules/functions to help users writing custom training loop."""
import inspect
import tensorflow as tf
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
name="global_step",
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
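# A minimal usage sketch: `run_train_step` is a hypothetical callable that
# performs one parameter update; the step is advanced once per loop iteration.
def _example_global_step_usage(run_train_step, num_steps):
  step = create_global_step()
  for _ in range(num_steps):
    run_train_step()
    step.assign_add(1)
  return step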
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A utility function to help create a `tf.distribute.DistributedDataset`.
Args:
strategy: An instance of `tf.distribute.Strategy`.
    dataset_or_fn: An instance of `tf.data.Dataset`, or a "dataset function"
returning a `tf.data.Dataset`. If it is a function, it may optionally have
an argument named `input_context` which will be passed a
`tf.distribute.InputContext` instance.
*args: Any positional arguments to pass through to `dataset_or_fn`.
**kwargs: Any keyword arguments to pass through to `dataset_or_fn`, except
that the `input_options` keyword is used to specify a
`tf.distribute.InputOptions` for making the distributed dataset.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
input_options = kwargs.pop("input_options", None)
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn,
input_options)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`.")
def dataset_fn(input_context):
"""Wraps `dataset_or_fn` for strategy.distribute_datasets_from_function."""
# If `dataset_or_fn` is a function and has an argument named
# `input_context`, pass through the given `input_context`. Otherwise
# `input_context` will be ignored.
argspec = inspect.getfullargspec(dataset_or_fn)
arg_names = argspec.args
if "input_context" in arg_names:
kwargs["input_context"] = input_context
return dataset_or_fn(*args, **kwargs)
return strategy.distribute_datasets_from_function(dataset_fn, input_options)
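# A minimal sketch of building a distributed dataset from a dataset function;
# it assumes the default strategy and shards the data by input pipeline.
def _example_make_distributed_dataset():
  def dataset_fn(input_context):
    ds = tf.data.Dataset.range(8).batch(2)
    return ds.shard(input_context.num_input_pipelines,
                    input_context.input_pipeline_id)
  return make_distributed_dataset(tf.distribute.get_strategy(), dataset_fn)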
def get_value(x):
"""Returns input values, converting any TensorFlow values to NumPy values.
Args:
x: The input. May be a `tf.Tensor` or `tf.Variable`.
Returns:
If the input is a TensorFlow `Tensor`, returns the `Tensor`'s equivalent
NumPy value. Otherwise, just returns the input.
"""
if not tf.is_tensor(x):
return x
return x.numpy()
| 3,913 | 35.924528 | 80 | py |
models | models-master/orbit/utils/__init__.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines exported symbols for the `orbit.utils` package."""
from orbit.utils.common import create_global_step
from orbit.utils.common import get_value
from orbit.utils.common import make_distributed_dataset
from orbit.utils.epoch_helper import EpochHelper
from orbit.utils.loop_fns import create_loop_fn
from orbit.utils.loop_fns import create_tf_while_loop_fn
from orbit.utils.loop_fns import LoopFnWithSummaries
from orbit.utils.summary_manager import SummaryManager
from orbit.utils.summary_manager_interface import SummaryManagerInterface
from orbit.utils.tpu_summaries import OptionalSummariesFunction
| 1,218 | 38.322581 | 74 | py |
models | models-master/orbit/utils/summary_manager.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for managing summary writing."""
import os
from orbit.utils.summary_manager_interface import SummaryManagerInterface
import tensorflow as tf
class SummaryManager(SummaryManagerInterface):
"""A utility class for managing summary writing."""
def __init__(self, summary_dir, summary_fn, global_step=None):
"""Initializes the `SummaryManager` instance.
Args:
summary_dir: The directory in which to write summaries. If `None`, all
summary writing operations provided by this class are no-ops.
summary_fn: A callable defined accepting `name`, `value`, and `step`
parameters, making calls to `tf.summary` functions to write summaries.
global_step: A `tf.Variable` containing the global step value.
"""
self._enabled = summary_dir is not None
self._summary_dir = summary_dir
self._summary_fn = summary_fn
self._summary_writers = {}
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
def summary_writer(self, relative_path=""):
"""Returns the underlying summary writer for a specific subdirectory.
Args:
relative_path: The current path in which to write summaries, relative to
the summary directory. By default it is empty, which corresponds to the
root directory.
"""
if self._summary_writers and relative_path in self._summary_writers:
return self._summary_writers[relative_path]
if self._enabled:
self._summary_writers[relative_path] = tf.summary.create_file_writer(
os.path.join(self._summary_dir, relative_path))
else:
self._summary_writers[relative_path] = tf.summary.create_noop_writer()
return self._summary_writers[relative_path]
def flush(self):
"""Flushes the underlying summary writers."""
if self._enabled:
tf.nest.map_structure(tf.summary.flush, self._summary_writers)
def write_summaries(self, summary_dict):
"""Writes summaries for the given dictionary of values.
This recursively creates subdirectories for any nested dictionaries
provided in `summary_dict`, yielding a hierarchy of directories which will
then be reflected in the TensorBoard UI as different colored curves.
For example, users may evaluate on multiple datasets and return
`summary_dict` as a nested dictionary:
{
"dataset1": {
"loss": loss1,
"accuracy": accuracy1
},
"dataset2": {
"loss": loss2,
"accuracy": accuracy2
},
}
This will create two subdirectories, "dataset1" and "dataset2", inside the
summary root directory. Each directory will contain event files including
both "loss" and "accuracy" summaries.
Args:
summary_dict: A dictionary of values. If any value in `summary_dict` is
itself a dictionary, then the function will create a subdirectory with
name given by the corresponding key. This is performed recursively. Leaf
values are then summarized using the summary writer instance specific to
the parent relative path.
"""
if not self._enabled:
return
self._write_summaries(summary_dict)
def _write_summaries(self, summary_dict, relative_path=""):
for name, value in summary_dict.items():
if isinstance(value, dict):
self._write_summaries(
value, relative_path=os.path.join(relative_path, name))
else:
with self.summary_writer(relative_path).as_default():
self._summary_fn(name, value, step=self._global_step)
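# A minimal usage sketch (`logdir` is a hypothetical output directory): scalar
# summaries are written at the given step, with the nested "eval" dict landing
# in an "eval/" subdirectory.
def _example_write_summaries(logdir):
  step = tf.Variable(0, dtype=tf.int64, trainable=False)
  manager = SummaryManager(logdir, tf.summary.scalar, global_step=step)
  manager.write_summaries({"loss": 0.25, "eval": {"accuracy": 0.9}})
  manager.flush()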
| 4,277 | 36.858407 | 80 | py |
models | models-master/orbit/utils/loop_fns.py | # Copyright 2023 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for creating loop functions."""
from absl import logging
from orbit.utils import tpu_summaries
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a loop function driven by a Python `while` loop.
Args:
step_fn: A function taking a nested structure of `tf.data.Iterator` or
`DistributedIterator`. There are no constraints on the return value of the
function (except that it must be compatible with any `reduce_fn` provided
to the returned `loop_fn`).
Returns:
A loop function taking required `iterator` and `num_steps` parameters, as
well as optional `state` and `reduce_fn` parameters for accumulating state
over multiple iterations of the loop. See the `loop_fn` definition below for
additional details.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""Makes `num_steps` calls to `step_fn(iterator)`.
Additionally, state may be accumulated across iterations of the loop.
Conceptually, state accumulation is handled roughly as follows:
for _ in range(num_steps):
step_outputs = step_fn(iterator)
state = reduce_fn(state, step_outputs)
return state
However, the implementation is slightly more complicated in order to support
looping until the iterator is exhausted (when `num_steps == -1`) and to
properly catch exceptions when running under async remote eager (as is the
case in TPU training setups involving separate coordinator/worker machines).
Args:
iterator: A nested structure of `tf.data.Iterator` or
`DistributedIterator`.
      num_steps: The number of steps in the loop. If `num_steps == -1`, the
        loop will iterate until the iterator is exhausted.
state: An optional initial state before running the loop.
reduce_fn: A callable taking two inputs, `state` and `value`, where
`state` is the previous output from `reduce_fn`, and `value` is the
output from `step_fn`.
Returns:
The final state returned by `reduce_fn`, or `None` if `state` and
`reduce_fn` are not provided.
"""
step = 0
try:
# To make sure the OutOfRangeError exception can be handled well under
# async remote eager, we need to wrap the loop body in `async_scope`.
with tf.experimental.async_scope():
while num_steps == -1 or step < num_steps:
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
logging.info("The dataset iterator is exhausted after %d steps.", step)
tf.experimental.async_clear_error()
return state
return loop_fn
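# A minimal usage sketch: sum scalar values from a (hypothetical) dataset of
# per-step losses, iterating until the dataset is exhausted.
def _example_create_loop_fn(dataset):
  loop_fn = create_loop_fn(lambda it: next(it))
  return loop_fn(iter(dataset), num_steps=-1, state=0.0,
                 reduce_fn=lambda state, value: state + float(value))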
def create_tf_while_loop_fn(step_fn):
"""Creates a loop function compatible with TF's AutoGraph loop conversion.
Args:
step_fn: A function taking a nested structure of `tf.data.Iterator` or
`DistributedIterator`. Currently, any return values are ignored.
Returns:
A loop function taking required `iterator` and `num_steps` parameters. If
called inside a `tf.function`, the loop will be converted by AutoGraph into
a `tf.while_loop` construct. See the `loop_fn` definition below for
additional details.
"""
def loop_fn(iterator, num_steps):
"""Makes `num_steps` calls to `step_fn(iterator)`.
Args:
iterator: A nested structure of `tf.data.Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Should be passed as a
`tf.Tensor`. Iterating until iterator exhaustion is not supported.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError(
"`num_steps` should be a `tf.Tensor`. Passing a Python value can "
"cause unnecessary retracing when wrapped by `tf.function`.")
for _ in tf.range(num_steps):
# Clear out the outer name scope so the ops created inside `tf.while_loop`
# don't get "while/" as name prefix.
with tf.name_scope(""):
step_fn(iterator)
return loop_fn
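# A minimal usage sketch: wrap the loop in a `tf.function` so AutoGraph turns
# it into a `tf.while_loop`, and pass `num_steps` as a tensor to avoid
# retracing. `train_step` and `iterator` are hypothetical stand-ins.
def _example_tf_while_loop(train_step, iterator, num_steps):
  loop_fn = tf.function(create_tf_while_loop_fn(train_step))
  loop_fn(iterator, tf.constant(num_steps, dtype=tf.int32))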
def create_tf_while_loop_fn_with_state(step_fn):
"""Creates a TF while loop function with state.
This function is similar to `create_tf_while_loop_fn`, but allowing a `state`
to be accumulated over multiple iterations of the loop. Note that the
structure of the `state` cannot be changed across iterations.
Args:
step_fn: A function taking a nested structure of `tf.data.Iterator` or
`DistributedIterator`. Currently, any return values are ignored.
Returns:
A loop function taking required `iterator`, `num_steps`, `state` and
`reduce_fn` parameters. If called inside a `tf.function`, the loop will be
converted by AutoGraph into a `tf.while_loop` construct. See the `loop_fn`
definition below for additional details.
"""
def loop_fn_with_state(iterator, num_steps, state, reduce_fn):
"""Makes `num_steps` calls to `step_fn(iterator)`.
Args:
iterator: A nested structure of `tf.data.Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Should be passed as a
`tf.Tensor`. Iterating until iterator exhaustion is not supported.
state: An initial state before running the loop.
reduce_fn: A callable taking two inputs, `state` and `value`, where
`state` is the previous output from `reduce_fn`, and `value` is the
output from `step_fn`.
Returns:
The final state returned by `reduce_fn`.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError(
"`num_steps` should be a `tf.Tensor`. Passing a Python value can "
"cause unnecessary retracing when wrapped by `tf.function`.")
def _get_relaxed_tensor_shape(t):
"""Returns a `TensorShape` with all `None` dimensions."""
if not tf.is_tensor(t):
return None
shape = t.shape
if shape.rank is not None and shape.rank > 0:
return tf.TensorShape([None] * shape.rank)
return shape
def _get_relaxed_shape_structure(s):
"""Returns the relaxed shape of the input nested structure `s`."""
return tf.nest.pack_sequence_as(
state, [_get_relaxed_tensor_shape(t) for t in tf.nest.flatten(s)])
for _ in tf.range(num_steps):
# Clear out the outer name scope so the ops created inside `tf.while_loop`
# don't get "while/" as name prefix.
with tf.name_scope(""):
# Relax the shapes within the loop, so the shape of `state` can change
# across iterations. This is useful to aggregate outputs from each step
# and concat to `state`.
tf.autograph.experimental.set_loop_options(
shape_invariants=[(state, _get_relaxed_shape_structure(state))])
outputs = step_fn(iterator)
state = reduce_fn(state, outputs)
return state
return loop_fn_with_state
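# A minimal usage sketch: concatenate per-step outputs into a growing state
# tensor. It assumes `iterator` iterates over a dataset yielding float32
# vectors; the relaxed shape invariants let `state` grow across iterations.
def _example_loop_with_state(iterator, num_steps):
  loop_fn = tf.function(create_tf_while_loop_fn_with_state(lambda it: next(it)))
  initial = tf.zeros([0], dtype=tf.float32)
  return loop_fn(iterator, tf.constant(num_steps), initial,
                 lambda state, value: tf.concat([state, value], axis=0))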
class LoopFnWithSummaries(tpu_summaries.OptionalSummariesFunction):
"""Implements a two-program approach for optimizing summaries on TPU.
This version works with the result of `create_tf_while_loop_fn`.
"""
def __call__(self, iterator, num_steps):
if tf.summary.should_record_summaries():
output = self.with_summaries(iterator, tf.constant(1))
num_steps -= 1
if num_steps >= 1:
output = self.without_summaries(iterator, num_steps)
return output
| 8,016 | 37.543269 | 80 | py |
models | models-master/tensorflow_models/tensorflow_models_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models imports."""
import tensorflow as tf
import tensorflow_models as tfm
class TensorflowModelsTest(tf.test.TestCase):
def testVisionImport(self):
_ = tfm.vision.layers.SqueezeExcitation(
in_filters=8, out_filters=4, se_ratio=1)
_ = tfm.vision.configs.image_classification.Losses()
def testNLPImport(self):
_ = tfm.nlp.layers.TransformerEncoderBlock(
num_attention_heads=2, inner_dim=10, inner_activation='relu')
_ = tfm.nlp.tasks.TaggingTask(params=tfm.nlp.tasks.TaggingConfig())
def testCommonImports(self):
_ = tfm.hyperparams.Config()
_ = tfm.optimization.LinearWarmup(
after_warmup_lr_sched=0.0, warmup_steps=10, warmup_learning_rate=0.1)
if __name__ == '__main__':
tf.test.main()
| 1,385 | 32.804878 | 77 | py |
models | models-master/tensorflow_models/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Models Libraries."""
# pylint: disable=wildcard-import
from tensorflow_models import nlp
from tensorflow_models import vision
from official import core
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling import tf_utils as utils
| 909 | 36.916667 | 74 | py |
models | models-master/tensorflow_models/nlp/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Models NLP Libraries."""
from official.nlp import tasks
from official.nlp.configs import encoders
from official.nlp.modeling import *
from official.nlp.serving import serving_modules
| 807 | 37.47619 | 74 | py |
models | models-master/tensorflow_models/vision/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Models Vision Libraries."""
from official.vision import configs
from official.vision import serving
from official.vision.modeling import *
from official.vision.ops import *
from official.vision.tasks import *
| 833 | 36.909091 | 74 | py |
cyphercat | cyphercat-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
NAME = 'cyphercat'
DESCRIPTION = 'A toolkit for ML security'
MAINTAINER = 'lab41'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/Lab41/cyphercat'
LICENSE = 'Apache 2.0'
here = os.path.abspath(os.path.dirname(__file__))
def read(path, encoding='utf-8'):
with io.open(path, encoding=encoding) as f:
content = f.read()
return content
def get_install_requirements(path):
content = read(path)
requirements = [req for req in content.split("\n")
if req != '' and not req.startswith('#')]
return requirements
# README
LONG_DESCRIPTION = read(os.path.join(here, 'README.md'))
# Want to read in package version number from __version__.py
about = {}
with io.open(os.path.join(here, 'cyphercat', '__version__.py'), encoding='utf-8') as f:
exec(f.read(), about)
VERSION = about['__version__']
# requirements
INSTALL_REQUIRES = get_install_requirements(os.path.join(here, 'requirements.txt'))
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
license=LICENSE,
long_description=LONG_DESCRIPTION,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
url=URL,
packages=['cyphercat'],
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Security',
'Intended Audience :: Science/Research',
],
#packages=find_packages(),
install_requires=INSTALL_REQUIRES, #external packages as dependencies
setup_requires=['setuptools>=38.6.0'],
scripts=[
'scripts/test_load.py',
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| 3,163 | 26.754386 | 87 | py |
cyphercat | cyphercat-master/Classification_baselines/LibriSpeech/LS_UTILS/Network_funcs.py | import os
import numpy as np
import soundfile as sf # To read .flac files.
import librosa
# For the neural network.
# Install PyBrain, e.g. pip install pybrain.
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.structure import FullConnection
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer
from pybrain.structure import TanhLayer
# Read data from a folder into a list.
def getData(division,speaker,datapath,audioType,durationCheck,deltaT,lim1,lim2,numFeatures,noisy,transform):
    # deltaT is the duration of each audio frame; lim1 & lim2 bound the
    # frequency range, with each retained frequency bin used as a feature.
    # noisy sets the threshold for discarding pauses in speech.
    # division selects the data split, i.e. train, CV, or test.
fname = datapath+division+speaker
subPaths = [v+"/" for v in os.listdir(fname) if v[0] != "."]
dataFiles = []
for subPath in subPaths:
files = [v for v in os.listdir(fname+subPath) if v[0] != "." and audioType in v]
for fil in files:
data,samplerate = sf.read(fname+subPath+fil)
duration = len(data)*1./samplerate
if duration >= durationCheck: dataFiles.append(fname+subPath+fil)
chunksF = []
for fil in dataFiles:
data,samplerate = sf.read(fil)
duration = len(data)*1./samplerate
# Divide audio data into frames, or chunks.
numChunks = int(duration/deltaT)
sizeChunk = int(len(data)/numChunks)
for lp in range(0,numChunks):
chunk = data[lp*sizeChunk:(lp+1)*sizeChunk] # get a chunk of speech.
# np.fft.rfft computes the one-dimensional discrete Fourier Transform of the data
if transform == 'Fourier':
chunksF.append(np.abs(np.fft.rfft(chunk))[lim1:lim2]) # take the FFT.
elif transform == 'Mel':
S = librosa.feature.melspectrogram(y=chunk, sr=samplerate, n_mels=128, fmax=lim2)
                # Note: melspectrogram returns a 2-D (n_mels, frames) array,
                # unlike the 1-D vector produced by the Fourier branch.
                chunksF.append(np.abs(S))
# Delete quiet parts of speech, i.e. pauses.
# Most of the power is in the bottom 50% of frequencies.
mu = np.mean([np.mean(chunksF[i][:numFeatures//2]) for i in range(0,len(chunksF))])
dataF = []
for chunkF in chunksF:
if np.mean(chunkF[:numFeatures//2]) > noisy*mu:
dataF.append(chunkF)
return dataF
# Return data for all speakers.
def getDataSpeakers(division,speakers,datapath,audioType,durationCheck,deltaT,lim1,lim2,numFeatures,noisy,transform):
dataSpeakers = []
for speaker in speakers:
#print("Getting data for speaker: "+speaker)
dataSpeakers.append(getData(division,speaker,datapath,audioType,durationCheck,deltaT,lim1,lim2,numFeatures,noisy, transform))
N = np.sum([np.shape(s)[0] for s in dataSpeakers])
tX = np.mat(np.zeros((N,numFeatures)))
tY = []
speakerIndices = [0] # Index corresponding to start of speaker 'n'
ctr = 0; lp = 0
for dataSpeaker in dataSpeakers:
for j in range(0,len(dataSpeaker)):
for k in range(0,numFeatures):
tX[ctr,k] = dataSpeaker[j][k]
tY.append(lp)
ctr += 1
speakerIndices.append(ctr)
lp += 1
return tX,tY,speakerIndices
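# A minimal sketch of a call; every value below is a hypothetical placeholder
# (paths, speaker IDs and feature ranges depend on the local data layout).
def exampleGetDataSpeakers():
    tX, tY, idx = getDataSpeakers(division="train-clean-100/",
                                  speakers=["19/", "26/"],
                                  datapath="./LibriSpeech/",
                                  audioType=".flac",
                                  durationCheck=3.0,  # keep clips >= 3 s
                                  deltaT=0.2,         # 0.2 s frames
                                  lim1=0, lim2=300,   # FFT bin range
                                  numFeatures=300,    # = lim2 - lim1
                                  noisy=0.5,          # pause threshold
                                  transform='Fourier')
    return tX, tY, idx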
# This is the architecture of the network.
# Hyper-parameters to be fixed through cross-validation are:
# (i) How many layers are necessary?
# (ii) How many nodes per layer?
# (iii) What kind of activation function to use?
#
def setupNetwork(numHiddenNodes,numHiddenLayers,numFeatures,numSpeakers):
nn = FeedForwardNetwork()
inputLayer = LinearLayer(numFeatures)
nn.addInputModule(inputLayer)
hiddenLayers = []
for x in range(numHiddenLayers):
hiddenLayer = TanhLayer(numHiddenNodes)
nn.addModule(hiddenLayer)
hiddenLayers.append(hiddenLayer)
outputLayer = SoftmaxLayer(numSpeakers)
nn.addOutputModule(outputLayer)
inputConnection = FullConnection(inputLayer,hiddenLayers[0])
nn.addConnection(inputConnection)
for x in range(numHiddenLayers-1):
        # Chain consecutive hidden layers together (layer x feeds layer x+1).
        connect = FullConnection(hiddenLayers[x], hiddenLayers[x+1])
nn.addConnection(connect)
outputConnection = FullConnection(hiddenLayers[numHiddenLayers-1],outputLayer)
nn.addConnection(outputConnection)
nn.sortModules()
return nn
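# A minimal sketch: 300 input features, two tanh hidden layers of 64 nodes
# each, and a softmax over 5 speakers (all sizes are hypothetical).
def exampleSetupNetwork():
    return setupNetwork(numHiddenNodes=64, numHiddenLayers=2,
                        numFeatures=300, numSpeakers=5)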
# Test the classifier.
# nns is a list of trained networks. It is sometimes helpful to pass more than one network,
# since errors made by different networks may cancel out.
# tX: Data to test
# tY: Target, i.e. speaker ID.
# idx: List of indices indicating the starting location of a speaker.
# skip: Number of increments of 'deltaT' to group together. For e.g. if 'deltaT' = 0.2:
# If skip = 1, a prediction is made for every 0.2 seconds.
# If skip = 5, a prediction is made for every 1.0 second.
#
def tstClassifier(nns,tX,tY,idx,skip,numSpeakers,numFeatures):
def maxIdx(A):
        # Pick the prediction with the highest occurrence.
ctr = {}
for pre in A:
if pre not in ctr: ctr[pre] = 1
else: ctr[pre] += 1
rev = {}
for key in ctr.keys():
rev[ctr[key]] = key
return rev[np.max(list(rev.keys()))]
# Confusion matrix: Speaker 'm' predicted as speaker 'n'.
confusion = np.mat(np.zeros((numSpeakers,numSpeakers)))
correct = 0; al = 0
for cvi in range(0,numSpeakers):
# idx contain the start location of each speaker.
for lpx in range(idx[cvi],idx[cvi+1]-skip,skip):
bestArray = []
# Consider "skip" number of data points together.
for lp in range(lpx,lpx+skip):
A = [tX[lp,i] for i in range(0,numFeatures)]
prediction = []
# Average over multiple trained networks.
for nn in nns:
pred = nn.activate(A)
ctr = {}
for i in range(0,numSpeakers):
ctr[pred[i]] = i
prediction.append(ctr[np.max(list(ctr.keys()))])
bestArray.append(maxIdx(prediction))
best = maxIdx(bestArray)
if best == tY[lpx]: correct += 1
# Populate the confusion matrix.
for i in range(0,numSpeakers):
if best == i: confusion[cvi,i] += 1
al += 1
return correct*1./al, confusion | 6,501 | 35.734463 | 129 | py |
cyphercat | cyphercat-master/Classification_baselines/LibriSpeech/LS_UTILS/Data_load.py | import os
from os import path, makedirs
# For LibriSpeech download
import codecs
import fnmatch
import requests
import subprocess
import tarfile
import unicodedata
import pandas as pd  # needed for the dataframe built below
from tensorflow.python.platform import gfile
# Processing
split_and_df = False #True if you haven't already done this step
def maybe_download(archive_name, target_dir, archive_url):
# If archive file does not exist, download it...
archive_path = path.join(target_dir, archive_name)
if not path.exists(target_dir):
print('No path "%s" - creating ...' % target_dir)
makedirs(target_dir)
if not path.exists(archive_path):
print('No archive "%s" - downloading...' % archive_path)
req = requests.get(archive_url, stream=True)
total_size = int(req.headers.get('content-length', 0))
done = 0
with open(archive_path, 'wb') as f:
for data in req.iter_content(1024*1024):
done += len(data)
f.write(data)
else:
print('Found archive "%s" - not downloading.' % archive_path)
return archive_path
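# A minimal usage sketch: fetch one small archive (the dev-clean split) into a
# hypothetical ./data directory, skipping the download if already present.
def _example_maybe_download():
    url = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
    return maybe_download("dev-clean.tar.gz", "./data", url)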
def _download_and_preprocess_data(data_dir):
# Conditionally download data to data_dir
print("Downloading Librivox data set (55GB) into {} if not already present...".format(data_dir))
TRAIN_CLEAN_100_URL = "http://www.openslr.org/resources/12/train-clean-100.tar.gz"
TRAIN_CLEAN_360_URL = "http://www.openslr.org/resources/12/train-clean-360.tar.gz"
# TRAIN_OTHER_500_URL = "http://www.openslr.org/resources/12/train-other-500.tar.gz"
DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
def filename_of(x): return os.path.split(x)[1]
train_clean_100 = maybe_download(filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL)
train_clean_360 = maybe_download(filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL)
# train_other_500 = maybe_download(filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL)
dev_clean = maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
dev_other = maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
test_clean = maybe_download(filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL)
test_other = maybe_download(filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL)
# Conditionally extract LibriSpeech data
# We extract each archive into data_dir, but test for existence in
# data_dir/LibriSpeech because the archives share that root.
print("Extracting librivox data if not already extracted...")
LIBRIVOX_DIR = "LibriSpeech"
work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360)
# _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
if split_and_df:
print("Splitting transcriptions and creating dataframes...")
train_100 = _convert_audio_and_split_sentences(work_dir, "train-clean-100", "train-clean-100-wav")
train_360 = _convert_audio_and_split_sentences(work_dir, "train-clean-360", "train-clean-360-wav")
# train_500 = _convert_audio_and_split_sentences(work_dir, "train-other-500", "train-other-500-wav")
dev_clean = _convert_audio_and_split_sentences(work_dir, "dev-clean", "dev-clean-wav")
dev_other = _convert_audio_and_split_sentences(work_dir, "dev-other", "dev-other-wav")
test_clean = _convert_audio_and_split_sentences(work_dir, "test-clean", "test-clean-wav")
test_other = _convert_audio_and_split_sentences(work_dir, "test-other", "test-other-wav")
# Write sets to disk as CSV files
train_100.to_csv(os.path.join(data_dir, "librivox-train-clean-100.csv"), index=False)
train_360.to_csv(os.path.join(data_dir, "librivox-train-clean-360.csv"), index=False)
# train_500.to_csv(os.path.join(data_dir, "librivox-train-other-500.csv"), index=False)
dev_clean.to_csv(os.path.join(data_dir, "librivox-dev-clean.csv"), index=False)
dev_other.to_csv(os.path.join(data_dir, "librivox-dev-other.csv"), index=False)
test_clean.to_csv(os.path.join(data_dir, "librivox-test-clean.csv"), index=False)
test_other.to_csv(os.path.join(data_dir, "librivox-test-other.csv"), index=False)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(os.path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space+1:]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
transcript = transcript.lower().strip()
flac_file = os.path.join(root, seqid + ".flac")
flac_filesize = os.path.getsize(flac_file)
files.append((os.path.abspath(flac_file), flac_filesize, transcript))
return pd.DataFrame(data=files, columns=["flac_filename", "flac_filesize", "transcript"])
#code adapted from https://github.com/mozilla/DeepSpeech/blob/master/bin/import_librivox.py | 7,310 | 44.409938 | 108 | py |
cyphercat | cyphercat-master/Classification_baselines/LibriSpeech/LS_UTILS/Move_data.py | import os
import shutil  # needed for shutil.move below
def move_files(abs_dirname,speaker,datapath,trainSet,cvSet,tstSet):
"""Move files into subdirectories."""
for subdir in os.listdir(abs_dirname):
files = [os.path.join(abs_dirname,subdir, f) for f in os.listdir(os.path.join(abs_dirname,subdir))]
cv_dir = os.path.abspath(os.path.join(datapath + cvSet + speaker))
cv_subdir = os.path.join(cv_dir,subdir)
test_dir = os.path.abspath(os.path.join(datapath + tstSet + speaker))
test_subdir = os.path.join(test_dir,subdir)
if not os.path.isdir(test_subdir):
if not os.path.isdir(test_dir):
print('splitting',speaker)
os.mkdir(cv_dir)
os.mkdir(test_dir)
os.mkdir(cv_subdir)
os.mkdir(test_subdir)
#separate files
# 1 test
# 2 cv
# 3,4,5 train
# 6 test
# 7 cv
# 8,9,0 train
ncv = [2,7]
ntest = [1,6]
for f in files:
num = f[-9:-5]
if num != 'tran':
rem = int(num) % 10
if rem in ncv: #move to cv
# move file to target dir
f_base = os.path.basename(f)
shutil.move(f, cv_subdir)
elif rem in ntest:
# move file to target dir
f_base = os.path.basename(f)
shutil.move(f, test_subdir)
def main(speakers,datapath,trainSet,cvSet,tstSet):
for speaker in speakers:
src_dir = datapath + trainSet + speaker
if not os.path.exists(src_dir):
raise Exception('Directory does not exist ({0}).'.format(src_dir))
move_files(os.path.abspath(src_dir),speaker,datapath,trainSet,cvSet,tstSet)
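# A minimal sketch of an invocation; all paths and speaker IDs below are
# hypothetical placeholders for the local LibriSpeech layout.
def example_invocation():
    main(speakers=['19/', '26/'], datapath='./LibriSpeech/',
         trainSet='train-clean-100/', cvSet='cv-clean-100/',
         tstSet='test-clean-100/')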
| 1,889 | 33.363636 | 109 | py |
cyphercat | cyphercat-master/scripts/test_train.py | from __future__ import print_function
try:
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from cyphercat.models import get_predef_model, weights_init
from cyphercat.train import *
from cyphercat.metrics import *
from cyphercat.datadefs import CCATDataset
from cyphercat.datadefs.cifar10_dataset import Cifar10_preload_and_split
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, ModelConfig, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/lfw.yml', help="Path to yaml")
args = parser.parse_args()
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
train_model_config = configr.train_model
# Training model params
train_config = ModelConfig(train_model_config)
model_name = train_config.name
# Datastruct for prepping data
data_struct = DataStruct(dataset_config)
# Simple download / unpacker function
prep_data(data_struct)
# Hyperparameters
n_epochs = train_config.epochs
batch_size = train_config.batchsize
learnrate = train_config.learnrate
loss = nn.CrossEntropyLoss()
# Data augmentation
train_transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
splits = [0.4, 0.1, 0.25, 0.25]
data_name = data_struct.name
ccatset = CCATDataset(path=data_struct.save_path, name=data_name, splits=splits, transforms=[train_transform])
trainset = ccatset.get_split_n(0)
testset = ccatset.get_split_n(1)
n_classes = data_struct.n_classes
img_size = data_struct.height
n_in = data_struct.channels
## Define pyTorch ingestors for training and testing
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
#
## helper function to unnormalize and plot image
#def imshow(img):
# img = np.array(img)
# img = img / 2 + 0.5
# img = np.moveaxis(img, 0, -1)
# plt.imshow(img)
#
## display sample from dataset
#imgs,labels = iter(trainloader).next()
#imshow(torchvision.utils.make_grid(imgs))
# Prepare the model for training
model = get_predef_model(model_name)(n_in=n_in, n_classes=n_classes, n_filters=64, size=img_size)
model.to(device)
model.apply(weights_init)
model_optim = optim.Adam(model.parameters(), lr=learnrate/10)
# Train the model
train(model=model, data_loader=trainloader, test_loader=testloader,
optimizer=model_optim, criterion=loss, n_epochs=n_epochs, classes=None, verbose=False)
    # Evaluate analytics on training and testing sets
print("\nPerformance on training set: ")
train_accuracy = eval_target_model(model, trainloader, classes=None)
print("\nPerformance on test set: ")
test_accuracy = eval_target_model(model, testloader, classes=None)
if __name__ == "__main__":
main()
| 4,030 | 32.31405 | 116 | py |
cyphercat | cyphercat-master/scripts/test_load.py | from __future__ import print_function
try:
import time
import os
import sys
import argparse
import torch
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/config.yml', help="Path to yaml")
#model_parse = parser.add_mutually_exclusive_group()
#model_parse.add_argument("-r", "--rand_rot_angle", dest="rand_rot_angle", default=0., type=float, help="Random image rotation angle range [deg]")
#model_parse.add_argument("-f", "--fixed_rot_angle", dest="fixed_rot_angle", nargs=3, type=float, help="(low, high, spacing) fixed image rotation angle [deg]")
args = parser.parse_args()
print("Testing")
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
# Directory structures for data and model saving
data_struct = DataStruct(dataset_config)
    prep_data(data_struct)
if __name__ == "__main__":
main()
| 1,500 | 26.290909 | 163 | py |
cyphercat | cyphercat-master/scripts/test_train_libri.py | from __future__ import print_function
try:
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from cyphercat.models import get_predef_model, weights_init
from cyphercat.train import *
from cyphercat.metrics import *
from cyphercat.load_data import prep_data
from cyphercat.utils import Configurator, ModelConfig, DataStruct
except ImportError as e:
print(e)
raise ImportError
def main():
global args
parser = argparse.ArgumentParser(description="Convolutional NN Testing Script")
parser.add_argument("-c", "--config", dest="configfile", default='scripts/configs/librispeech.yml', help="Path to yaml")
args = parser.parse_args()
print("Python: %s" % sys.version)
print("Pytorch: %s" % torch.__version__)
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Get configuration file
configr = Configurator(args.configfile)
# Get dataset configuration
dataset_config = configr.dataset
train_model_config = configr.train_model
# Datastruct for prepping data
data_struct = DataStruct(dataset_config)
# Simple download / unpacker function
prep_data(data_struct)
## Training model params
#train_config = ModelConfig(train_model_config)
#model_name = train_config.name
## Hyperparameters
#n_epochs = train_config.epochs
#batch_size = train_config.batchsize
#learnrate = train_config.learnrate
#loss = nn.CrossEntropyLoss()
#
## Data augmentation
#train_transform = torchvision.transforms.Compose([
# #torchvision.transforms.RandomRotation(10),
# #torchvision.transforms.RandomHorizontalFlip(),
# #torchvision.transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
#
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
#])
#
#test_transform = torchvision.transforms.Compose([
# #torchvision.transforms.Pad(2),
# torchvision.transforms.ToTensor(),
# #torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
#])
#
## Defined training and testing set splits
#trainset, testset = get_split_dataset(dataset_config=dataset_config, transforms=[train_transform, test_transform])
#n_classes = trainset.n_classes
#
## Define pyTorch ingestors for training and testing
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
#testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2)
#
## Prepare the model for training
#model = get_predef_model(model_name)(n_in=3, n_classes=n_classes, n_filters=64, size=250)
#model.to(device)
#model.apply(weights_init)
#model_optim = optim.Adam(model.parameters(), lr=learnrate/10)
## Train the model
#train(model=model, data_loader=trainloader, test_loader=testloader,
# optimizer=model_optim, criterion=loss, n_epochs=n_epochs, classes=None, verbose=False)
    ## Evaluate analytics on training and testing sets
#print("\nPerformance on training set: ")
#train_accuracy = eval_target_model(model, trainloader, classes=None)
#print("\nPerformance on test set: ")
#test_accuracy = eval_target_model(model, testloader, classes=None)
if __name__ == "__main__":
main()
| 3,781 | 33.697248 | 124 | py |
cyphercat | cyphercat-master/Visualizations/visualize_util.py | from graphviz import Digraph
def visualize(model, name):
print(model.source) # doctest: +NORMALIZE_WHITESPACE
model.render(name, view=False) # doctest: +SKIP
#name.pdf
# print(fool_attack.source) # doctest: +NORMALIZE_WHITESPACE
# fool_attack.render('test-output/fooling_attack-table.gv', view=False) # doctest: +SKIP
# #'test-output/fooling_attack.gv.pdf'
# print(fool_defense.source) # doctest: +NORMALIZE_WHITESPACE
# fool_defense.render('test-output/fooling_defense-table.gv', view=False) # doctest: +SKIP
# #'test-output/fooling_defense.gv.pdf'
# print(inversion_attack.source) # doctest: +NORMALIZE_WHITESPACE
# inversion_attack.render('test-output/inversion_attack-table.gv', view=False) # doctest: +SKIP
# #'test-output/inversion_attack.gv.pdf'
# print(inversion_defense.source) # doctest: +NORMALIZE_WHITESPACE
# inversion_defense.render('test-output/inversion_defense-table.gv', view=False) # doctest: +SKIP
# #'test-output/inversion_defense.gv.pdf' | 990 | 44.045455 | 98 | py |
cyphercat | cyphercat-master/Visualizations/model_fooling.py | from graphviz import Digraph
# MODEL FOOLING
def fooling_attack():
    fool_attack = Digraph('Fooling Attacks', graph_attr={'size': '8.5,11.5'}, comment='Taxonomy of Secure Deep Learning')
fool_attack #doctest: +ELLIPSIS
# NODES:
fool_attack.node('Fooling Attacks', r'{<f0> Fooling Attacks |<f1> '+
r'https://arxiv.org/abs/1804.00097'+
r'\n\n}', shape='record')
fool_attack.node('L-BFGS', r'{<f0> L-BFGS |<f1> '+
r'https://arxiv.org/abs/1312.6199'+
r'\n\n}', shape='record')
fool_attack.node('FGSM', r'{<f0> Fast Gradient Sign \n Method (FGSM) |<f1> '+
r'https://arxiv.org/abs/1412.6572'+
r'\n\n}', shape='record')
fool_attack.node('Black Box', r'{<f0> Black Box/\n Transferable Attacks |<f1> '+
r'https://arxiv.org/abs/1611.02770'+
r'\n\n}' , shape = 'record')
fool_attack.node('BIM', r'{<f0> Basic Iterative Method(BIM)\n/ Iterative FGSM(I-FGSM) |<f1> '+
r'https://arxiv.org/abs/1607.02533'+
r'\n\n}', shape='record')
fool_attack.node('PGD', r'{<f0> Projected Gradient \n Descent |<f1> '+
r'https://arxiv.org/abs/1706.06083'+
r'\n\n}', shape='record')
fool_attack.node('ATN', r'{<f0> Adversarial Transformation \n Networks/ GANs |<f1> '+
r'https://arxiv.org/abs/1703.09387'+
r'\n\n}', shape='record')
# EDGES:
fool_attack.edge('Fooling Attacks', 'Black Box')
fool_attack.edge('Fooling Attacks', 'White Box')
fool_attack.edge('White Box', 'L-BFGS')
fool_attack.edge('White Box', 'FGSM')
fool_attack.edge('White Box', 'BIM')
fool_attack.edge('White Box', 'PGD')
fool_attack.edge('White Box', 'ATN')
return fool_attack
#
def fooling_defense():
    fool_defense = Digraph('Fooling Defense', comment='Taxonomy of Secure Deep Learning')
fool_defense.edge('Fooling Defenses', 'Gradient Masking')
fool_defense.edge('Gradient Masking', 'Complex Nonlinearities')
fool_defense.edge('Fooling Defenses', 'Adversarial Training')
fool_defense.edge('Fooling Defenses', 'Preprocessing')
fool_defense.edge('Complex Nonlinearities', 'RBF Neural Networks')
fool_defense.edge('Complex Nonlinearities', 'SVM Layers')
fool_defense.edge('Adversarial Training', 'Data Augmentation')
fool_defense.edge('Preprocessing', 'Noise Removal')
return fool_defense
| 2,500 | 43.660714 | 120 | py |
cyphercat | cyphercat-master/Visualizations/visualize.py | from graphviz import Digraph
from model_inversion import *
from model_fooling import *
from visualize_util import *
# Generate the PDF's
visualize(fooling_attack(), 'fooling_attack.gv')
visualize(fooling_defense(), 'fooling_defense.gv')
visualize(inversion_attack(), 'inversion_attack.gv')
visualize(inversion_defense(), 'inversion_defense.gv')
| 346 | 30.545455 | 54 | py |
cyphercat | cyphercat-master/Visualizations/model_inversion.py | from graphviz import Digraph
# MODEL INVERSION
def inversion_attack():
inversion_attack = Digraph('Inversion Attacks',comment='Taxonomy of Secure Deep Learning')
inversion_attack.node('Inversion Attacks', r'{<f0> Inversion Attacks |<f1> '+
r'https://arxiv.org/abs/1804.00097'+
r'\n\n}', shape='record')
inversion_attack.node('Shadow Networks', r'{<f0> Shadow Networks |<f1> '+
r'https://arxiv.org/abs/1610.05820\n'+
r'https://arxiv.org/abs/1806.01246'+
r'\n\n}', shape='record')
inversion_attack.node('Data Extraction', r'{<f0> Data Extraction |<f1> '+
r'https://arxiv.org/abs/1806.00400v1'+
r'\n\n}', shape='record')
inversion_attack.edge('Inversion Attacks', 'Membership Inference')
inversion_attack.edge('Membership Inference', 'Shadow Networks')
inversion_attack.edge('Inversion Attacks', 'Data Extraction')
inversion_attack.edge('Data Extraction', 'GAN Networks')
inversion_attack.edge('Data Extraction', 'Gradient Ascent')
inversion_attack.edge('Inversion Attacks', 'Model Extraction')
return inversion_attack
def inversion_defense():
inversion_defense = Digraph('Inversion Defenses',comment='Taxonomy of Secure Deep Learning')
inversion_defense.node('Adversarial Training', r'{<f0> Adversarial Training |<f1> '+
r'https://arxiv.org/abs/1807.05852'+
r'\n\n}', shape='record')
inversion_defense.edge('Inversion Defense', 'Complex Nonlinearities')
inversion_defense.edge('Inversion Defense', 'Hidden Networks')
inversion_defense.edge('Inversion Defense', 'Adversarial Training')
inversion_defense.edge('Complex Nonlinearities', 'Gradient Lookups')
return inversion_defense
| 1,812 | 45.487179 | 96 | py |
cyphercat | cyphercat-master/Utils/transformations.py | import torch
import librosa as libr
import numpy as np
class ToMFCC:
'''
Transformation to convert soundfile loaded via LibriSpeechDataset to Mel-
frequency cepstral coefficients (MFCCs)
Args:
number_of_mels: Number of bins to use for cepstral coefficients
Returns:
torch.float tensor
'''
def __init__(self, number_of_mels=128):
self.number_of_mels = number_of_mels
def __call__(self, y):
dims = y.shape
y = libr.feature.melspectrogram(np.reshape(y, (dims[1],)), 16000,
n_mels=self.number_of_mels, fmax=8000)
y = libr.feature.mfcc(S=libr.power_to_db(y))
y = torch.from_numpy(y)
return y.float()
class STFT:
'''
Short-time Fourier transform (STFT) for librosa dataset
Args:
phase: If true, will return the magnitude and phase of the transformation,
if false only returns magnitude
Returns:
torch.float tensor
'''
def __init__(self, phase=False):
self.phase = phase
def __call__(self, y):
dims = y.shape
y = libr.core.stft(np.reshape(y, (dims[1],)))
y, phase = np.abs(y), np.angle(y)
y = torch.from_numpy(y).permute(1, 0)
phase = torch.from_numpy(phase).permute(1, 0)
if self.phase:
return torch.cat( (y, phase), dim=0).float()
else:
return y.float()
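# Hedged usage sketch (assumption: this demo block is not part of the original
# module, and the fake waveform stands in for a LibriSpeechDataset sample; it
# relies on the positional librosa call signature used above). Both transforms
# expect a (1, n_samples) channels-first array.
if __name__ == '__main__':
    dummy = np.random.randn(1, 16000)  # one second of fake 16 kHz audio
    mfcc = ToMFCC(number_of_mels=128)(dummy)
    spec = STFT(phase=False)(dummy)
    print('MFCC shape:', mfcc.shape)
    print('STFT magnitude shape:', spec.shape)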
| 1,449 | 28.591837 | 79 | py |
cyphercat | cyphercat-master/Utils/datasets.py | from torch.utils.data import Dataset
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
LIBRISPEECH_SAMPLING_RATE = 16000
PATH = '/home/mlomnitz/mlomnitz/Datasets'
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def to_categorical(y, num_classes):
"""Transforms an integer class label into a one-hot label (single integer to 1D vector)."""
if y >= num_classes:
        raise ValueError('Integer label is greater than the number of classes.')
one_hot = np.zeros(num_classes)
one_hot[y] = 1
return one_hot
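# Worked example (illustrative): to_categorical(2, 4) -> array([0., 0., 1., 0.])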
def Libri_preload_and_split(path,subsets,seconds,pad=False,cache=True,splits = [.8,.2], attacking = False):
fragment_seconds = seconds
print('Initialising LibriSpeechDataset with minimum length = {}s and subsets = {}'.format(seconds, subsets))
# Convert subset to list if it is a string
# This allows to handle list of multiple subsets the same a single subset
if isinstance(subsets, str):
subsets = [subsets]
cached_df = []
found_cache = {s: False for s in subsets}
if cache:
# Check for cached files
for s in subsets:
subset_index_path = path + '/{}.index.csv'.format(s)
if os.path.exists(subset_index_path):
cached_df.append(pd.read_csv(subset_index_path))
found_cache[s] = True
# Index the remaining subsets if any
if all(found_cache.values()) and cache:
df = pd.concat(cached_df)
else:
df = pd.read_csv(path+'/LibriSpeech/SPEAKERS.TXT', skiprows=11, delimiter='|', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower() for col in df.columns]
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
name=df['name'].apply(lambda x: x.strip()),
)
audio_files = []
for subset, found in found_cache.items():
if not found:
audio_files += index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# # Concatenate with already existing dataframe if any exist
df = pd.concat(cached_df+[df])
# Save index files to data folder
for s in subsets:
df[df['subset'] == s].to_csv(path + '/{}.index.csv'.format(s), index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
num_speakers = len(df['id'].unique())
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id', 'minutes': 'speaker_minutes'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
print('Finished indexing data. {} usable files found.'.format(len(df)))
dfs = {} #dictionary of dataframes
#split df into data-subsets
if attacking == 1: #adversary 1, which requires an additional split for a shadow network
#splits unique speakers into three unequal parts.
# num speakers for train & test is the same.
# the below was solved with a system of equations
n = int(num_speakers//(2+2*splits[0]))#amt data depends on amt train dat
#n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits,2)
dfs = splitter(dfs,df,unique_speakers3, splits=[0.5,0.5],N = 4) #split out data for attack train + test evenly
elif attacking == 3: #adversary 3, which just requires in & out data
#splits unique speakers into two unequal parts.
# the below was solved with a system of equations
n = int(num_speakers//(1+splits[0]))#amt data depends on amt train dat
#n is train data for target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits=[1,0],N=2) #split out data for just attack eval
else: # just split into train & test
dfs = splitter(dfs, df,unique_speakers, splits, 0)
#check that the splits were as desired:
for d in dfs:
print(len(dfs[d]))
print('Finished splitting data.')
return dfs
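# Hedged usage sketch (assumption: the path, subset and parameter values are
# illustrative, not taken from the repo):
#   dfs = Libri_preload_and_split('Datasets', 'train-clean-100', seconds=3,
#                                 splits=[.8, .2], attacking=1)
# With attacking == 1 the returned dict holds six dataframes:
#   0/1 -> target train/test, 2/3 -> shadow train/test, 4/5 -> attack train/test.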
def index_subset(path , subset):
"""
    Index a subset by looping through all of its files and recording their speaker ID, filepath and length.
    :param path: Path to the directory containing the LibriSpeech data
    :param subset: Name of the subset
:return: A list of dicts containing information about all the audio files in a particular subset of the
LibriSpeech dataset
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path + '/LibriSpeech/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.flac')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path + '/LibriSpeech/{}/'.format(subset)):
if len(files) == 0:
continue
librispeech_id = int(root.split('/')[-2])
for f in files:
# Skip non-sound files
if not f.endswith('.flac'):
continue
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.join(root, f),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def VOiCES_preload_and_split(path,subsets,seconds,pad=False,cache=True,splits = [.8,.2], attacking = False):
fragment_seconds = seconds
speakerF = 'Lab41-SRI-VOiCES-speaker-gender-dataset_SUBSET.csv'
print('Initialising VOiCES Dataset with minimum length = {}s and subsets = {}'.format(seconds, subsets))
for file in os.listdir(path):
if file.startswith("part"):
subsets.append(file)
# Convert subset to list if it is a string
# This allows to handle list of multiple subsets the same a single subset
if isinstance(subsets, str):
subsets = [subsets]
cached_df = []
found_cache = {s: False for s in subsets}
if cache:
# Check for cached files
# if data == 'Libri':
# for s in subsets:
# subset_index_path = path + '/{}.index.csv'.format(s)
# if os.path.exists(subset_index_path):
# print(subset_index_path)
# cached_df.append(pd.read_csv(subset_index_path))
# found_cache[s] = True
subset_index_path = path + '/index.csv'
if os.path.exists(subset_index_path):
cached_df.append(pd.read_csv(subset_index_path))
for s in subsets:
found_cache[s] = True
# Index the remaining subsets if any
if all(found_cache.values()) and cache:
df = pd.concat(cached_df)
else:
df = pd.read_csv(path+speakerF, skiprows=0, delimiter=',', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower() for col in df.columns]
df = df.rename(columns={'speaker': 'id', 'gender': 'sex','dataset':'subset'})
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
)
audio_files = []
for subset, found in found_cache.items():
if not found:
audio_files += index_subset_VOiCES(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# # Concatenate with already existing dataframe if any exist
df = pd.concat(cached_df+[df])
# Save index files to data folder
# if data == 'Libri':
# for s in subsets:
# df[df['subset'] == s].to_csv(path + '/{}.index.csv'.format(s), index=False)
df.to_csv(path + '/index.csv', index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
num_speakers = len(df['id'].unique())
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
print('Finished indexing data. {} usable files found.'.format(len(df)))
dfs = {} #dictionary of dataframes
#split df into data-subsets
if attacking == 1: #adversary 1, which requires an additional split for a shadow network
#splits unique speakers into three unequal parts.
# num speakers for train & test is the same.
# the below was solved with a system of equations
n = int(num_speakers//(2+2*splits[0]))#amt data depends on amt train dat
#n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits,2)
dfs = splitter(dfs,df,unique_speakers3, splits=[0.5,0.5],N = 4) #split out data for attack train + test evenly
elif attacking == 3: #adversary 3, which just requires in & out data
#splits unique speakers into two unequal parts.
# the below was solved with a system of equations
n = int(num_speakers//(1+splits[0]))#amt data depends on amt train dat
#n is train data for target networks
unique_speakers1 = unique_speakers[:n] #target
unique_speakers2 = unique_speakers[n:] # out (target + shadow)
dfs = splitter(dfs,df,unique_speakers1, splits,0)
dfs = splitter(dfs,df,unique_speakers2, splits=[1,0],N=2) #split out data for just attack eval
else: # just split into train & test
dfs = splitter(dfs, df,unique_speakers, splits, 0)
#check that the splits were as desired:
for d in dfs:
print(len(dfs[d]))
print('Finished splitting data.')
return dfs
def index_subset_VOiCES(path , subset):
"""
    Index a subset by looping through all of its files and recording their speaker ID, filepath and length.
    :param path: Path to the directory containing the VOiCES data
    :param subset: Name of the subset
    :return: A list of dicts containing information about all the audio files in a particular subset of the
    VOiCES dataset
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
addpath = ''
ftype = '.wav'
for root, folders, files in os.walk(path + addpath +'{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith(ftype)])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path + addpath + '{}/'.format(subset)):
if len(files) == 0:
continue
for f in files:
# Skip non-sound files
if not f.endswith(ftype):
continue
librispeech_id = int(f[f.index('sp')+2:f.index('sp')+6])
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.join(root, f),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def splitter(dfs,df,unique_speakers, splits,N):
#N is to keep track of the dataframe dict keys
n_splits = len(splits)
for speaker in unique_speakers: #for each speaker
# speaker = valid_sequence.unique_speakers[0]
tot_files = sum(df['speaker_id']==speaker)
mini_df = df[df['speaker_id']==speaker]
mini_df = mini_df.reset_index()
used_files = 0
start_file = 0
for idx, s in enumerate(splits): #for each split
if idx != n_splits-1:
n_files = int(s*tot_files)
used_files += n_files
else:
n_files = tot_files - used_files
#get stop index for the desired # of files:
stop_file = start_file + n_files
#initialize if first speaker, or append if later speaker
if speaker == unique_speakers[0]:
dfs[idx + N] = (mini_df.iloc[start_file:stop_file])
else:
dfs[idx + N] = dfs[idx + N].append(mini_df.iloc[start_file:stop_file])
#update start_file
start_file += n_files
for idx in range(n_splits): #for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
class LibriSpeechDataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset object. The __getitem__ function will return a raw audio
sample and it's label.
This class also contains functionality to build verification tasks and n-shot, k-way classification tasks.
# Arguments
subsets: What LibriSpeech datasets to include.
seconds: Minimum length of audio to include in the dataset. Any files smaller than this will be ignored.
        downsampling: Integer factor by which to downsample the raw audio.
label: One of {speaker, sex}. Whether to use sex or speaker ID as a label.
stochastic: bool. If True then we will take a random fragment from each file of sufficient length. If False we
will always take a fragment starting at the beginning of a file.
pad: bool. Whether or not to pad samples with 0s to get them to the desired length. If `stochastic` is True
then a random number of 0s will be appended/prepended to each side to pad the sequence to the desired length.
cache: bool. Whether or not to use the cached index file
"""
def __init__(self, path, df, seconds, downsampling, label='speaker', stochastic=True, pad=False,
transform = None, cache=True):
if label not in ('sex', 'speaker'):
            raise ValueError('Label type must be one of (\'sex\', \'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
            raise ValueError('Down sampling must be an integer divisor of the fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(self.datasetid_to_filepath[index])
# Choose a random sample of the file
if self.stochastic:
fragment_start_index = np.random.randint(0, max(len(instance)-self.fragment_length, 1))
else:
fragment_start_index = 0
instance = instance[fragment_start_index:fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length == self.fragment_length by appending a random number of 0s
# before and the appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len), 'constant')
else:
# Deterministic padding. Append 0s to reach self.fragment_length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
            raise ValueError('Label type must be one of (\'sex\', \'speaker\'), not {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
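# Hedged usage sketch (assumption: illustrative wiring of the pieces above):
#   dfs = Libri_preload_and_split('Datasets', 'train-clean-100', seconds=3)
#   trainset = LibriSpeechDataset('Datasets', dfs[0], seconds=3, downsampling=1)
#   loader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True)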
| 18,446 | 38.082627 | 119 | py |
cyphercat | cyphercat-master/Utils/visualize_object_survey.py | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
"""
saliency_map_general: implementation to return the most general form of the saliency map, informing
on the regions of interest that activate a specific label.
Args:
        - model: (PyTorch) Trained model whose decision is being explained
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0),requires_grad = True)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def guided_saliency_map(model, input, label, plot = False):
"""
guided_saliency_map: implementation to return a guided saliency map, informing
on the regions of interest that activate a specific label.
Args:
        - model: (PyTorch) Trained model whose decision is being explained
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
- label: Class to identify the regions of interest
return: numpy array with heatmap data
"""
input = Variable(input.unsqueeze_(0), requires_grad=True)
try:
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
output = model.forward(input)
model.zero_grad()
output[0][label].backward()
for i in range(len(list(model.modules()))):
h[i].remove()
except Exception as e:
print(e)
for i in range(len(list(model.modules()))):
h[i].remove()
grads = input.grad.data.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def gradcam(model, input, label, layer_name, plot=False):
"""
gradcam: implementation to return a class activation map using the gradient of class score with each
of last conv layer filters. Calculate weighted sum of gradients and filters to finally obtain a map
of size equal to size of filters.
Args:
        - model: (PyTorch) Trained model whose decision is being explained
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy array with the class activation map resized to the input size
"""
imgs_shape = (input.shape[1], input.shape[2])
rs = torchvision.transforms.Resize( imgs_shape )
#find the right layer
last_conv = None
for name, item in model._modules.items():
if name == layer_name:
last_conv = item
    if last_conv is None:
        print('Cannot find target layer')
return None
pre_image = input
global gcdata
global gcgrads
def bhook(module, gradInputs, gradOutputs):
global gcgrads
gcgrads = gradOutputs
def fhook(module, input, output):
global gcdata
gcdata = output
hb = last_conv.register_backward_hook(bhook)
hf = last_conv.register_forward_hook(fhook)
out = model(input.unsqueeze_(0))
model.zero_grad()
out[0, label].backward()
hb.remove()
hf.remove()
gcdata = gcdata[0]
gcgrads = gcgrads[0].squeeze()
gcgrads = gcgrads.mean(dim=2, keepdim=True)
gcgrads = gcgrads.mean(dim=1, keepdim=True)
#
gcdata = gcdata.mul(gcgrads)
gcdata = gcdata.sum(dim=0, keepdim=True)
gcdata = gcdata.clamp(min=0)
gcdata -= gcdata.min()
gcdata /= gcdata.max()
toi = torchvision.transforms.ToPILImage()
gcdata = np.array(rs(toi(gcdata.data.cpu())))
input.squeeze()
return gcdata
def guided_gradcam(model, input, label,layer_name, plot = False):
"""
    guided_gradcam: returns a combination of a guided saliency map and class activation map. This combines
    the class sensitivity of gradcam together with the greater resolution of the
    saliency map.
    Args:
        - model: (PyTorch) Trained model whose decision is being explained
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy array with the guided class activation map
"""
gc = gradcam(model, input, label, layer_name, plot=False)
guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32,32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
def smooth_guided_saliency_map(model, input, label, transform,x=10, percent_noise=10, plot = True):
"""
    smooth_guided_saliency_map: Implementation of a guided saliency map that accounts for the fact that
    small, local variations in the gradients lead to apparently noisy maps. This implementation smooths
    them out by averaging maps computed over several noisy copies of the input.
    Args:
        - model: (PyTorch) Trained model whose decision is being explained
        - input: Image to be classified and understood, passed as a PyTorch tensor (C x W x H)
        - x: Number of noisy samples to average over for the smoothing
        - percent_noise: Percentage of noise to be introduced during sampling for smoothing
    return:
        numpy array with heatmap data
"""
tensor_input = input
final_grad = torch.zeros(input.shape).cuda()
final_grad = final_grad.unsqueeze(0)
h = [0]*len(list(model.modules()))
def hookfunc(module, gradInput, gradOutput):
return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
for j, i in enumerate(list(model.modules())):
h[j] = i.register_backward_hook(hookfunc)
for i in range(x):
temp_input = tensor_input
noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
(tensor_input.max() - tensor_input.min()),
size=temp_input.shape)).type(torch.cuda.FloatTensor)
temp_input = (temp_input.cuda() + noise).cpu().numpy()
temp_input = np.transpose(temp_input, (1,2,0) )
temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
output = model.forward(temp_input)
model.zero_grad()
output[0][label].backward()
final_grad += temp_input.grad.data
for i in range(len(list(model.modules()))):
h[i].remove()
grads = final_grad/x
grads = grads.clamp(min=0)
grads.squeeze_()
grads.transpose_(0,1)
grads.transpose_(1,2)
grads = np.amax(grads.cpu().numpy(), axis=2)
grads -= grads.min()
grads /= grads.max()
grads *= 255
grads = grads.astype(int)
return grads
def smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):
guided = smooth_guided_saliency_map(model, input, label,transform = transform, plot = False)
gc = gradcam(model, input, label, layer_name = layer_name, plot=False)
gc = gc * guided
rs = torchvision.transforms.Resize((32,32))
gc -= gc.min()
gc = np.divide(gc, gc.max())
gc *= 255
gc = gc.astype(int)
return gc
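# Hedged usage sketch (assumption: this demo block is not part of the original
# survey code; the model choice and layer name are illustrative). torchvision's
# ResNet-18 exposes its last conv stage under the name 'layer4', which is what
# gradcam() looks up in model._modules. With randomly initialised weights the
# map is meaningless; load a trained checkpoint in practice.
if __name__ == '__main__':
    demo_model = torchvision.models.resnet18(pretrained=False)
    demo_model.eval()
    demo_input = torch.randn(3, 224, 224)  # a single C x H x W image tensor
    heatmap = gradcam(demo_model, demo_input, label=0, layer_name='layer4')
    if heatmap is not None:
        print('Grad-CAM heatmap shape:', heatmap.shape)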
| 8,615 | 31.513208 | 117 | py |
cyphercat | cyphercat-master/Utils/models.py | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import os.path
def new_size_conv(size, kernel, stride=1, padding=0):
return np.floor((size + 2*padding - (kernel -1)-1)/stride +1)
def new_size_max_pool(size, kernel, stride=None, padding=0):
    if stride is None:
stride = kernel
return np.floor((size + 2*padding - (kernel -1)-1)/stride +1)
def calc_alexnet_size(size):
x = new_size_conv(size, 6,3,2)
x = new_size_max_pool(x,3,2)
x = new_size_conv(x,5,1,2)
x = new_size_max_pool(x,3,2)
x = new_size_conv(x,3,1,1)
x = new_size_conv(x,3,1,1)
x = new_size_conv(x,3,1,1)
out = new_size_max_pool(x,2,2)
return out
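# Worked example: calc_alexnet_size(32) traces
#   conv(k=6,s=3,p=2): 32 -> 11, pool(k=3,s=2): 11 -> 5, conv(k=5,s=1,p=2): 5 -> 5,
#   pool(k=3,s=2): 5 -> 2, three conv(k=3,s=1,p=1): 2 -> 2, pool(k=2,s=2): 2 -> 1,
# so the AlexNet classifier below sees 256 * 1 * 1 flattened features for 32x32 inputs.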
class AlexNet(nn.Module):
def __init__(self, n_classes, size=32):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=6, stride=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
        out_feat_size = int(calc_alexnet_size(size))  # cast to int: the size helpers return floats
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * out_feat_size * out_feat_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, n_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class tiny_cnn(nn.Module):
def __init__(self, n_in=3, n_out=10, n_hidden=64, size=64):
super(tiny_cnn, self).__init__()
self.size = size
self.n_hidden = n_hidden
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_hidden, 2*n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(2*n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_hidden * (self.size//4) * (self.size//4), 2*n_hidden)
self.output = nn.Linear(2*n_hidden, n_out)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
#x = x.view(-1, 2*self.n_hidden * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
def calc_mlleaks_cnn_size(size):
x = new_size_conv(size, 5,1,2)
x = new_size_max_pool(x,2,2)
x = new_size_conv(x,5,1,2)
out = new_size_max_pool(x,2,2)
return out
class mlleaks_cnn(nn.Module):
def __init__(self, n_in=3, n_out=10, n_hidden=64, size=32):
super(mlleaks_cnn, self).__init__()
self.n_hidden = n_hidden
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_hidden, 2*n_hidden, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(2*n_hidden),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
fc_feature_size = calc_mlleaks_cnn_size(size)
self.fc = nn.Linear(int(2*n_hidden * fc_feature_size * fc_feature_size), 128)
        self.output = nn.Linear(128, n_out)  # matches the 128-d output of self.fc above
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
out = self.output(x)
return out
class ConvBlock(nn.Module):
#for audio_CNN_classifier
def __init__(self, n_input, n_out, kernel_size):
super(ConvBlock, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_CNN_classifier(nn.Module):
def __init__(self, in_size, n_hidden, n_classes):
super(audio_CNN_classifier, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(ConvBlock(in_size, 2*in_size, 3))
self.down_path.append(ConvBlock(2*in_size, 4*in_size, 3))
self.down_path.append(ConvBlock(4*in_size, 8*in_size, 3))
self.fc = nn.Sequential(
nn.Linear(8*in_size, n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
class STFT_CNN_classifier(nn.Module):
def __init__(self, in_size, n_hidden, n_classes):
super(STFT_CNN_classifier, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(ConvBlock(in_size, in_size, 7))
self.down_path.append(ConvBlock(in_size, in_size*2, 7))
self.down_path.append(ConvBlock(in_size*2, in_size*4, 7))
self.fc = nn.Sequential(
nn.Linear(5264, n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
class mlleaks_mlp(nn.Module):
def __init__(self, n_in=3, n_out=1, n_hidden=64):
super(mlleaks_mlp, self).__init__()
self.hidden = nn.Linear(n_in, n_hidden)
#self.bn = nn.BatchNorm1d(n_hidden)
self.output = nn.Linear(n_hidden, n_out)
def forward(self, x):
x = F.sigmoid(self.hidden(x))
#x = self.bn(x)
out = self.output(x)
#out = F.sigmoid(self.output(x))
return out
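# Hedged usage sketch (assumption: shapes and k=3 are illustrative): the
# ML-Leaks attack model ingests the sorted top-k posteriors of a target model:
#   posteriors = F.softmax(target(batch), dim=1)
#   topk, _ = torch.sort(posteriors, descending=True)
#   membership = torch.sigmoid(mlleaks_mlp(n_in=3)(topk[:, :3]))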
class cnn(nn.Module):
def __init__(self, in_channels, out_channels, n_filters):
super(cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(in_channels, n_filters, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters, height/2, width/2]
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, n_filters*2, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters*2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters*2, height/4, width/4]
self.dense_block_1 = nn.Sequential(
            # 64-d output to match dense_block_2 below
            nn.Linear(n_filters*2 * 8 * 8, 64),
##nn.BatchNorm1d(64),
##nn.ReLU(inplace=True)
)
# shape = [Batch_size, 64]
self.dense_block_2 = nn.Sequential(
nn.Linear(64, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(32, out_channels),
nn.BatchNorm1d(out_channels)
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, self.n_filters*2 * 8 * 8)
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class mlp(nn.Module):
def __init__(self, in_channels, out_channels, n_filters):
super(mlp, self).__init__()
self.n_filters = n_filters
# shape = [Batch_size, k (top k posteriors)]
self.dense_block_1 = nn.Sequential(
nn.Linear(in_channels, n_filters*2),
#nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, n_filters*2]
self.dense_block_2 = nn.Sequential(
nn.Linear(n_filters*2, n_filters*2),
#nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(n_filters*2, out_channels),
#nn.BatchNorm1d(out_channels),
nn.Sigmoid()
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class audio_cnn_block(nn.Module):
'''
1D convolution block used to build audio cnn classifiers
Args:
input: input channels
output: output channels
kernel_size: convolution kernel size
'''
def __init__(self, n_input, n_out, kernel_size):
super(audio_cnn_block, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_tiny_cnn(nn.Module):
'''
Template for convolutional audio classifiers.
'''
def __init__(self, cnn_sizes, n_hidden, kernel_size, n_classes):
'''
Init
Args:
cnn_sizes: List of sizes for the convolution blocks
n_hidden: number of hidden units in the first fully connected layer
kernel_size: convolution kernel size
n_classes: number of speakers to classify
'''
super(audio_tiny_cnn, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(audio_cnn_block(cnn_sizes[0], cnn_sizes[1],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[1], cnn_sizes[2],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[2], cnn_sizes[3],
kernel_size,))
self.fc = nn.Sequential(
nn.Linear(cnn_sizes[4], n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
def MFCC_cnn_classifier(n_classes):
'''
Builds speaker classifier that ingests MFCC's
'''
in_size = 20
n_hidden = 512
sizes_list = [in_size, 2*in_size, 4*in_size, 8*in_size, 8*in_size]
    return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
                          kernel_size=3, n_classes=n_classes)
def ft_cnn_classifer(n_classes):
'''
Builds speaker classifier that ingests the abs value of fourier transforms
'''
in_size = 94
n_hidden = 512
sizes_list = [in_size, in_size, 2*in_size, 4*in_size, 14*4*in_size]
    return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
                          kernel_size=7, n_classes=n_classes)
class RNN(torch.nn.Module):
'''
Bidirectional LSTM for sentiment analysis
'''
def __init__(self, vocab_size, embedding_size, hidden_size, output_size, n_layers=2, bidirectional=True, dropout=0.5):
super(RNN, self).__init__()
self.embedding = torch.nn.Embedding(vocab_size, embedding_size)
self.rnn = torch.nn.LSTM(embedding_size, hidden_size, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout)
        self.fc = torch.nn.Linear(hidden_size*2, output_size)  # the 2x assumes a bidirectional LSTM
self.dropout = torch.nn.Dropout(dropout)
def forward(self, x):
embedded = self.dropout(self.embedding(x))
output, (hidden, cell) = self.rnn(embedded)
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
return self.fc(hidden.squeeze(0))
def weights_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
def save_checkpoint(model=None, optimizer=None, epoch=None,
data_descriptor=None, loss=None, accuracy=None, path='./',
filename='checkpoint', ext='.pth.tar'):
state = {
'epoch': epoch,
'arch': str(model.type),
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'loss': loss,
'accuracy': accuracy,
'dataset': data_descriptor
}
torch.save(state, path+filename+ext)
def load_checkpoint(model=None, optimizer=None, checkpoint=None):
assert os.path.isfile(checkpoint), 'Checkpoint not found, aborting load'
chpt = torch.load(checkpoint)
    assert str(model.type) == chpt['arch'], 'Model architecture mismatch,\
    aborting load'
    model.load_state_dict(chpt['state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(chpt['optimizer'])
    print('Successfully loaded checkpoint \nDataset: %s \nEpoch: %s \nLoss: %s\
    \nAccuracy: %s' % (chpt['dataset'], chpt['epoch'], chpt['loss'],
    chpt['accuracy']))
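# Hedged usage sketch (assumption: the names and values are illustrative):
#   save_checkpoint(model=net, optimizer=opt, epoch=10, data_descriptor='CIFAR-10',
#                   loss=0.42, accuracy=87.5, path='./', filename='net_best')
#   load_checkpoint(model=net, optimizer=opt, checkpoint='./net_best.pth.tar')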
| 14,560 | 32.018141 | 128 | py |
cyphercat | cyphercat-master/Utils/metrics.py | import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from SVC_Utils import *
from sklearn.metrics import roc_curve, auc
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_net(net, testloader, classes=None):
if classes is not None:
        class_correct = np.zeros(len(classes))
        class_total = np.zeros(len(classes))
total = 0
correct = 0
with torch.no_grad():
net.eval()
for i, (imgs, lbls) in enumerate(testloader):
imgs, lbls = imgs.to(device), lbls.to(device)
output = net(imgs)
predicted = output.argmax(dim=1)
total += imgs.size(0)
correct += predicted.eq(lbls).sum().item()
if classes is not None:
for prediction, lbl in zip(predicted, lbls):
class_correct[lbl] += prediction == lbl
class_total[lbl] += 1
accuracy = 100*(correct/total)
if classes is not None:
for i in range(len(classes)):
print('Accuracy of %s : %.2f %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
print("\nAccuracy = %.2f %%\n\n" % (accuracy) )
return accuracy
def eval_attack_net(attack_net, target, target_train, target_out, k):
"""Assess accuracy, precision, and recall of attack model for in training set/out of training set classification.
Edited for use with SVCs."""
in_predicts=[]
out_predicts=[]
losses = []
if type(target) is not Pipeline:
target_net=target
target_net.eval()
attack_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
train_top = np.empty((0,2))
out_top = np.empty((0,2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
#[mini_batch_size x num_classes] tensors, (0,1) probabilities for each class for each sample)
if type(target) is Pipeline:
traininputs=train_imgs.view(train_imgs.shape[0], -1)
outinputs=out_imgs.view(out_imgs.shape[0], -1)
train_posteriors=torch.from_numpy(target.predict_proba(traininputs)).float()
out_posteriors=torch.from_numpy(target.predict_proba(outinputs)).float()
else:
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
#[k x mini_batch_size] tensors, (0,1) probabilities for top k probable classes
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
#Collects probabilities for predicted class.
for p in train_top_k:
in_predicts.append((p.max()).item())
for p in out_top_k:
out_predicts.append((p.max()).item())
if type(target) is not Pipeline:
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
#print(train_top.shape)
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(mini_batch_size).to(device)
#Takes in probabilities for top k most likely classes, outputs ~1 (in training set) or ~0 (out of training set)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
for j, t in enumerate(thresholds):
true_positives[j] += (train_predictions >= t).sum().item()
false_positives[j] += (out_predictions >= t).sum().item()
false_negatives[j] += (train_predictions < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_predictions >= t).sum().item()
correct[j] += (out_predictions < t).sum().item()
total[j] += train_predictions.size(0) + out_predictions.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
def eval_attack_roc(attack_net, target_net, target_train, target_out, k):
losses = []
target_net.eval()
attack_net.eval()
total = 0
correct = 0
train_top = np.empty((0,2))
out_top = np.empty((0,2))
true_positives = 0
false_positives = 0
false_negatives = 0
predictions = np.array([])
labels = np.array([])
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
train_size = train_imgs.shape[0]
out_size = out_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
train_lbl = torch.ones(train_size).to(device)
out_lbl = torch.zeros(out_size).to(device)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
predictions = np.concatenate((predictions, train_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.ones(train_size)), axis=0)
predictions = np.concatenate((predictions, out_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.zeros(out_size)), axis=0)
#print("train_predictions = ",train_predictions)
#print("out_predictions = ",out_predictions)
true_positives += (train_predictions >= 0.5).sum().item()
false_positives += (out_predictions >= 0.5).sum().item()
false_negatives += (train_predictions < 0.5).sum().item()
correct += (train_predictions>=0.5).sum().item()
correct += (out_predictions<0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
accuracy = 100 * correct / total
precision = true_positives / (true_positives + false_positives) if true_positives + false_positives != 0 else 0
recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives !=0 else 0
print("Membership Inference Performance")
print("Accuracy = %.2f%%, Precision = %.2f, Recall = %.2f" % (accuracy, precision, recall))
fpr, tpr, thresholds = roc_curve(labels, predictions, pos_label=1)
roc_auc = auc(fpr, tpr)
return fpr, tpr, roc_auc
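# Hedged usage sketch (assumption: loader names and k are illustrative) for
# plotting the ROC curve returned above:
#   fpr, tpr, roc_auc = eval_attack_roc(attack_net, target_net, in_loader, out_loader, k=3)
#   plt.plot(fpr, tpr, label='AUC = %.3f' % roc_auc)
#   plt.plot([0, 1], [0, 1], '--')
#   plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
#   plt.legend(); plt.show()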
def eval_membership_inference(target_net, target_train, target_out):
target_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top = train_sort[:,0].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top = out_sort[:,0].clone().to(device)
#print(train_top.shape)
for j, t in enumerate(thresholds):
true_positives[j] += (train_top >= t).sum().item()
false_positives[j] += (out_top >= t).sum().item()
false_negatives[j] += (train_top < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_top >= t).sum().item()
correct[j] += (out_top < t).sum().item()
total[j] += train_top.size(0) + out_top.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
| 11,084 | 35.22549 | 134 | py |
cyphercat | cyphercat-master/Utils/SVC_Utils.py | import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
import torch
import torchvision
def load(dataloader):
"""Loads/flattens inputs and targets for use in SVM. Returns inputs and targets."""
for data in dataloader:
x,y=data
x=x.view(x.shape[0],-1)
return x,y
def hp_grid(n_components, C_range, gamma_range):
"""Creates and returns list of classifiers with grid of hyperparameters given by C_range and gamma_range."""
clfs=[]
pca=PCA(n_components=n_components)
scaling = MinMaxScaler(feature_range=(-1,1))
for i in C_range:
for j in gamma_range:
svc=svm.SVC(C=i, gamma=j)
clf=make_pipeline(pca, scaling, svc)
clfs.append(clf)
return clfs
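# Hedged usage sketch (assumption: the hyperparameter ranges are illustrative,
# not taken from the repo):
#   C_range = np.logspace(-2, 2, 5)
#   gamma_range = np.logspace(-4, 0, 5)
#   clfs = hp_grid(n_components=180, C_range=C_range, gamma_range=gamma_range)
#   fitted = train_grid(clfs, train_x, train_y)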
def train_grid(clfs, inputs, targets):
"""Trains classifiers in a list; returns list of trained classifiers."""
fitted_clfs=[]
for i in range(len(clfs)):
x=clfs[i].fit(inputs, targets)
fitted_clfs.append(x)
print('Fitted: ', i+1, '/', len(clfs))
return fitted_clfs
def predict_eval(clf, inputs, targets, training=False):
"""Given a classifier and inputs, returns predictions and evaluated classifier accuracy."""
preds=clf.predict(inputs)
num_correct=torch.eq(torch.from_numpy(preds), targets).sum().item()
acc=(num_correct/len(targets))*100
if training:
print('C: ', clf.get_params(deep=True)['svc__C'], 'gamma: ', clf.get_params(deep=True)['svc__gamma'])
print('Training Accuracy: ', acc)
else:
print('Testing Accuracy: ', acc)
return preds, acc
def maxacc_gen(test_accs, train_accs, clfs):
"""Finds and returns model with highest test accuracy and model with train/test accuracy ratio closest to 1."""
test=np.array(test_accs)
train=np.array(train_accs)
maxacc=clfs[np.argmax(test)]
gen=clfs[np.argmin(train-test)]
return maxacc, gen
def save_proba(fn, pipe, inputs, targets):
"""Fits svm with probabilities and saves to disk."""
params=pipe.get_params(deep=True)
    pca=PCA(n_components=180)  # note: n_components is hardcoded here rather than read from the fitted pipe
scaling = MinMaxScaler(feature_range=(-1,1))
pipe_prob=make_pipeline(pca, scaling, svm.SVC(C=params['svc__C'], gamma=params['svc__gamma'], probability=True))
pipe_prob.fit(inputs, targets)
joblib.dump(pipe_prob, fn)
def load_svm(directory, gen=True):
"""Returns loaded SVM saved with classification baselines.
'gen' : Model with train/test accuracy ratio closest to 1.
'maxacc' : Model with highest test accuracy."""
    if gen:
        clf='gen'
    else:
        clf='maxacc'
dataset=directory.split('/')[-1]
path='SVM' + dataset + '_' + clf + '_proba.pkl'
svm=joblib.load(os.path.join(directory, path))
return svm
def class_acc(preds, targets, classes):
"Returns classifier accuracy for each class."
correct=0
class_correct=np.zeros(len(classes))
class_total=np.zeros(len(classes))
for j in range(len(targets)):
class_total[targets[j]]+=1
if np.argmax(preds[j])==targets[j]:
class_correct[targets[j]]+=1
correct+=1
class_accuracies=(class_correct/class_total)*100
accuracy=(correct/len(targets))*100
for i in range(len(class_accuracies)):
print('Accuracy of', classes[i], ': ', class_accuracies[i], '%')
print('Total Accuracy: ', accuracy, '%')
| 3,655 | 30.791304 | 116 | py |
cyphercat | cyphercat-master/Utils/data_downloaders.py | import os, shutil
import urllib.request
import zipfile
import tarfile
# For LibriSpeech download
import codecs
import fnmatch
import requests
import subprocess
import unicodedata
from tensorflow.python.platform import gfile
def get_tiny_imagenet(datasets_dir):
if os.path.isdir(os.path.join(datasets_dir,'tiny-imagenet-200/val/images/')):
os.rmdir(os.path.join(datasets_dir,'tiny-imagenet-200/val/images/'))
if os.path.isdir(os.path.join(datasets_dir,'tiny-imagenet-200')):
print('Tiny ImageNet already downloaded.')
return
if not os.path.isdir(datasets_dir):
os.makedirs(datasets_dir)
print('Downloading Tiny ImageNet')
url = 'http://cs231n.stanford.edu/tiny-imagenet-200.zip'
urllib.request.urlretrieve(url, os.path.join(datasets_dir,'tiny-imagenet-200.zip'))
z = zipfile.ZipFile(os.path.join(datasets_dir,'tiny-imagenet-200.zip'), 'r')
z.extractall(datasets_dir)
z.close()
train_dir = os.path.join(datasets_dir,'tiny-imagenet-200/train')
class_dirs = [os.path.join(train_dir, o) for o in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir,o))]
for c in class_dirs:
for f in os.listdir(os.path.join(c,'images')):
os.rename(os.path.join(c,'images',f), os.path.join(c,f))
for d in os.listdir(c):
if d.find("JPEG") == -1:
if os.path.isfile(os.path.join(c,d)):
os.remove(os.path.join(c,d))
elif os.path.isdir(os.path.join(c,d)):
os.rmdir(os.path.join(c,d))
with open(os.path.join(datasets_dir,'tiny-imagenet-200/val/val_annotations.txt')) as f:
content = f.readlines()
for x in content:
line = x.split()
if not os.path.exists(os.path.join(datasets_dir,'tiny-imagenet-200/val/',line[1])):
os.makedirs(os.path.join(datasets_dir,'tiny-imagenet-200/val/',line[1]))
new_file_name = os.path.join(datasets_dir,'tiny-imagenet-200/val',line[1],line[0])
old_file_name = os.path.join(datasets_dir,'tiny-imagenet-200/val/images',line[0])
os.rename(old_file_name, new_file_name)
print('Tiny ImageNet successfully downloaded and preprocessed.')
def get_lfw(datasets_dir):
if os.path.isdir(os.path.join(datasets_dir,'lfw')):
print('LFW already downloaded.')
return
if not os.path.isdir(datasets_dir):
os.makedirs(datasets_dir)
print('Downloading LFW.')
url = 'http://vis-www.cs.umass.edu/lfw/lfw.tgz'
urllib.request.urlretrieve(url, os.path.join(datasets_dir,'lfw.tgz'))
tar = tarfile.open(os.path.join(datasets_dir,'lfw.tgz'))
tar.extractall(path=os.path.join(datasets_dir,'lfw/'))
os.rename(os.path.join(datasets_dir,'lfw/lfw/'), os.path.join(datasets_dir,'lfw/lfw_original/'))
lfw_dir = os.path.join(datasets_dir,'lfw/lfw_original/')
people_dir = os.listdir(lfw_dir)
num_per_class = 20
new_dir = os.path.join(datasets_dir,'lfw/lfw_'+str(num_per_class))
if not os.path.isdir(new_dir):
os.makedirs(new_dir)
for p in people_dir:
imgs = os.listdir(os.path.join(lfw_dir,p))
if len(imgs) >= num_per_class:
shutil.copytree(os.path.join(lfw_dir,p),os.path.join(new_dir,p))
print('LFW successfully downloaded and preprocessed.')
def maybe_download(archive_name, target_dir, archive_url):
# this and below audio downloaders adapted from https://github.com/mozilla/DeepSpeech/blob/master/bin/import_librivox.py
#to run this: data_downloaders._download_and_preprocess_data('data/')
# If archive file does not exist, download it...
archive_path = os.path.join(target_dir, archive_name)
if not os.path.exists(target_dir):
print('No path "%s" - creating ...' % target_dir)
os.makedirs(target_dir)
if not os.path.exists(archive_path):
print('No archive "%s" - downloading...' % archive_path)
req = requests.get(archive_url, stream=True)
total_size = int(req.headers.get('content-length', 0))
done = 0
with open(archive_path, 'wb') as f:
for data in req.iter_content(1024*1024):
done += len(data)
f.write(data)
else:
print('Found archive "%s" - not downloading.' % archive_path)
return archive_path
def _download_and_preprocess_data(data_dir):
# Conditionally download data to data_dir
print("Downloading Librivox data set (55GB) into {} if not already present...".format(data_dir))
TRAIN_CLEAN_100_URL = "http://www.openslr.org/resources/12/train-clean-100.tar.gz"
TRAIN_CLEAN_360_URL = "http://www.openslr.org/resources/12/train-clean-360.tar.gz"
TRAIN_OTHER_500_URL = "http://www.openslr.org/resources/12/train-other-500.tar.gz"
DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
def filename_of(x): return os.path.split(x)[1]
train_clean_100 = maybe_download(filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL)
train_clean_360 = maybe_download(filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL)
train_other_500 = maybe_download(filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL)
dev_clean = maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
dev_other = maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
test_clean = maybe_download(filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL)
test_other = maybe_download(filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL)
# Conditionally extract LibriSpeech data
# We extract each archive into data_dir, but test for existence in
# data_dir/LibriSpeech because the archives share that root.
print("Extracting librivox data if not already extracted...")
LIBRIVOX_DIR = "LibriSpeech"
work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(os.path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space+1:]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
transcript = transcript.lower().strip()
flac_file = os.path.join(root, seqid + ".flac")
flac_filesize = os.path.getsize(flac_file)
files.append((os.path.abspath(flac_file), flac_filesize, transcript))
return pd.DataFrame(data=files, columns=["flac_filename", "flac_filesize", "transcript"])
| 9,016 | 38.034632 | 124 | py |
cyphercat | cyphercat-master/Utils/train.py | import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline  # needed for the Pipeline type checks below
from metrics import *
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(net, data_loader, test_loader, optimizer, criterion, n_epochs, classes=None, verbose=False):
losses = []
for epoch in range(n_epochs):
net.train()
for i, batch in enumerate(data_loader):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = net(imgs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[%d/%d][%d/%d] loss = %f" % (epoch, n_epochs, i, len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[%d/%d]" %(epoch, n_epochs))
print("Training:")
train_accuracy = eval_target_net(net, data_loader, classes=classes)
print("Test:")
test_accuracy = eval_target_net(net, test_loader, classes=classes)
#plt.plot(losses)
#plt.show()
return train_accuracy, test_accuracy
def train_attacker(attack_net, shadow, shadow_train, shadow_out, optimizer, criterion, n_epochs, k):
"""
Trains attack model (classifies a sample as in or out of training set) using
shadow model outputs (probabilities for sample class predictions).
The type of shadow model used can vary.
"""
in_predicts=[]
out_predicts=[]
losses = []
if type(shadow) is not Pipeline:
shadow_net=shadow
shadow_net.eval()
for epoch in range(n_epochs):
total = 0
correct = 0
#train_top = np.array([])
#train_top = []
train_top = np.empty((0,2))
out_top = np.empty((0,2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(shadow_train, shadow_out)):
if train_imgs.shape[0] != out_imgs.shape[0]:
break
#######out_imgs = torch.randn(out_imgs.shape)
mini_batch_size = train_imgs.shape[0]
if type(shadow) is not Pipeline:
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(shadow_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(shadow_net(out_imgs.detach()), dim=1)
else:
traininputs= train_imgs.view(train_imgs.shape[0],-1)
outinputs=out_imgs.view(out_imgs.shape[0], -1)
in_preds=shadow.predict_proba(traininputs)
train_posteriors=torch.from_numpy(in_preds).float()
#for p in in_preds:
# in_predicts.append(p.max())
out_preds=shadow.predict_proba(outinputs)
out_posteriors=torch.from_numpy(out_preds).float()
#for p in out_preds:
# out_predicts.append(p.max())
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
for p in train_top_k:
in_predicts.append((p.max()).item())
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
for p in out_top_k:
out_predicts.append((p.max()).item())
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(mini_batch_size).to(device)
optimizer.zero_grad()
train_predictions = torch.squeeze(attack_net(train_top_k))
out_predictions = torch.squeeze(attack_net(out_top_k))
loss_train = criterion(train_predictions, train_lbl)
loss_out = criterion(out_predictions, out_lbl)
loss = (loss_train + loss_out) / 2
if type(shadow) is not Pipeline:
loss.backward()
optimizer.step()
correct += (F.sigmoid(train_predictions)>=0.5).sum().item()
correct += (F.sigmoid(out_predictions)<0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
print("[%d/%d][%d/%d] loss = %.2f, accuracy = %.2f" % (epoch, n_epochs, i, len(shadow_train), loss.item(), 100 * correct / total))
#Plot distributions for target predictions in training set and out of training set
"""
fig, ax = plt.subplots(2,1)
plt.subplot(2,1,1)
plt.hist(in_predicts, bins='auto')
plt.title('In')
plt.subplot(2,1,2)
plt.hist(out_predicts, bins='auto')
plt.title('Out')
"""
'''
plt.scatter(out_top.T[0,:], out_top.T[1,:], c='b')
plt.scatter(train_top.T[0,:], train_top.T[1,:], c='r')
plt.show()
'''
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
        KD_loss = (self.alpha *
                   torch.nn.KLDivLoss(size_average=False)(
                       F.log_softmax(inputs, dim=1),
                       F.softmax(target, dim=1)) +
                   (1 - self.alpha) * F.cross_entropy(inputs, true_labels))
return KD_loss
def distill_training(teacher, learner, data_loader, test_loader, optimizer,
criterion, n_epochs, verbose=False):
"""
:param teacher: network to provide soft labels in training
:param learner: network to distill knowledge into
:param data_loader: data loader for training data set
    :param test_loader: data loader for validation data
:param optimizer: optimizer for training
:param criterion: objective function, should allow for soft labels.
We suggest softCrossEntropy
:param n_epochs: epochs for training
:param verbose: verbose == True will print loss at each batch
    :return: None, learner model is trained in place
"""
losses = []
for epoch in range(n_epochs):
teacher.eval()
learner.train()
for i, batch in enumerate(data_loader):
with torch.set_grad_enabled(False):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
                soft_labels = teacher(imgs)
with torch.set_grad_enabled(True):
optimizer.zero_grad()
outputs = learner(imgs)
                loss = criterion(outputs, soft_labels, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[%d/%d][%d/%d] loss = %f" % (epoch, n_epochs, i,
len(data_loader),
loss.item()))
# evaluate performance on testset at the end of each epoch
print("[%d/%d]" %(epoch, n_epochs))
print("Training:")
eval_target_net(learner, data_loader, classes=None)
print("Test:")
eval_target_net(learner, test_loader, classes=None)
# plt.plot(losses)
# plt.show()
| 8,087 | 35.432432 | 142 | py |
cyphercat | cyphercat-master/cyphercat/definitions.py | import os
# Local directory of CypherCat API
CYCAT_DIR = os.path.dirname(os.path.abspath(__file__))
# Local directory containing entire repo
REPO_DIR = os.path.split(CYCAT_DIR)[0]
# Local directory for datasets
DATASETS_DIR = os.path.join(REPO_DIR, 'Datasets')
# Local directory for dataset splits
DATASPLITS_DIR = os.path.join(DATASETS_DIR, 'splits')
| 350 | 24.071429 | 54 | py |
cyphercat | cyphercat-master/cyphercat/__version__.py |
__version__ = '0.1.dev0'
| 26 | 8 | 24 | py |
cyphercat | cyphercat-master/cyphercat/defenses.py | import copy
# Torch imports
import torch
import torch.optim as optim
# Local imports
from .train import train, softCrossEntropy
from .metrics import eval_target_model
def transfer_learn(model=None, data_loader=None, test_loader=None,
optimizer=None, criterion=None, lr=0, n_epochs=0,
unfreeze_layers=None, fine_tune=True, verbose=False):
"""Routine to perform transfer learning given a model and a new dataset.
Args:
model (nn.Module): Pretrained model.
data_loader (Dataloader): Dataloader pointing to training dataset.
        test_loader (Dataloader): Dataloader pointing to validation dataset.
optimizer (config): This remains to be implemented
criterion (nn.Module): Criterion for loss calculation.
lr (float): Learning rate for training.
n_epochs (int): Maximum number of epochs during training of last
layers and fine-tunning step.
        unfreeze_layers ((int, int)): Tuple with indices (first, last) of
layers to unfreeze during first stage of training.
fine_tune (bool): If true will do a second stage of training with all
of the layers unfrozen and 1/10th of original learning rate.
verbose (bool): If True will print loss at each training step.
Returns:
Todos:
        Implement generalized optimizer, loaded from configuration. Currently
        hardcoded to SGD.
"""
unfrozen = []
param_list = list()
for idx, mod in enumerate(model._modules.items()):
if unfreeze_layers[0] <= idx <= unfreeze_layers[1]:
param_list += list(mod[1].parameters())
unfrozen.append(mod[0])
for param in mod[1].parameters():
param.requires_grad = True
else:
for param in mod[1].parameters():
param.requires_grad = False
print('Training parameters in modules:')
for x in unfrozen:
print('\t %s' % x)
optimizer = optim.SGD(params=param_list, lr=lr, momentum=0.9)
train(model=model, data_loader=data_loader, test_loader=test_loader,
optimizer=optimizer, criterion=criterion, n_epochs=n_epochs,
verbose=verbose)
print('Finished training last layers, performance: \n'
'Training: %lf \nTest: %lf' % (
eval_target_model(model=model, data_loader=data_loader),
eval_target_model(model=model, data_loader=test_loader)))
if fine_tune is False:
return
print('Moving on to fine tunning entire network')
for param in model.parameters():
param.requires_grad = True
optimizer = optim.SGD(params=model.parameters(), lr=lr/10., momentum=0.9)
train(model=model, data_loader=data_loader, test_loader=test_loader,
optimizer=optimizer, criterion=criterion, n_epochs=n_epochs,
verbose=verbose)
    print('Finished fine-tuning entire network, performance: \n'
'Training: %lf \nTest: %lf' % (
eval_target_model(model=model, data_loader=data_loader),
eval_target_model(model=model, data_loader=test_loader)))
return
class dimensionality_reduction():
"""Returns a wrapped model that will return only the top-n_dim most
probable classes during inference.
"""
def __init__(self, model=None, n_top=1, break_posterior=False):
""" Initializes the wrapped model.
Args:
model (nn.Module): Original, trained model to defend.
            n_top (int): New dimensionality, i.e. the number of top ranked
                labels to return.
            break_posterior (bool): If true, will return fixed posterior
                values instead of model-calculated values.
Returns:
"""
self.model = copy.deepcopy(model)
self.n_top = n_top
self.in_eval = False
self.break_posterior = break_posterior
def __call__(self, x):
"""Calls the model on input x and returns the reduced (n_top) output
Args:
x (torch.tensor): Same as any model input
Returns:
(torch.tensor): Returns (n_top,) dimensional torch.tensor with
scores on top classes.
"""
output = self.model(x)
if self.in_eval is False:
return output
reduced = torch.zeros(output.shape)
arr = output.detach().cpu().numpy()
to_del = arr.argsort(axis=1)[:, -self.n_top:]
for idx, img in enumerate(to_del):
for idy, label in enumerate(img[::-1]):
if self.break_posterior:
reduced[idx][label] = 1./(idy+1)
else:
reduced[idx][label] = output[idx][label]
return reduced
def eval(self):
"""Sets the model and wrapper to eval mode
"""
self.in_eval = True
self.model.eval()
def train(self):
"""Sets the model and wrapped to train mode
"""
self.in_eval = False
self.model.train()
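# Example usage (illustrative, not part of the original source); `trained_net`
# and `x` are hypothetical placeholders for a trained classifier and an input
# batch:
#
#   defended = dimensionality_reduction(model=trained_net, n_top=3)
#   defended.eval()
#   scores = defended(x)  # zeros everywhere except the 3 most likely classes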
def distill_model(teacher=None, student=None, data_loader=None,
test_loader=None, optimizer=None,
criterion=softCrossEntropy(), n_epochs=0, T=1.,
verbose=False):
"""Performs defensive distillation at desired temperature
Args:
teacher (nn.Module): Teacher model used to in distillation.
student (nn.Module): Student model into which to distill. If left as
None will copy and randomly initialize the teacher.
data_loader (Dataloader): Dataloader pointing to training dataset.
        test_loader (Dataloader): Dataloader pointing to validation dataset.
optimizer (nn.optim): Optimizer for distillation.
criterion (nn.Module): Criterion for loss calculation. Default is
softCrossEntropy(alpha = 0.95)
n_epochs (int): Maximum number of epochs during distillation.
T (int): Distillation temperature. Assumes the teacher was trained at
the same temperature.
verbose (bool): If True will output loss at each training step.
"""
| 6,149 | 37.198758 | 78 | py |
cyphercat | cyphercat-master/cyphercat/models.py | import os
import torch
import numpy as np
from torch import nn
import torch.nn.functional as fcnal
def new_size_conv(size, kernel, stride=1, padding=0):
"""Calculates the output size of a convolutional layer
Args:
size (int): Size of input (assumed square).
kernel (int): kernel size for convolution (assumed square).
stride (int): Convolution stride.
padding (int): Padding used in convolution.
Returns:
        (int): Returns the output size of a theoretical convolution.
"""
return np.floor((size + 2*padding - (kernel - 1)-1)/stride + 1)
def new_size_max_pool(size, kernel, stride=None, padding=0):
"""Calculates the output size of a maxpool operation.
Args:
size (int): Input size (assumed square).
kernel (int): Maxpool kernel size (assumed square).
stride (int): Maxpool stride.
padding (int): Maxpool padding.
Returns:
        (int): Returns the output size of a theoretical maxpool layer.
"""
if stride is None:
stride = kernel
return np.floor((size + 2*padding - (kernel - 1)-1)/stride + 1)
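# Illustrative example (not in the original source): chaining the helpers
# above to trace a 32x32 input through the first conv + maxpool stage of
# the AlexNet class below.
#
#   s = new_size_conv(32, kernel=6, stride=3, padding=2)  # -> 11.0
#   s = new_size_max_pool(s, kernel=3, stride=2)          # -> 5.0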
class AlexNet(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=32):
super(AlexNet, self).__init__()
n_h1 = 3 * n_filters
n_h2 = 2 * n_h1
self.features = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=6, stride=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(n_filters, n_h1, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(n_h1, n_h2, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(n_h2, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
        # cast to int: the size helpers above return numpy floats
        out_feat_size = int(self.calc_alexnet_size(size))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * out_feat_size * out_feat_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, n_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def calc_alexnet_size(self, size):
x = new_size_conv(size, 6, 3, 2)
x = new_size_max_pool(x, 3, 2)
x = new_size_conv(x, 5, 1, 2)
x = new_size_max_pool(x, 3, 2)
x = new_size_conv(x, 3, 1, 1)
x = new_size_conv(x, 3, 1, 1)
x = new_size_conv(x, 3, 1, 1)
out = new_size_max_pool(x, 2, 2)
return out
class tiny_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=64):
super(tiny_cnn, self).__init__()
self.size = size
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.BatchNorm2d(2*n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * (self.size//4) * (self.size//4),
2*n_filters)
self.output = nn.Linear(2*n_filters, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(x.size(0), -1)
# x = x.view(-1, 2*self.n_filters * (self.size//4) * (self.size//4))
x = self.fc(x)
out = self.output(x)
return out
class mlleaks_cnn(nn.Module):
def __init__(self, n_in=3, n_classes=10, n_filters=64, size=128):
super(mlleaks_cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, 2*n_filters, kernel_size=5, stride=1,
padding=2),
nn.BatchNorm2d(2*n_filters),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(2*n_filters * 8 * 8, size)
        # in_features must match self.fc's output width (`size`)
        self.output = nn.Linear(size, n_classes)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, 2*self.n_filters * 8 * 8)
x = self.fc(x)
out = self.output(x)
return out
class mlleaks_mlp(nn.Module):
def __init__(self, n_in=3, n_classes=1, n_filters=64, size=64):
super(mlleaks_mlp, self).__init__()
self.hidden = nn.Linear(n_in, n_filters)
# self.bn = nn.BatchNorm1d(n_filters)
self.output = nn.Linear(n_filters, n_classes)
def forward(self, x):
x = fcnal.sigmoid(self.hidden(x))
# x = self.bn(x)
        out = fcnal.sigmoid(self.output(x))
return out
class cnn(nn.Module):
def __init__(self, n_in, n_classes, n_filters, size):
super(cnn, self).__init__()
self.n_filters = n_filters
self.conv_block_1 = nn.Sequential(
nn.Conv2d(n_in, n_filters, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters, height/2, width/2]
self.conv_block_2 = nn.Sequential(
nn.Conv2d(n_filters, n_filters*2, kernel_size=3, padding=1),
nn.BatchNorm2d(n_filters*2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
# shape = [Batch_size, n_filters*2, height/4, width/4]
self.dense_block_1 = nn.Sequential(
# nn.Linear(n_filters * 2 * 8 * 8, 64),
nn.Linear(n_filters*2 * 8 * 8, 128),
# nn.BatchNorm1d(64),
# nn.ReLU(inplace=True)
)
        # shape = [Batch_size, 128]
        self.dense_block_2 = nn.Sequential(
            # in_features matches dense_block_1's output width (128)
            nn.Linear(128, 32),
nn.BatchNorm1d(32),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(32, n_classes),
nn.BatchNorm1d(n_classes)
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = x.view(-1, self.n_filters*2 * 8 * 8)
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class mlp(nn.Module):
def __init__(self, n_in, n_classes, n_filters, size):
super(mlp, self).__init__()
self.n_filters = n_filters
# shape = [Batch_size, k (top k posteriors)]
self.dense_block_1 = nn.Sequential(
nn.Linear(n_in, n_filters*2),
# nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, n_filters*2]
self.dense_block_2 = nn.Sequential(
nn.Linear(n_filters*2, n_filters*2),
# nn.BatchNorm1d(n_filters*2),
nn.ReLU(inplace=True)
)
# shape = [Batch_size, 32]
self.dense_block_3 = nn.Sequential(
nn.Linear(n_filters*2, n_classes),
# nn.BatchNorm1d(n_classes),
nn.Sigmoid()
)
# shape = [Batch_size, 10]
def forward(self, x):
x = self.dense_block_1(x)
x = self.dense_block_2(x)
out = self.dense_block_3(x)
return out
class audio_cnn_block(nn.Module):
'''
1D convolution block used to build audio cnn classifiers
Args:
input: input channels
output: output channels
kernel_size: convolution kernel size
'''
def __init__(self, n_input, n_out, kernel_size):
super(audio_cnn_block, self).__init__()
self.cnn_block = nn.Sequential(
nn.Conv1d(n_input, n_out, kernel_size, padding=1),
nn.BatchNorm1d(n_out),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=4)
)
def forward(self, x):
return self.cnn_block(x)
class audio_tiny_cnn(nn.Module):
'''
Template for convolutional audio classifiers.
'''
def __init__(self, cnn_sizes, n_hidden, kernel_size, n_classes):
'''
Init
Args:
cnn_sizes: List of sizes for the convolution blocks
n_hidden: number of hidden units in the first fully connected layer
kernel_size: convolution kernel size
n_classes: number of speakers to classify
'''
super(audio_tiny_cnn, self).__init__()
self.down_path = nn.ModuleList()
self.down_path.append(audio_cnn_block(cnn_sizes[0], cnn_sizes[1],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[1], cnn_sizes[2],
kernel_size,))
self.down_path.append(audio_cnn_block(cnn_sizes[2], cnn_sizes[3],
kernel_size,))
self.fc = nn.Sequential(
nn.Linear(cnn_sizes[4], n_hidden),
nn.ReLU()
)
self.out = nn.Linear(n_hidden, n_classes)
def forward(self, x):
for down in self.down_path:
x = down(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return self.out(x)
def MFCC_cnn_classifier(n_classes=125):
'''
Builds speaker classifier that ingests MFCC's
'''
in_size = 20
n_hidden = 512
sizes_list = [in_size, 2*in_size, 4*in_size, 8*in_size, 8*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
kernel_size=3, n_classes=n_classes)
def ft_cnn_classifer(n_classes=125):
'''
Builds speaker classifier that ingests the abs value of fourier transforms
'''
in_size = 94
n_hidden = 512
sizes_list = [in_size, in_size, 2*in_size, 4*in_size, 14*4*in_size]
return audio_tiny_cnn(cnn_sizes=sizes_list, n_hidden=n_hidden,
kernel_size=7, n_classes=n_classes)
def weights_init(m):
"""
Initializes weights of layers of model m
"""
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
# Dictionary for access of models defined above
PREDEF_MODELS = {"alexnet" : AlexNet,
"cnn" : cnn,
"tiny_cnn" : tiny_cnn,
"mlleaks_cnn" : mlleaks_cnn,
"mlp" : mlp,
"mlleaks_mlp" : mlleaks_mlp}
def get_predef_model(name=""):
"""
    Convenience function for retrieving predefined model arch
Parameters
----------
name : {'alexnet', 'cnn', 'tiny_cnn', 'mlleaks_cnn', 'mlp', 'mlleaks_mlp'}
Name of model
Returns
-------
model : Model
Predefined model arch
"""
name = name.lower()
if name in PREDEF_MODELS:
model = PREDEF_MODELS[name]
return model
else:
raise ValueError('Invalid predefined model, {}, requested.'
' Must be in {}'.format(name, PREDEF_MODELS.keys()))
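# Example usage (illustrative, not part of the original source):
#
#   Model = get_predef_model('tiny_cnn')
#   net = Model(n_in=3, n_classes=10, n_filters=64, size=64)
#   net.apply(weights_init)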
def save_checkpoint(model=None, optimizer=None, epoch=None,
data_descriptor=None, loss=None, accuracy=None, path='./',
filename='checkpoint', ext='.pth.tar'):
"""Saves model and optimizer state to a desired checkpoint file.
Args:
model (nn.Module): Model to save.
optimizer (nn.optim): Optimizer used to train the model.
epoch (int): Training epoch of current model and optimizer state.
data_descriptor (str): Description of the data used to train the model.
loss (int): Model loss at last training step.
accuracy (list(int)): List of model training, validation... accuracy.
path (str): Path to desired directory for checkpoint.
filename (str): Checkpoint name.
ext (str): Extension for checkpoint file (suggested 'pth.tar' or 'pth')
Returns:
"""
state = {
'epoch': epoch,
        'arch': type(model).__name__,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'loss': loss,
'accuracy': accuracy,
'dataset': data_descriptor
}
torch.save(state, path+filename+ext)
def load_checkpoint(model=None, optimizer=None, checkpoint=None):
"""Loads a checkpoint into a model saved using save_chekcpoint function.
Args:
model (nn.Module): Model into which to load the weights (should match
            the saved architecture).
optimizer (nn.optim): Optimizer into which to load the saved optimizer
state.
checkpoint (str): Path to the checkpoint file.
Returns:
        (dict): Returns the loaded dictionary
"""
assert os.path.isfile(checkpoint), 'Checkpoint not found, aborting load'
chpt = torch.load(checkpoint)
    assert type(model).__name__ == chpt['arch'], 'Model architecture mismatch,\
    aborting load'
model.load_state_dict(chpt['state_dict'])
if optimizer is not None:
        optimizer.load_state_dict(chpt['optimizer'])
    print('Successfully loaded checkpoint \nDataset: {} \nEpoch: {} \nLoss: {}\
\nAccuracy: {}'.format(chpt['dataset'], chpt['epoch'], chpt['loss'],
chpt['accuracy']))
return chpt
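# Example usage (illustrative, not part of the original source); the file
# name and metric values are hypothetical placeholders:
#
#   save_checkpoint(model=net, optimizer=opt, epoch=10,
#                   data_descriptor='CIFAR-10', loss=0.31,
#                   accuracy=[91.2, 84.7], filename='tiny_cnn_cifar10')
#   chpt = load_checkpoint(model=net, optimizer=None,
#                          checkpoint='./tiny_cnn_cifar10.pth.tar')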
| 14,856 | 31.509847 | 79 | py |
cyphercat | cyphercat-master/cyphercat/metrics.py | import torch
import torch.nn.functional as fcnal
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_model(model=None, data_loader=None, classes=None):
"""
Function to evaluate a target model provided
specified data sets.
Parameters
----------
model : Module
PyTorch conforming nn.Module function
data_loader : DataLoader
PyTorch dataloader function
classes : list
list of classes
Returns
-------
accuracy : float
accuracy of target model
"""
if classes is not None:
n_classes = len(classes)
class_correct = np.zeros(n_classes)
class_total = np.zeros(n_classes)
total = 0
correct = 0
with torch.no_grad():
model.eval()
for i, (imgs, lbls) in enumerate(data_loader):
imgs, lbls = imgs.to(device), lbls.to(device)
output = model(imgs)
predicted = output.argmax(dim=1)
total += imgs.size(0)
correct += predicted.eq(lbls).sum().item()
if classes is not None:
for prediction, lbl in zip(predicted, lbls):
class_correct[lbl] += prediction == lbl
class_total[lbl] += 1
accuracy = 100*(correct/total)
if classes is not None:
for i in range(len(classes)):
            print('Accuracy of {} : {:.2f} %'
.format(classes[i],
100 * class_correct[i] / class_total[i]))
print("\nAccuracy = {:.2f} %%\n\n".format(accuracy))
return accuracy
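# Example usage (illustrative, not part of the original source); `net`,
# `test_loader`, and the class names are hypothetical placeholders:
#
#   classes = ('plane', 'car', 'bird', 'cat')
#   acc = eval_target_model(model=net, data_loader=test_loader,
#                           classes=classes)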
def eval_attack_model(attack_model=None, target=None,
target_train=None, target_out=None, k=0, verbose=False):
"""
Assess accuracy, precision, and recall of attack model
for in training set/out of training set classification.
Edited for use with SVCs.
Parameters
----------
attack_model : Module
PyTorch conforming nn.Module function
target : Module
PyTorch conforming nn.Module function
target_train : DataLoader
PyTorch dataloader function
target_out : DataLoader
PyTorch dataloader function
k : int
Value at which to end using train data list
"""
in_predicts = []
out_predicts = []
if type(target) is not Pipeline:
target_model = target
target_model.eval()
attack_model.eval()
precisions = []
recalls = []
accuracies = []
# For threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
train_top = np.empty((0, 2))
out_top = np.empty((0, 2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train,
target_out)):
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
# [mini_batch_size x num_classes] tensors,
# (0,1) probabilities for each class for each sample)
if type(target) is Pipeline:
traininputs = train_imgs.view(train_imgs.shape[0], -1)
outinputs = out_imgs.view(out_imgs.shape[0], -1)
train_posteriors = torch.from_numpy(
target.predict_proba(traininputs)).float()
out_posteriors = torch.from_numpy(
target.predict_proba(outinputs)).float()
else:
train_posteriors = fcnal.softmax(target_model(
train_imgs.detach()), dim=1)
out_posteriors = fcnal.softmax(target_model(
out_imgs.detach()), dim=1)
# [k x mini_batch_size] tensors,
# (0,1) probabilities for top k probable classes
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:, :k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:, :k].clone().to(device)
# Collects probabilities for predicted class.
for p in train_top_k:
in_predicts.append((p.max()).item())
for p in out_top_k:
out_predicts.append((p.max()).item())
if type(target) is not Pipeline:
train_top = np.vstack((train_top,
train_top_k[:, :2].cpu().detach().numpy()))
out_top = np.vstack((out_top,
out_top_k[:, :2].cpu().detach().numpy()))
# Takes in probabilities for top k most likely classes,
# outputs ~1 (in training set) or ~0 (out of training set)
train_predictions = torch.squeeze(attack_model(train_top_k))
out_predictions = torch.squeeze(attack_model(out_top_k))
for j, t in enumerate(thresholds):
true_positives[j] += (train_predictions >= t).sum().item()
false_positives[j] += (out_predictions >= t).sum().item()
false_negatives[j] += (train_predictions < t).sum().item()
correct[j] += (train_predictions >= t).sum().item()
correct[j] += (out_predictions < t).sum().item()
total[j] += train_predictions.size(0) + out_predictions.size(0)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = (true_positives[j] / (true_positives[j] +
false_positives[j])
if true_positives[j] + false_positives[j] != 0 else 0)
recall = (true_positives[j] / (true_positives[j] + false_negatives[j])
if true_positives[j] + false_negatives[j] != 0 else 0)
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
if verbose:
print("threshold = %.4f, acc. = %.2f, precision = %.2f, \
recall = %.2f" % (t, accuracy, precision, recall))
# Make a dataframe of precision & recall results
data = np.transpose([thresholds, accuracies, precisions, recalls])
df_pr = pd.DataFrame(columns=['Thresholds', 'Accuracy', 'Precision',
'Recall'], data=data)
return df_pr
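# Example usage (illustrative, not part of the original source): run the
# evaluation and inspect the threshold with the best attack accuracy. All
# models and loaders below are hypothetical placeholders:
#
#   df_pr = eval_attack_model(attack_model=attacker, target=target_net,
#                             target_train=in_loader, target_out=out_loader,
#                             k=3)
#   print(df_pr.loc[df_pr['Accuracy'].idxmax()])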
def eval_membership_inference(target_model=None,
target_train=None, target_out=None):
"""
Function to evaluate a target model for
membership inference.
Parameters
----------
target_model : Module
PyTorch conforming nn.Module function
target_train : DataLoader
PyTorch dataloader function
target_out : DataLoader
PyTorch dataloader function
"""
target_model.eval()
precisions = []
recalls = []
accuracies = []
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train,
target_out)):
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = fcnal.softmax(
target_model(train_imgs.detach()), dim=1)
out_posteriors = fcnal.softmax(
target_model(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top = train_sort[:, 0].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top = out_sort[:, 0].clone().to(device)
for j, t in enumerate(thresholds):
true_positives[j] += (train_top >= t).sum().item()
false_positives[j] += (out_top >= t).sum().item()
false_negatives[j] += (train_top < t).sum().item()
correct[j] += (train_top >= t).sum().item()
correct[j] += (out_top < t).sum().item()
total[j] += train_top.size(0) + out_top.size(0)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = 0
if true_positives[j] + false_positives[j] != 0:
precision = (true_positives[j] / (true_positives[j]
+ false_positives[j]))
recall = 0
if true_positives[j] + false_negatives[j] != 0:
recall = (true_positives[j] / (true_positives[j]
+ false_negatives[j]))
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = {:.4f}, accuracy = {:.2f},"
"precision = {:.2f}, recall = {:.2f}"
.format(t, accuracy, precision, recall))
| 9,273 | 33.221402 | 78 | py |
cyphercat | cyphercat-master/cyphercat/load_data.py | import os
from .utils.file_utils import downloader, unpacker
def prep_data(data_struct=None):
"""
Function to prepare data set
based on input configuration
Parameters
----------
data_struct : config structure
parameters from 'data' field
of global yaml configuration file
"""
data_name = data_struct.name
datasets_dir = data_struct.data_path
out_dir = data_struct.save_path
    # If dataset already downloaded and unpacked, do nothing
if os.path.isdir(out_dir):
print('{} already downloaded, unpacked and processed.'
.format(data_name))
return
# Download and unpack any required dataset files
url_list = data_struct.url
for data_url in url_list:
# Check if download is required
compressed_file_name = downloader(datasets_dir, data_url)
# Unpack compressed dataset file
unpacker(compressed_file_name, out_dir)
# OBSOLETE, KEEP UNTIL TINYIMAGENET DATASET IS INCLUDED
# def custom_preprocessor(out_dir=''):
# """
# Custom preprocessing functions for
# specific data sets.
#
# Parameters
# ----------
# out_dir : string
# directory of unpacked data set
# """
#
# # Get name of data set from output directory
# data_name = os.path.split(out_dir)[1]
#
# # For tiny-imagenet-200
# if 'tiny' in data_name.lower():
#
# # Structure the training, validation, and test data directories
# train_dir = os.path.join(out_dir, 'train')
# class_dirs = [os.path.join(train_dir, o) for
# o in os.listdir(train_dir)
# if os.path.isdir(os.path.join(train_dir, o))]
# NOTE: ABOVE NEEDS TO BE FIXED IF THIS CODE IS USED (TOO LONG)
#
# for c in class_dirs:
# for f in os.listdir(os.path.join(c, 'images')):
# os.rename(os.path.join(c, 'images', f), os.path.join(c, f))
# for d in os.listdir(c):
# if d.find("JPEG") == -1:
# if os.path.isfile(os.path.join(c, d)):
# os.remove(os.path.join(c, d))
# elif os.path.isdir(os.path.join(c, d)):
# os.rmdir(os.path.join(c, d))
#
# # Get validation annotations
# with open(os.path.join(out_dir, 'val/val_annotations.txt')) as f:
# content = f.readlines()
#
# for x in content:
# line = x.split()
#
# if not os.path.exists(os.path.join(out_dir, 'val/', line[1])):
# os.makedirs(os.path.join(out_dir, 'val/', line[1]))
#
# new_file_name = os.path.join(out_dir, 'val', line[1], line[0])
# old_file_name = os.path.join(out_dir, 'val/images', line[0])
# os.rename(old_file_name, new_file_name)
#
# print('{} successfully downloaded and preprocessed.'.format(data_name))
| 2,903 | 32.37931 | 76 | py |
cyphercat | cyphercat-master/cyphercat/__init__.py | # __init__.py
from .__version__ import __version__
from .utils import *
from .train import *
from .models import *
from .metrics import *
from .attacks import *
from .datadefs import *
from .defenses import *
from .load_data import *
from .definitions import *
from .datadefs.libri_dataset import *
from .datadefs.voices_dataset import * | 339 | 21.666667 | 38 | py |
cyphercat | cyphercat-master/cyphercat/train.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as fcnal
from sklearn.pipeline import Pipeline
from .metrics import eval_target_model
# Determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def label_to_onehot(labels, num_classes=10):
""" Converts label into a vector.
Args:
labels (int): Class label to convert to tensor.
num_classes (int): Number of classes for the model.
Returns:
(torch.tensor): Torch tensor with 0's everywhere except for 1 in
correct class.
"""
one_hot = torch.eye(num_classes)
return one_hot[labels.long()]
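# Example (illustrative, not part of the original source):
#
#   lbls = torch.tensor([2., 0.])
#   label_to_onehot(lbls, num_classes=3)
#   # -> tensor([[0., 0., 1.],
#   #            [1., 0., 0.]])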
def train(model=None, data_loader=None, test_loader=None,
optimizer=None, criterion=None, n_epochs=0,
classes=None, verbose=False):
"""
Function to train a model provided
specified train/test sets and associated
training parameters.
Parameters
----------
model : Module
PyTorch conforming nn.Module function
data_loader : DataLoader
PyTorch dataloader function
test_loader : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
classes : list
list of classes
verbose : boolean
flag for verbose print statements
"""
losses = []
for epoch in range(n_epochs):
model.train()
for i, batch in enumerate(data_loader):
data, labels = batch
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(model, data_loader, classes=classes)
print("Test:")
test_acc = eval_target_model(model, test_loader, classes=classes)
# plt.plot(losses)
# plt.show()
return train_acc, test_acc
def train_attacker(attack_model=None, shadow_model=None,
shadow_train=None, shadow_out=None,
optimizer=None, criterion=None, n_epochs=0, k=0,
verbose=False):
"""
Trains attack model (classifies a sample as in or
out of training set) using shadow model outputs
(probabilities for sample class predictions).
The type of shadow model used can vary.
Parameters
----------
attack_model : Module
PyTorch conforming nn.Module function
shadow_model : Module
PyTorch conforming nn.Module function
shadow_train : DataLoader
PyTorch dataloader function
shadow_out : DataLoader
PyTorch dataloader function
optimizer : opt object
PyTorch conforming optimizer function
criterion : loss object
PyTorch conforming loss function
n_epochs : int
number of training epochs
k : int
Value at which to end using train data list
"""
in_predicts = []
out_predicts = []
    if type(shadow_model) is not Pipeline:
        shadow_model.eval()
for epoch in range(n_epochs):
total = 0
correct = 0
train_top = np.empty((0, 2))
out_top = np.empty((0, 2))
for i, ((train_data, train_lbls),
(out_data, out_lbls)) in enumerate(zip(shadow_train,
shadow_out)):
# out_data = torch.randn(out_data.shape)
mini_batch_size = train_data.shape[0]
out_mini_batch_size = out_data.shape[0]
if mini_batch_size != out_mini_batch_size:
continue
'''if mini_batch_size != out_mini_batch_size:
break'''
if type(shadow_model) is not Pipeline:
train_data = train_data.to(device).detach()
out_data = out_data.to(device).detach()
train_posteriors = fcnal.softmax(shadow_model(train_data),
dim=1)
out_posteriors = fcnal.softmax(shadow_model(out_data),
dim=1)
else:
traininputs = train_data.view(train_data.shape[0], -1)
outinputs = out_data.view(out_data.shape[0], -1)
in_preds = shadow_model.predict_proba(traininputs)
train_posteriors = torch.from_numpy(in_preds).float()
# for p in in_preds:
# in_predicts.append(p.max())
out_preds = shadow_model.predict_proba(outinputs)
out_posteriors = torch.from_numpy(out_preds).float()
# for p in out_preds:
# out_predicts.append(p.max())
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:, :k].clone().to(device)
for p in train_top_k:
in_predicts.append((p.max()).item())
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:, :k].clone().to(device)
for p in out_top_k:
out_predicts.append((p.max()).item())
train_top = np.vstack((train_top,
train_top_k[:, :2].cpu().detach().numpy()))
out_top = np.vstack((out_top,
out_top_k[:, :2].cpu().detach().numpy()))
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
optimizer.zero_grad()
train_predictions = torch.squeeze(attack_model(train_top_k))
out_predictions = torch.squeeze(attack_model(out_top_k))
loss_train = criterion(train_predictions, train_lbl)
loss_out = criterion(out_predictions, out_lbl)
loss = (loss_train + loss_out) / 2
if type(shadow_model) is not Pipeline:
loss.backward()
optimizer.step()
correct += (train_predictions >= 0.5).sum().item()
correct += (out_predictions < 0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
if verbose:
print("[{}/{}][{}/{}] loss = {:.2f}, accuracy = {:.2f}"
.format(epoch, n_epochs, i, len(shadow_train),
loss.item(), 100 * correct / total))
# Plot distributions for target predictions
# in training set and out of training set
"""
fig, ax = plt.subplots(2,1)
plt.subplot(2,1,1)
plt.hist(in_predicts, bins='auto')
plt.title('In')
plt.subplot(2,1,2)
plt.hist(out_predicts, bins='auto')
plt.title('Out')
"""
'''
plt.scatter(out_top.T[0,:], out_top.T[1,:], c='b')
plt.scatter(train_top.T[0,:], train_top.T[1,:], c='r')
plt.show()
'''
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
KD_loss = self.alpha
KD_loss *= nn.KLDivLoss(size_average=False)(
fcnal.log_softmax(inputs, dim=1),
fcnal.softmax(target, dim=1)
)
KD_loss += (1-self.alpha)*fcnal.cross_entropy(inputs, true_labels)
return KD_loss
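# Example usage (illustrative, not part of the original source): a
# distillation criterion mixing soft teacher logits with hard labels.
# `student_logits`, `teacher_logits`, and `hard_labels` are hypothetical:
#
#   criterion = softCrossEntropy(alpha=0.95)
#   loss = criterion(student_logits, teacher_logits, hard_labels)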
def distill_training(teacher=None, learner=None, data_loader=None,
test_loader=None, optimizer=None,
criterion=None, n_epochs=0, verbose=False):
"""
:param teacher: network to provide soft labels in training
:param learner: network to distill knowledge into
:param data_loader: data loader for training data set
    :param test_loader: data loader for validation data
:param optimizer: optimizer for training
:param criterion: objective function, should allow for soft labels.
We suggest softCrossEntropy
:param n_epochs: epochs for training
:param verbose: verbose == True will print loss at each batch
:return: None, teacher model is trained in place
"""
losses = []
for epoch in range(n_epochs):
teacher.eval()
learner.train()
for i, batch in enumerate(data_loader):
with torch.set_grad_enabled(False):
data, labels = batch
data, labels = data.to(device), labels.to(device)
                soft_labels = teacher(data)
with torch.set_grad_enabled(True):
optimizer.zero_grad()
outputs = learner(data)
                loss = criterion(outputs, soft_labels, labels)
loss.backward()
optimizer.step()
losses.append(loss.item())
if verbose:
print("[{}/{}][{}/{}] loss = {}"
.format(epoch, n_epochs, i,
len(data_loader), loss.item()))
# evaluate performance on testset at the end of each epoch
print("[{}/{}]".format(epoch, n_epochs))
print("Training:")
train_acc = eval_target_model(learner, data_loader, classes=None)
print("Testing:")
test_acc = eval_target_model(learner, test_loader, classes=None)
return train_acc, test_acc
def inf_adv_train(target_model=None, inf_model=None, train_set=None,
test_set=None, inf_in_set=None, target_optim=None,
target_criterion=None, inf_optim=None, inf_criterion=None,
n_epochs=0, privacy_theta=0, verbose=False):
"""Method to run adversarial training during membership inference
Args:
target_model (nn.Module): Target classifier to adversarially train.
inf_model (nn.Module): Adversary attacking the target during training.
        train_set (DataLoader): DataLoader pointing to the classifier training
set (split[0]).
        test_set (DataLoader): DataLoader pointing to the validation set. Also
used as out-of-set for the inference (split[1]).
inf_in_set (DataLoader): Data loader pointing to a subset of the
train_set used for inference in-set (split[4])
target_optim (torch.optim): Target optimizer.
target_criterion (nn.Module): Target loss criterion.
inf_optim (torch.optim): Adversary optimizer.
inf_criterion (nn.Module): Adversary loss criterion.
privacy_theta (float): Regularization constant. Sets relative
importance of classification loss vs. adversarial loss.
        verbose (bool): If True will print the loss at each step in training.
Returns:
Example:
Todos:
Include example.
"""
# inf_losses = []
# losses = []
inf_model.train()
target_model.train()
for epoch in range(n_epochs):
train_top = np.array([])
out_top = np.array([])
train_p = np.array([])
out_p = np.array([])
total_inference = 0
total_correct_inference = 0
for k_count, ((in_data, _), (out_data, _)) in enumerate(zip(inf_in_set,
test_set)):
# train inference network
in_data, out_data = in_data.to(device), out_data.to(device)
mini_batch_size = in_data.shape[0]
out_mini_batch_size = out_data.shape[0]
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(out_mini_batch_size).to(device)
train_posteriors = fcnal.softmax(target_model(in_data), dim=1)
out_posteriors = fcnal.softmax(target_model(out_data), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
out_sort, _ = torch.sort(out_posteriors, descending=True)
t_p = train_sort[:, :4].cpu().detach().numpy().flatten()
o_p = out_sort[:, :4].cpu().detach().numpy().flatten()
train_p = np.concatenate((train_p, t_p))
out_p = np.concatenate((out_p, o_p))
train_top = np.concatenate((train_top,
train_sort[:, 0].cpu().
detach().numpy()))
out_top = np.concatenate((out_top,
out_sort[:, 0].cpu().detach().numpy()))
inf_optim.zero_grad()
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbl).to(device))
train_inference = torch.squeeze(train_inference)
#
out_inference = inf_model(out_posteriors,
label_to_onehot(out_lbl).to(device))
out_inference = torch.squeeze(out_inference)
#
total_inference += 2*mini_batch_size
total_correct_inference += torch.sum(train_inference > 0.5).item()
total_correct_inference += torch.sum(out_inference < 0.5).item()
loss_train = inf_criterion(train_inference, train_lbl)
loss_out = inf_criterion(out_inference, out_lbl)
loss = privacy_theta * (loss_train + loss_out)/2
loss.backward()
inf_optim.step()
# train classifiction network
        train_imgs, train_lbls = next(iter(train_set))  # .next() is Python 2 only
train_imgs, train_lbls = train_imgs.to(device), train_lbls.to(device)
target_optim.zero_grad()
outputs = target_model(train_imgs)
train_posteriors = fcnal.softmax(outputs, dim=1)
loss_classification = target_criterion(outputs, train_lbls)
        # size the labels from this batch; mini_batch_size is left over from
        # the inference loop above and may differ on the last batch
        train_lbl = torch.ones(train_imgs.shape[0]).to(device)
train_inference = inf_model(train_posteriors,
label_to_onehot(train_lbls).to(device))
train_inference = torch.squeeze(train_inference)
loss_infer = inf_criterion(train_inference, train_lbl)
loss = loss_classification - privacy_theta * loss_infer
loss.backward()
target_optim.step()
if verbose:
print("[{}/{}] loss = {}"
.format(epoch, n_epochs, loss.item()))
| 15,691 | 35.749415 | 79 | py |
cyphercat | cyphercat-master/cyphercat/attacks.py | # Pytorch imports
import torch
# Cyphercat imports
from .train import train, train_attacker
from .metrics import eval_membership_inference, eval_attack_model
# Device to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def ml_leaks1(target=None, shadow_model=None, attacker_model=None,
target_in_loader=None, target_out_loader=None,
shadow_train_loader=None, shadow_out_loader=None,
shadow_optim=None, attack_optim=None, shadow_criterion=None,
attack_criterion=None, shadow_epochs=0, attack_epochs=0,
classes=None, n_max_posteriors=3, retrain=True, verbose=False):
'''Implementation of ml_leaks 1 membership inference attack
    Trains a shadow network on an independent data set and then trains the
    attacker to infer membership on this shadow net. Finally, the attacker is
    used to run membership inference on the target.
Args:
target (nn.Module): Trained target network.
shadow_model (nn.Module): Shadow network to help train the attacker in
membership inference task.
attacker_model (nn.Module): Network to be trained in membership
inference task.
target_in_loader (DataLoader): DataLoader pointing to target in-data
used for testing the attack (split[4])
target_out_loader (DataLoader): Loads data pointing to target out-of-
training dataset (split[1]) used for attack evaluation.
shadow_train_loader (DataLoader): Loader for shadow_model training
(split[2]).
shadow_out_loader: Out-of-sample from shadow net, used to train the
attacker (split[3]).
shadow_optim (torch.optim): Optimizer for shadow_model training.
attack_optim (torch.optim): Optimizer for attacker_model training.
shadow_criterion (torch.nn): Loss function for shadow_model training.
attack_criterion (torch.nn): Loss function for attacker_model
training.
shadow_epochs (int): Number of epochs used to train the shadow network.
attack_epochs (int): Number of epochs used to train the attack network.
classes (list): Classes for membership inference task.
n_max_posteriors (int): Number of maximal posteriors to use in
membership inference attack.
retrain (bool): If True will retrain the shadow and attack network,
otherwise will simply use the provided attacker model as is fed.
verbose (bool): If True will print the loss at each batch during all
training steps.
Example:
To-do:
Add example to docstring.
'''
if retrain:
print('---- Training shadow network ----')
train(model=shadow_model, data_loader=shadow_train_loader,
test_loader=shadow_out_loader, optimizer=shadow_optim,
criterion=shadow_criterion, n_epochs=shadow_epochs,
classes=classes, verbose=verbose)
#
print('---- Training attack network ----')
train_attacker(attack_model=attacker_model, shadow_model=shadow_model,
shadow_train=shadow_train_loader,
shadow_out=shadow_out_loader, optimizer=attack_optim,
criterion=attack_criterion, n_epochs=attack_epochs,
k=n_max_posteriors)
#
print('---- Evaluate attack ----')
df_pr = eval_attack_model(attack_model=attacker_model, target=target,
target_train=target_in_loader,
target_out=target_out_loader, k=n_max_posteriors)
return df_pr
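# Illustrative sketch of a call (not part of the original source; the
# docstring above leaves its Example as a to-do). All models, loaders, and
# optimizers are hypothetical placeholders built from dataset splits [0..4]:
#
#   df_pr = ml_leaks1(target=target_net, shadow_model=shadow_net,
#                     attacker_model=attack_net,
#                     target_in_loader=loaders[4], target_out_loader=loaders[1],
#                     shadow_train_loader=loaders[2], shadow_out_loader=loaders[3],
#                     shadow_optim=s_opt, attack_optim=a_opt,
#                     shadow_criterion=torch.nn.CrossEntropyLoss(),
#                     attack_criterion=torch.nn.BCELoss(),
#                     shadow_epochs=10, attack_epochs=5)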
def ml_leaks3(target=None, target_in_loader=None, target_out_loader=None):
''' Implementation of ml_leaks 3 membership inference attack
Args:
target (nn.Module): Trained target network to attack
target_in_loader (DataLoader): Loader pointing to data used to
train target (split[4]). Used here to evaluate attack
performance.
target_out_loader: Loader pointing to the target out-of-training data
(split[1])
Example:
To-do:
Add example to docstring.
'''
eval_membership_inference(target_model=target,
target_train=target_in_loader,
target_out=target_out_loader)
def mi_gradient_ascent(input_sample=None, target_model=None, optimizer=None,
category=None, iterations=0, verbose=False):
""" Implementation of gradient based model inversion attack
Args:
input_sample (torch.tensor): Initialized input sample, usually
randomly generated. Size should match the model input.
target_model (nn.Module): Pretrained model to attack.
optimizer (nn.optim): Optimizer (initialized on image parameters) used
in attack.
category (int): Category to invert.
iterations (int): Query iterations in the attack.
verbose (bool): If True will print the loss at each step in attack.
Returns:
(list(float)): Returns a list of the losses at each iteration.
Example:
Todos:
Write example
"""
    # torch.Variable does not exist; Variables were merged into tensors
    category = torch.tensor([category], dtype=torch.long).to(device)
losses = []
for i_step in range(iterations):
target_model.zero_grad()
out = target_model(input_sample)
loss = -out.take(category)
loss.backward()
#
optimizer.step()
input_sample.grad.zero_()
# Record the scalar loss value, matching the list(float) return type
losses.append(loss.item())
#
return losses
| 5,614 | 40.286765 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/lfw_dataset.py | import os
import shutil
import numpy as np
from skimage import io
from torch.utils.data.dataset import Dataset
class LFWDataset(Dataset):
"""
Labeled Faces in the Wild (LFW) specific dataset class.
Includes indexing functionality.
Inherits from the PyTorch Dataset class.
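Example (a minimal sketch, illustrative only; assumes a prepared
data_struct whose save_path points at the LFW image folders):
from torchvision import transforms
lfw = LFWDataset(data_struct=lfw_struct, train_set=True,
transform=transforms.ToTensor())
image, label = lfw[0]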
"""
def __init__(self, data_struct=None, train_set=True, transform=None):
self.data_struct = data_struct
self.custom_prep_data()
self.test_train_split = 0.8
self.transform = transform
n_classes, file_list, class_to_label = self.index(train_set)
self.n_classes = n_classes
self.file_list = file_list
self.people_to_idx = class_to_label
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
img_path = self.file_list[idx]
image = io.imread(img_path)
label = self.people_to_idx[img_path.split('/')[-2]]
if self.transform is not None:
image = self.transform(image)
return image, label
def index(self, is_train_set):
data_dir = self.data_struct.save_path
img_paths = []
for p in os.listdir(data_dir):
for i in os.listdir(os.path.join(data_dir, p)):
img_paths.append(os.path.join(data_dir, p, i))
class_list = []
class_to_idx = {}
k = 0
for i in img_paths:
name = i.split('/')[-2]
if name not in class_to_idx:
class_list.append(name)
class_to_idx[name] = k
k += 1
n_classes = len(class_list)
img_paths = np.random.permutation(img_paths)
dataset_size = len(img_paths)
trainset_size = int(self.test_train_split * dataset_size)
if is_train_set:
file_list = img_paths[:trainset_size]
else:
file_list = img_paths[trainset_size:]
return n_classes, file_list, class_to_idx
def custom_prep_data(self):
data_name = self.data_struct.name
out_dir = self.data_struct.save_path
# LFW specific prep steps
lfw_dir = out_dir + '_original/'
# If dataset already downloaded and unpacked, do nothing
if os.path.isdir(lfw_dir):
return
os.rename(out_dir, lfw_dir)
people_dir = os.listdir(lfw_dir)
num_per_class = 20
for p in people_dir:
imgs = os.listdir(os.path.join(lfw_dir, p))
if len(imgs) >= num_per_class:
shutil.copytree(os.path.join(lfw_dir, p),
os.path.join(out_dir, p))
print('{} successfully downloaded and preprocessed.'.format(data_name))
| 2,692 | 27.956989 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/cyphercat_dataset.py | from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from .cifar10_dataset import Cifar10_preload_and_split
from .libri_dataset import Libri_preload_and_split
class CCATDataset():
"""
This is a generic cyphercat dataset class for accessing the various
datasets accessible to the package.
# Args
path (string): Path to the directory containing the dataset.
name (string): Dataset name, e.g. 'cifar-10' or 'librispeech'.
splits (list): List of fractional splits to produce.
transforms (list(torch)): List of torch transform functions.
Must be None or have length in {1, len(splits)}.
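# Example
A minimal sketch (illustrative only; assumes CIFAR-10 is available
under path):
from torchvision import transforms
ccat = CCATDataset(path='./data', name='cifar-10',
splits=[0.4, 0.1, 0.25, 0.25],
transforms=[transforms.ToTensor()])
trainset = ccat.get_split_n(0)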
"""
def __init__(self, path='', name='', splits=[1], transforms=None):
self.path = path
self.name = name
self.splits = splits
self.nsplits = len(splits)
self.transforms = transforms
self.datasplits = self.prep_dataset_splits()
def prep_dataset_splits(self):
# Check that there is either 1 transform fn,
# or the same as the number of requested splits
if self.transforms:
tlen = len(self.transforms)
slen = self.nsplits
assert tlen == 1 or tlen == slen, "Error: transform list incorrect. "\
"Must be 1 element or same length as splits. "\
"len(transforms) == {}".format(tlen)
# Grab appropriate preloader_splitter function
presplit_fn = get_preload_split_fn(self.name)
# Do the splits preloading...
return presplit_fn(path=self.path, splits=self.splits, transform=self.transforms)
def get_dataset_all_splits(self):
return self.datasplits
def get_split_n(self, n=0):
assert 0 <= n < self.nsplits, "Error: requesting invalid split. "\
"Choose a split between 0 and {}".format(self.nsplits-1)
return self.datasplits[n]
# Functions
PRELOAD_SPLIT_FN_DICT = {'cifar-10': Cifar10_preload_and_split,
'librispeech': Libri_preload_and_split,
}
def get_preload_split_fn(name=''):
"""
Convenience function for retrieving allowed
cyphercat split dataset functions.
Parameters
----------
name : {'cifar-10', 'librispeech'}
Name of dataset
Returns
-------
fn : function
Dataset specific splitter function
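Examples
--------
A minimal sketch (illustrative only):
fn = get_preload_split_fn('cifar-10')
splitsets = fn(path='./data', splits=[0.4, 0.1, 0.25, 0.25],
transform=[None])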
"""
if name in PRELOAD_SPLIT_FN_DICT:
fn = PRELOAD_SPLIT_FN_DICT[name]
return fn
else:
raise ValueError('Invalid dataset, {}, entered. Must be '
'in {}'.format(name, PRELOAD_SPLIT_FN_DICT.keys()))
| 2,715 | 33.820513 | 93 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/cifar10_dataset.py | import torch
from torch.utils.data import Dataset, ConcatDataset
import torchvision
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import pandas as pd
import numpy as np
import pickle
import os
from .splitter import splitter, dataset_split
def Cifar10_preload_and_split(path=None, splits=[0.4, 0.1, 0.25, 0.25], transform=None):
"""Index and split CIFAR10 dataset.
Args:
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
splits (list): list of fractional splits
Returns:
list(Subset): List of non-overlapping torch Subsets, one per
requested split.
Example:
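A minimal sketch (illustrative only; assumes CIFAR10 has already been
downloaded under path):
from torchvision import transforms
splitsets = Cifar10_preload_and_split(
path='./data', splits=[0.4, 0.1, 0.25, 0.25],
transform=[transforms.ToTensor()])
target_trainset = splitsets[0]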
Todo:
- More work on user specified splits.
"""
if path is None:
path = DATASETS_DIR
index_file = os.path.join(path, 'cifar10.index.csv')
indices = None
if os.path.exists(index_file):
index_csv = np.loadtxt(index_file)
indices = torch.tensor(index_csv)
print('Found predefined indexing file {}'.format(index_file))
trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)
testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)
fullset = ConcatDataset([trainset, testset])
print('Initializing CIFAR10Dataset splits')
# Convert the fractional splits into integer sample counts
dset_size = fullset.cumulative_sizes[-1]
int_splits = []
for i in range(len(splits)):
int_splits.append(int(dset_size * splits[i]))
if sum(int_splits) < dset_size:
rem = dset_size - sum(int_splits)
int_splits[-1] += rem
indices, splitsets = dataset_split(fullset, int_splits, indices=indices)
if not os.path.exists(index_file):
print('No predefined indexing file found; saving index permutations to {}'.format(index_file))
np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')
print('Finished splitting data.')
return splitsets
| 2,161 | 31.757576 | 105 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/splitter.py | from torch import randperm
from torch._utils import _accumulate
from torch.utils.data.dataset import Subset
import pandas as pd
def dataset_split(dataset=None, lengths=None, indices=None):
"""
Split a dataset into non-overlapping new datasets of given lengths.
If indices is undefined, then a random permutation of dataset
is generated. Slight modification of torch.utils.data.random_split
to gain access to permuted indices.
Arguments:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths of splits to be produced
indices (tensor): permutations of instances
Returns:
indices (tensor): permutations of instances
subsets (list(Subset)): non-overlapping subsets of the dataset
"""
if sum(lengths) != len(dataset):
raise ValueError('Sum of input lengths does not equal the length of \
the input dataset!')
# If requested a random split of dataset
if indices is None:
indices = randperm(sum(lengths))
indices = (indices).long()
return indices, [Subset(dataset, indices[offset - length:offset])
for offset, length in zip(_accumulate(lengths), lengths)]
def splitter(dfs={}, df=None, unique_categories=[], category_id='', splits=[],
N=-1, split_by_class=False):
""" Splits the data for given unqie categories according to specified
fractions.
Args:
dfs (dict(Dataframe): Current dictionary of dataframes. New splits
will be concatenated to this dict.
df (Dataframe): Dataframe containing all of the data and metadata.
unique_categories (list(int)): List containing the indices of
categories to include in these splits.
category_id (string): Defining category for dataset in Dataframe
object.
splits (list(float)): List containing the fraction of the data to be
included in each split.
N (int): index to assign new splits when appending to dfs.
split_by_class (bool): If True, will split by class; if False, will
split by data.
Returns:
(dict(Dataframe)): Updated dictionary of data splits.
Example:
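A minimal sketch (illustrative only): split each speaker's files
80/20 into dfs[0] and dfs[1]:
speakers = sorted(df['speaker_id'].unique())
dfs = splitter(dfs={}, df=df, unique_categories=speakers,
category_id='speaker_id', splits=[0.8, 0.2], N=0)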
"""
# N is to keep track of the dataframe dict keys
n_splits = len(splits)
tot_categories = len(unique_categories)
# This if statement is terminated by a return to avoid else
if split_by_class:
start_category = 0
used_categories = 0
for idx, s in enumerate(splits):
if idx != n_splits-1:
n_categories = int(s*tot_categories)
used_categories += n_categories
else:
n_categories = tot_categories - used_categories
stop_category = start_category + n_categories
for i_cat, category in enumerate(unique_categories[start_category:
stop_category]):
if i_cat == 0:
dfs[idx + N] = df[df[category_id] == category]
else:
dfs[idx + N] = dfs[idx + N].append(df[df[category_id] ==
category])
start_category += n_categories
for idx in range(n_splits):
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
for category in unique_categories: # for each category
# category = valid_sequence.unique_categories[0]
tot_files = sum(df[category_id] == category)
mini_df = df[df[category_id] == category]
mini_df = mini_df.reset_index()
used_files = 0
start_file = 0
for idx, s in enumerate(splits): # for each split
if idx != n_splits-1:
n_files = int(s*tot_files)
used_files += n_files
else:
n_files = tot_files - used_files
# get stop index for the desired # of files:
stop_file = start_file + n_files
# initialize if first category, or append if later category
if category == unique_categories[0]:
dfs[idx + N] = (mini_df.iloc[start_file:stop_file])
else:
dfs[idx + N] = dfs[idx + N].append(mini_df.iloc[start_file:
stop_file])
# update start_file
start_file += n_files
for idx in range(n_splits): # for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
def splitter2(dfs={}, df=None, unique_categories=[], category_id='', splits=[],
N=-1, split_by_class=False):
""" Splits the data for given unqie categories according to specified
fractions.
Args:
dfs (dict(Dataframe): Current dictionary of dataframes. New splits
will be concatenated to this dict.
df (Dataframe): Dataframe containing all of the data and metadata.
unique_categories (list(int)): List containing the indices of
categories to include in these splits.
category_id (string): Defining category for dataset in Dataframe
object.
splits (list(float)): List containing the fraction of the data to be
included in each split.
N (int): index to assign new splits when appending to dfs.
split_by_class (bool): If True, will split by class; if False, will
split by data.
Returns:
(dict(Dataframe)): Updated dictionary of data splits.
Example:
Todo:
- Add example.
"""
# N is to keep track of the dataframe dict keys
n_splits = len(splits)
dfs[N] = pd.DataFrame(columns=df.columns)
dfs[N+1] = pd.DataFrame(columns=df.columns)
tot_categories = len(unique_categories)
# This if statement is terminated by a return to avoid else
if split_by_class:
start_category = 0
used_categories = 0
for idx, s in enumerate(splits):
if idx != n_splits-1:
n_categories = int(s*tot_categories)
used_categories += n_categories
else:
n_categories = tot_categories - used_categories
stop_category = start_category + n_categories
for i_cat, category in enumerate(unique_categories[start_category:
stop_category]):
if i_cat == 0:
dfs[idx + N] = df[df[category_id] == category]
else:
dfs[idx + N] = dfs[idx + N].append(df[df[category_id] ==
category])
start_category += n_categories
for idx in range(n_splits):
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
for category in unique_categories: # for each category
mini_df = df[df[category_id] == category]
mini_df = mini_df.reset_index()
# Identify segments:
n_seg = len(mini_df.Section.unique())
seg1 = round(splits[0]*n_seg)
# Segments are not ordered, so just pick the first few for seg1
seg1s = mini_df.Section.unique()[:seg1]
dfs[N] = dfs[N].append(mini_df[mini_df['Section'].isin(seg1s)])
dfs[N+1] = dfs[N+1].append(mini_df[~mini_df['Section'].isin(seg1s)])
for idx in range(n_splits): # for each dataframe
dfs[idx + N] = dfs[idx + N].reset_index()
return dfs
| 7,516 | 35.848039 | 79 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/voices_dataset.py | from torch.utils.data import Dataset
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
from .splitter import splitter, splitter2
LIBRISPEECH_SAMPLING_RATE = 16000
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def load_or_index_subset(subset=None, path=None, fragment_seconds=3,
pad=False):
""" Subroutine to either load existing subset dataframe or index and save it
Args:
subset (string): VOiCES subset to either load or index.
path (string): Path to search for or save indexed subset.
fragment_seconds (float): Number of seconds for audio samples.
pad (bool): If True will accept short fragments and pad with silence.
Returns:
(pandas.Dataframe): Returns indexed subset in dataframe.
"""
index_file = path + '/VOiCES-{}.index.csv'.format(subset)
subset_index_path = index_file
if os.path.exists(subset_index_path):
df = pd.read_csv(subset_index_path)
# otherwise cache them
else:
print('Files not found, indexing {}'.format(subset))
speaker_file = '/VOiCES/Lab41-SRI-VOiCES-speaker-gender-dataset.tbl'
df = pd.read_csv(path+speaker_file, skiprows=0,
delimiter=' ', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower()
for col in df.columns]
df = df.assign(
sex=df['gender'].apply(lambda x: x.strip()),
subset=df['dataset'].apply(lambda x: x.strip()),
)
df = df.rename(columns={'speaker': 'id', 'gender': 'sex',
'dataset': 'subset'})
audio_files = index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# Remove duplicate column names
df = df[['id', 'sex', 'subset', 'filepath', 'length', 'seconds']]
# Add additional useful columns to dataframe:
snippets = []
mikes = []
degrees = []
noises = []
for i in df.index:
snip = df.filepath[i]
sg = snip.index('sg')
snippets.append(snip[sg+2:sg+6])
mc = snip.index('mc')
mikes.append(snip[mc+2:mc+4])
dg = snip.index('dg')
degrees.append(snip[dg+2:dg+5])
rm = snip.index('rm')
dash = snip[rm:].index('/') # Find first / after rm
noises.append(snip[rm:][dash+1:dash+5])
df = df.assign(Section=snippets, Mic=mikes,
Degree=degrees, Noise=noises)
mins = (df.groupby('id').sum()['seconds']/60)
min_dict = mins.to_dict()
df = df.assign(speaker_minutes=df['id'])
df['speaker_minutes'] = df['speaker_minutes'].map(min_dict)
# Save index files to data folder
df.to_csv(index_file, index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
print('\t Finished indexing {}. {} usable files found.'.format(subset,
len(df)))
return df
def Voices_preload_and_split(subset='room-1', seconds=3,
path=None, pad=False, splits=None):
"""Index and split librispeech dataset.
Args:
subset (string): VOiCES subset to parse, load and split.
Currently can only handle one at a time.
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
samples with length below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
to each split including metadata.
Example:
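A minimal sketch (illustrative only; assumes VOiCES is available
under the default datasets directory):
dfs, sample_dfs = Voices_preload_and_split(subset='room-1', seconds=3)
target_train_df = dfs[0]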
Todo:
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising VOiCESDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready):  # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for VOiCES!')
dfs = default_speaker_splitter2(dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready):  # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
# VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for VOiCES!')
sample_dfs = default_sample_splitter(sample_dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/'
'sample_splits/VOiCES_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
raise ValueError('Data split fractions do not add up to 1.')
# This creates user-selected splits according to the list provided.
# The number of speakers for train & test is the same; the expression
# below was solved with a system of equations
# (the amount of data depends on the train fraction).
n = int(len(unique_speakers)//(2 + 2*splits[name][0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
category_id='speaker_id', splits=splits[name], N=0)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
category_id='speaker_id', splits=splits[name], N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs
def index_subset(path=None, subset=None):
"""Index a subset by looping through all of it's files and recording their
speaker ID, filepath and length.
Args:
subset (string): Name of the subset.
path (string): Path to search for files to parse.
Returns:
(list(dicts)): A list of dicts containing information about all the
audio files in a particular subset of the VOiCES dataset.
Example:
Todo:
- Write example.
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path +
'/VOiCES/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.wav')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path +
'/VOiCES/{}/'.format(subset)):
if len(files) == 0:
continue
for f in files:
# Skip non-sound files
if not f.endswith('.wav'):
continue
progress_bar.update(1)
# In VOiCES, the speaker ID is the last four characters of the path
speaker_id = int(root[-4:])
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': speaker_id,
'filepath': os.path.relpath(os.path.join(root, f), path),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def default_speaker_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
def default_speaker_splitter2(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
# The math below keeps the data volume in splits 4 & 5 similar
n_male = len(unique_male)//50
n_female = len(unique_female)//50
n1 = 23
n2 = 46
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male*n1],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male*n1:n_male*n2],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
# split off unheard speakers for outset
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male*n2:],
category_id=cat_id, splits=[0, 1],
N=4, split_by_class=True)
# Replace in set with subset of df0
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female*n1],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female*n1:n_female*n2],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female*n2:],
category_id=cat_id, splits=[0, 1], N=4,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(6):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
def default_sample_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
m_dfs = splitter2(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter2(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
class Voices_dataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset. Calling __getitem__
will return the transformed librispeech audio sample and it's label
# Args
df (Dataframe): Dataframe with audiosample path and metadata.
seconds (int): Minimum length of audio to include in the dataset. Any
files smaller than this will be ignored or padded to this length.
downsampling (int): Downsampling factor.
label (string): One of {speaker, sex}. Whether to use sex or speaker
ID as a label.
stochastic (bool): If True then we will take a random fragment from
each file of sufficient length. If False we will always take a
fragment starting at the beginning of a file.
pad (bool): Whether or not to pad samples with 0s to get them to the
desired length. If `stochastic` is True then a random number of 0s
will be appended/prepended to each side to pad the sequence to the
desired length.
cache: bool. Whether or not to use the cached index file
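# Example
A minimal sketch (illustrative only):
dfs, _ = Voices_preload_and_split(subset='room-1', seconds=3)
dset = Voices_dataset(df=dfs[0], seconds=3, label='speaker')
audio, label = dset[0]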
"""
def __init__(self, df=None, seconds=3, downsampling=1, label='speaker',
stochastic=True, pad=False, transform=None, cache=True):
if label not in ('sex', 'speaker'):
raise ValueError('Label type must be one of (\'sex\', '
'\'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
raise ValueError('Down sampling must be an integer divisor of the'
' fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered
# 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i
for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(
os.path.join(DATASETS_DIR, self.datasetid_to_filepath[index]))
# Choose a random sample of the file
if self.stochastic:
upper_bound = max(len(instance) - self.fragment_length, 1)
fragment_start_index = np.random.randint(0, upper_bound)
else:
fragment_start_index = 0
instance = instance[fragment_start_index:
fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length
# by appending a random number of 0s before and the
# appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len),
'constant')
else:
# Deterministic padding. Append 0s to reach desired length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
raise ValueError('Label type must be one of (\'sex\', '
'\'speaker\'), got {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and
# downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
| 24,898 | 38.966292 | 80 | py |
cyphercat | cyphercat-master/cyphercat/datadefs/libri_dataset.py | from torch.utils.data import Dataset
from cyphercat.definitions import DATASETS_DIR, DATASPLITS_DIR
from tqdm import tqdm
import soundfile as sf
import pandas as pd
import numpy as np
import os
from .splitter import splitter
LIBRISPEECH_SAMPLING_RATE = 16000
sex_to_label = {'M': False, 'F': True}
label_to_sex = {False: 'M', True: 'F'}
def load_or_index_subset(subset=None, path=None, fragment_seconds=3,
pad=False):
""" Subroutine to either load existing subset dataframe or index and save it
Args:
subset (string): Librispeech subset to either load or index.
path (string): Path to search for or save indexed subset.
fragment_seconds (float): Number of seconds for audio samples.
pad (bool): If True will accept short fragments and pad with silence.
Returns:
(pandas.Dataframe): Returns indexed subset in dataframe.
"""
index_file = path + '/libri-{}.index.csv'.format(subset)
subset_index_path = index_file
if os.path.exists(subset_index_path):
df = pd.read_csv(subset_index_path)
# otherwise cache them
else:
print('Files not found, indexing {}'.format(subset))
df = pd.read_csv(path+'/LibriSpeech/SPEAKERS.TXT', skiprows=11,
delimiter='|', error_bad_lines=False)
df.columns = [col.strip().replace(';', '').lower()
for col in df.columns]
df = df.assign(
sex=df['sex'].apply(lambda x: x.strip()),
subset=df['subset'].apply(lambda x: x.strip()),
name=df['name'].apply(lambda x: x.strip()),
)
audio_files = index_subset(path, subset)
# Merge individual audio files with indexing dataframe
df = pd.merge(df, pd.DataFrame(audio_files))
# Save index files to data folder
df.to_csv(index_file, index=False)
# Trim too-small files
if not pad:
df = df[df['seconds'] > fragment_seconds]
# Renaming for clarity
df = df.rename(columns={'id': 'speaker_id',
'minutes': 'speaker_minutes'})
# Index of dataframe has direct correspondence to item in dataset
df = df.reset_index(drop=True)
df = df.assign(id=df.index.values)
print('\t Finished indexing {}. {} usable files found.'.format(subset,
len(df)))
return df
def Libri_preload_and_split(subset='train-clean-100',
outset='test-clean', seconds=3,
path=None, pad=False, splits=None):
"""Index and split librispeech dataset.
Args:
subset (string): LibriSpeech subset to parse, load and split.
Currently can only handle one at a time
outset (string): Librispeech subset to use for last split. Holds audio
for speakers that are out of set, used for membership inference on
speakers instead of utterances.
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
samples with length below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
to each split including metadata.
Example:
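A minimal sketch (illustrative only; assumes LibriSpeech is available
under the default datasets directory):
dfs, sample_dfs = Libri_preload_and_split(
subset='train-clean-100', outset='test-clean', seconds=3)
target_train_df = dfs[0]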
Todo:
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising LibriSpeechDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
outset_df = load_or_index_subset(subset=outset, path=path,
fragment_seconds=fragment_seconds,
pad=pad)
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready):  # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# LibriSpeech is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for LibriSpeech!')
dfs = default_speaker_splitter(dfs, df)
dfs[num_splits-1] = outset_df
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/libri-%s/speaker_splits/'
'libri_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/libri-%s/sample_splits/'
'libri_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready):  # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/libri-%s/sample_splits/'
'libri_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
# LibriSpeech is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for LibriSpeech!')
sample_dfs = default_sample_splitter(sample_dfs, df)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/libri-%s/'
'sample_splits/libri_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
raise ValueError('Data split fractions do not add up to 1.')
# This creates user-selected splits according to the list provided.
# The number of speakers for train & test is the same; the expression
# below was solved with a system of equations
# (the amount of data depends on the train fraction).
n = int(len(unique_speakers)//(2 + 2*splits[name][0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
category_id='speaker_id', splits=splits[name], N=0)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
category_id='speaker_id', splits=splits[name], N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs
def index_subset(path=None, subset=None):
"""Index a subset by looping through all of it's files and recording their
speaker ID, filepath and length.
Args:
subset (string): Name of the subset.
path (string): Path to search for files to parse.
Returns:
(list(dicts)): A list of dicts containing information about all the
audio files in a particular subset of the LibriSpeech dataset.
Example:
Todo:
- Write example.
"""
audio_files = []
print('Indexing {}...'.format(subset))
# Quick first pass to find total for tqdm bar
subset_len = 0
for root, folders, files in os.walk(path +
'/LibriSpeech/{}/'.format(subset)):
subset_len += len([f for f in files if f.endswith('.flac')])
progress_bar = tqdm(total=subset_len)
for root, folders, files in os.walk(path +
'/LibriSpeech/{}/'.format(subset)):
if len(files) == 0:
continue
librispeech_id = int(root.split('/')[-2])
for f in files:
# Skip non-sound files
if not f.endswith('.flac'):
continue
progress_bar.update(1)
instance, samplerate = sf.read(os.path.join(root, f))
audio_files.append({
'id': librispeech_id,
'filepath': os.path.relpath(os.path.join(root, f), path),
'length': len(instance),
'seconds': len(instance) * 1. / LIBRISPEECH_SAMPLING_RATE
})
progress_bar.close()
return audio_files
def default_speaker_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
# splits speakers in 0.8/0.2 split for target
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
# splits by speaker for attack
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5],
N=2, split_by_class=True)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2,
split_by_class=True)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
def default_sample_splitter(dfs=None, df=None):
""" Performs cycpercat default split for librspeech dataset.
Args:
dfs (dict(Dataframe)): Current dictionary of dataframes.
Splits concatenated to this dict.
df (Dataframe): Dataframe to split.
Returns:
dict(Dataframes): Returns a dictionary containing the dataframes for
each of the splits.
Example:
Todo:
-Write example.
"""
# defining dataset category
cat_id = 'speaker_id'
# split the df by sex
male_df = df[df['sex'] == 'M']
female_df = df[df['sex'] == 'F']
#
unique_male = sorted(male_df['speaker_id'].unique())
unique_female = sorted(female_df['speaker_id'].unique())
n_male = len(unique_male)//2
n_female = len(unique_female)//2
# male splits
m_dfs = {}
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[:n_male],
category_id=cat_id, splits=[0.8, 0.2], N=0)
m_dfs = splitter(dfs=m_dfs, df=male_df,
unique_categories=unique_male[n_male:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
m_dfs[4] = m_dfs[0][:len(m_dfs[1])]
# female splits
f_dfs = {}
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[:n_female],
category_id=cat_id, splits=[0.8, 0.2], N=0)
f_dfs = splitter(dfs=f_dfs, df=female_df,
unique_categories=unique_female[n_female:],
category_id=cat_id, splits=[0.5, 0.5], N=2)
f_dfs[4] = f_dfs[0][:len(f_dfs[1])]
# merge male and female into final splits
for i_split in range(5):
print('Merging split %i\n Male: %i and Female: %i' %
(i_split, len(m_dfs[i_split]), len(f_dfs[i_split])))
dfs[i_split] = m_dfs[i_split].append(f_dfs[i_split])
return dfs
class LibriSpeechDataset(Dataset):
"""This class subclasses the torch.utils.data.Dataset. Calling __getitem__
will return the transformed librispeech audio sample and it's label
# Args
df (Dataframe): Dataframe with audiosample path and metadata.
seconds (int): Minimum length of audio to include in the dataset. Any
files smaller than this will be ignored or padded to this length.
downsampling (int): Downsampling factor.
label (string): One of {speaker, sex}. Whether to use sex or speaker
ID as a label.
stochastic (bool): If True then we will take a random fragment from
each file of sufficient length. If False we will always take a
fragment starting at the beginning of a file.
pad (bool): Whether or not to pad samples with 0s to get them to the
desired length. If `stochastic` is True then a random number of 0s
will be appended/prepended to each side to pad the sequence to the
desired length.
cache: bool. Whether or not to use the cached index file
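# Example
A minimal sketch (illustrative only):
dfs, _ = Libri_preload_and_split(subset='train-clean-100',
outset='test-clean')
dset = LibriSpeechDataset(df=dfs[0], seconds=3, label='speaker')
audio, label = dset[0]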
"""
def __init__(self, df=None, seconds=3, downsampling=1, label='speaker',
stochastic=True, pad=False, transform=None, cache=True):
if label not in ('sex', 'speaker'):
raise ValueError('Label type must be one of (\'sex\', '
'\'speaker\')')
if int(seconds * LIBRISPEECH_SAMPLING_RATE) % downsampling != 0:
raise ValueError('Down sampling must be an integer divisor of the'
' fragment length.')
self.fragment_seconds = seconds
self.downsampling = downsampling
self.fragment_length = int(seconds * LIBRISPEECH_SAMPLING_RATE)
self.stochastic = stochastic
self.pad = pad
self.label = label
self.transform = transform
# load df from splitting function
self.df = df
self.num_speakers = len(self.df['speaker_id'].unique())
# Convert arbitrary integer labels of dataset to ordered
# 0-(num_speakers - 1) labels
self.unique_speakers = sorted(self.df['speaker_id'].unique())
self.speaker_id_mapping = {self.unique_speakers[i]: i
for i in range(self.num_classes())}
# Create dicts
self.datasetid_to_filepath = self.df.to_dict()['filepath']
self.datasetid_to_speaker_id = self.df.to_dict()['speaker_id']
self.datasetid_to_sex = self.df.to_dict()['sex']
def __getitem__(self, index):
instance, samplerate = sf.read(
os.path.join(DATASETS_DIR, self.datasetid_to_filepath[index]))
# Choose a random sample of the file
if self.stochastic:
upper_bound = max(len(instance) - self.fragment_length, 1)
fragment_start_index = np.random.randint(0, upper_bound)
else:
fragment_start_index = 0
instance = instance[fragment_start_index:
fragment_start_index+self.fragment_length]
# Check for required length and pad if necessary
if self.pad and len(instance) < self.fragment_length:
less_timesteps = self.fragment_length - len(instance)
if self.stochastic:
# Stochastic padding, ensure instance length
# by appending a random number of 0s before and the
# appropriate number of 0s after the instance
less_timesteps = self.fragment_length - len(instance)
before_len = np.random.randint(0, less_timesteps)
after_len = less_timesteps - before_len
instance = np.pad(instance, (before_len, after_len),
'constant')
else:
# Deterministic padding. Append 0s to reach desired length
instance = np.pad(instance, (0, less_timesteps), 'constant')
if self.label == 'sex':
sex = self.datasetid_to_sex[index]
label = sex_to_label[sex]
elif self.label == 'speaker':
label = self.datasetid_to_speaker_id[index]
label = self.speaker_id_mapping[label]
else:
raise ValueError('Label type must be one of (\'sex\', '
'\'speaker\'), got {}'.format(self.label))
# Reindex to channels first format as supported by pytorch and
# downsample by desired amount
instance = instance[np.newaxis, ::self.downsampling]
# Add transforms
if self.transform is not None:
instance = self.transform(instance)
return instance, label
def __len__(self):
return len(self.df)
def num_classes(self):
return len(self.df['speaker_id'].unique())
| 21,505 | 39.96381 | 80 | py |