repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py)
---|---|---|---|---|---|---
baselines | baselines-master/baselines/acktr/__init__.py |  | 0 | 0 | 0 | py |
baselines | baselines-master/baselines/acktr/kfac_utils.py |
import tensorflow as tf
def gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):
assert reduce_dim is not None
# weird batch matmul
if len(a.get_shape()) == 2 and len(b.get_shape()) > 2:
# reshape reduce_dim to the left most dim in b
b_shape = b.get_shape()
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(reduce_dim)
b_dims.insert(0, reduce_dim)
b = tf.transpose(b, b_dims)
b_t_shape = b.get_shape()
b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, b_t_shape)
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(0)
b_dims.insert(reduce_dim, 0)
result = tf.transpose(result, b_dims)
return result
elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:
# reshape reduce_dim to the right most dim in a
a_shape = a.get_shape()
outter_dim = len(a_shape) - 1
reduce_dim = len(a_shape) - reduce_dim - 1
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(reduce_dim)
a_dims.insert(outter_dim, reduce_dim)
a = tf.transpose(a, a_dims)
a_t_shape = a.get_shape()
a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, a_t_shape)
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(outter_dim)
a_dims.insert(reduce_dim, outter_dim)
result = tf.transpose(result, a_dims)
return result
elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
assert False, 'something went wrong'
def clipoutNeg(vec, threshold=1e-6):
mask = tf.cast(vec > threshold, tf.float32)
return mask * vec
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
eigen_min = tf.reduce_min(input_mat)
eigen_max = tf.reduce_max(input_mat)
eigen_ratio = eigen_max / eigen_min
input_mat_clipped = clipoutNeg(input_mat, threshold)
if debug:
input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
return input_mat_clipped
def factorReshape(Q, e, grad, facIndx=0, ftype='act'):
grad_shape = grad.get_shape()
if ftype == 'act':
assert e.get_shape()[0] == grad_shape[facIndx]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[facIndx] = -1
e = tf.reshape(e, expanded_shape)
if ftype == 'grad':
assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[len(grad_shape) - facIndx - 1] = -1
e = tf.reshape(e, expanded_shape)
return Q, e
| 3,389 | 37.965517 | 168 | py |
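As an illustration of the masking that clipoutNeg performs on eigenvalue tensors, here is a minimal NumPy sketch; the real function above operates on TensorFlow tensors, and the sample values are made up:

# NumPy sketch of clipoutNeg: entries at or below the threshold are zeroed out,
# everything else is kept unchanged. Illustration only.
import numpy as np

def clipout_neg_np(vec, threshold=1e-6):
    mask = (vec > threshold).astype(np.float32)
    return mask * vec

eigvals = np.array([-0.5, 1e-8, 0.3, 2.0], dtype=np.float32)
print(clipout_neg_np(eigvals))  # -> [0.  0.  0.3 2. ]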
baselines | baselines-master/baselines/bench/test_monitor.py |
from .monitor import Monitor
import gym
import json
def test_monitor():
import pandas
import os
import uuid
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
assert set(metadata.keys()) == {'env_id', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file)
| 861 | 25.9375 | 95 | py |
baselines | baselines-master/baselines/bench/benchmarks.py |
import re
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']
_BENCHMARKS = []
remove_version_re = re.compile(r'-v\d+$')
def register_benchmark(benchmark):
for b in _BENCHMARKS:
if b['name'] == benchmark['name']:
raise ValueError('Benchmark with name %s already registered!' % b['name'])
# automatically add a description if it is not present
if 'tasks' in benchmark:
for t in benchmark['tasks']:
if 'desc' not in t:
t['desc'] = remove_version_re.sub('', t.get('env_id', t.get('id')))
_BENCHMARKS.append(benchmark)
def list_benchmarks():
return [b['name'] for b in _BENCHMARKS]
def get_benchmark(benchmark_name):
for b in _BENCHMARKS:
if b['name'] == benchmark_name:
return b
raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
def get_task(benchmark, env_id):
"""Get a task by env_id. Return None if the benchmark doesn't have the env"""
return next(filter(lambda task: task['env_id'] == env_id, benchmark['tasks']), None)
def find_task_for_env_id_in_any_benchmark(env_id):
for bm in _BENCHMARKS:
for task in bm["tasks"]:
if task["env_id"] == env_id:
return bm, task
return None, None
_ATARI_SUFFIX = 'NoFrameskip-v4'
register_benchmark({
'name': 'Atari50M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 50M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(50e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari10M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 6, 'num_timesteps': int(10e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari1Hr',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_seconds': 60 * 60} for _game in _atari7]
})
register_benchmark({
'name': 'AtariExploration10M',
'description': '7 Atari games emphasizing exploration, with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atariexpl7]
})
# MuJoCo
_mujocosmall = [
'InvertedDoublePendulum-v2', 'InvertedPendulum-v2',
'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2',
'Reacher-v2', 'Swimmer-v2']
register_benchmark({
'name': 'Mujoco1M',
'description': 'Some small 2D MuJoCo tasks, run for 1M timesteps',
'tasks': [{'env_id': _envid, 'trials': 6, 'num_timesteps': int(1e6)} for _envid in _mujocosmall]
})
register_benchmark({
'name': 'MujocoWalkers',
'description': 'MuJoCo forward walkers, run for 8M, humanoid 100M',
'tasks': [
{'env_id': "Hopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Walker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Humanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
]
})
# Bullet
_bulletsmall = [
'InvertedDoublePendulum', 'InvertedPendulum', 'HalfCheetah', 'Reacher', 'Walker2D', 'Hopper', 'Ant'
]
_bulletsmall = [e + 'BulletEnv-v0' for e in _bulletsmall]
register_benchmark({
'name': 'Bullet1M',
'description': '6 mujoco-like tasks from bullet, 1M steps',
'tasks': [{'env_id': e, 'trials': 6, 'num_timesteps': int(1e6)} for e in _bulletsmall]
})
# Roboschool
register_benchmark({
'name': 'Roboschool8M',
'description': 'Small 2D tasks, up to 30 minutes to complete on 8 cores',
'tasks': [
{'env_id': "RoboschoolReacher-v1", 'trials': 4, 'num_timesteps': 2 * 1000000},
{'env_id': "RoboschoolAnt-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHalfCheetah-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolWalker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
]
})
register_benchmark({
'name': 'RoboschoolHarder',
'description': 'Test your might!!! Up to 12 hours on 32 cores',
'tasks': [
{'env_id': "RoboschoolHumanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrun-v1", 'trials': 4, 'num_timesteps': 200 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrunHarder-v1", 'trials': 4, 'num_timesteps': 400 * 1000000},
]
})
# Other
_atari50 = [ # actually 47
'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders', 'StarGunner',
'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture',
'VideoPinball', 'WizardOfWor', 'Zaxxon',
]
register_benchmark({
'name': 'Atari50_10M',
'description': '47 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atari50]
})
# HER DDPG
_fetch_tasks = ['FetchReach-v1', 'FetchPush-v1', 'FetchSlide-v1']
register_benchmark({
'name': 'Fetch1M',
'description': 'Fetch* benchmarks for 1M timesteps',
'tasks': [{'trials': 6, 'env_id': env_id, 'num_timesteps': int(1e6)} for env_id in _fetch_tasks]
})
| 6,102 | 35.987879 | 129 | py |
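A hypothetical usage sketch of the registry API defined in benchmarks.py above, assuming the baselines package is importable; the benchmark name and task list are invented for illustration:

# Register a custom benchmark and query it back with get_benchmark / get_task.
from baselines.bench import benchmarks

benchmarks.register_benchmark({
    'name': 'MyCartPole',  # made-up benchmark name
    'description': 'CartPole-v1 for 100k timesteps',
    'tasks': [{'env_id': 'CartPole-v1', 'trials': 3, 'num_timesteps': int(1e5)}],
})
bm = benchmarks.get_benchmark('MyCartPole')
task = benchmarks.get_task(bm, 'CartPole-v1')
print(task['desc'], task['num_timesteps'])  # -> CartPole 100000 ('desc' was auto-filled)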
baselines | baselines-master/baselines/bench/monitor.py |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename,
header={"t_start": time.time(), 'env_id' : env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords
)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
assert isinstance(info, dict)
if isinstance(info, dict):
info['episode'] = epinfo
self.total_steps += 1
def close(self):
super(Monitor, self).close()
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
class ResultsWriter(object):
def __init__(self, filename, header='', extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
if not firstline:
continue
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
| 5,741 | 34.012195 | 174 | py |
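A minimal sketch of the typical Monitor workflow shown above, assuming gym and baselines (with its TF1-era dependencies) are installed; the log directory and step count are arbitrary:

# Wrap an env with Monitor, run a few random episodes, then load the episode
# statistics back as a pandas DataFrame with load_results.
import os
import gym
from baselines.bench.monitor import Monitor, load_results

log_dir = '/tmp/monitor-demo'  # arbitrary directory for this sketch
os.makedirs(log_dir, exist_ok=True)
env = Monitor(gym.make('CartPole-v1'), os.path.join(log_dir, 'run0'),
              allow_early_resets=True)
env.reset()
for _ in range(200):
    _, _, done, _ = env.step(env.action_space.sample())
    if done:
        env.reset()
env.close()
df = load_results(log_dir)  # DataFrame with columns r (return), l (length), t (time)
print(df[['r', 'l', 't']].head())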
baselines | baselines-master/baselines/bench/__init__.py |
# flake8: noqa F403
from baselines.bench.benchmarks import *
from baselines.bench.monitor import *
| 99 | 24 | 40 | py |
baselines | baselines-master/baselines/her/ddpg.py |
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.common import tf_util
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
global DEMO_BUFFER #buffer for demonstrations
class DDPG(object):
@store_args
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
sample_transitions, gamma, reuse=False, **kwargs):
"""Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).
Added functionality to use demonstrations during training to overcome the exploration problem.
Args:
input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
actions (u)
buffer_size (int): number of transitions that are stored in the replay buffer
hidden (int): number of units in the hidden layers
layers (int): number of hidden layers
network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
polyak (float): coefficient for Polyak-averaging of the target network
batch_size (int): batch size for training
Q_lr (float): learning rate for the Q (critic) network
pi_lr (float): learning rate for the pi (actor) network
norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
action_l2 (float): coefficient for L2 penalty on the actions
clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
scope (str): the scope used for the TensorFlow graph
T (int): the time horizon for rollouts
rollout_batch_size (int): number of parallel rollouts per DDPG agent
subtract_goals (function): function that subtracts goals from each other
relative_goals (boolean): whether or not relative goals should be fed into the network
clip_pos_returns (boolean): whether or not positive returns should be clipped
clip_return (float): clip returns to be in [-clip_return, clip_return]
sample_transitions (function) function that samples from the replay buffer
gamma (float): gamma used for Q learning updates
reuse (boolean): whether or not the networks should be reused
bc_loss: whether or not the behavior cloning loss should be used as an auxiliary loss
q_filter: whether or not a filter on the q value update should be used when training with demonstrations
num_demo: number of episodes to be used in the demonstration buffer
demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread
prm_loss_weight: weight corresponding to the primary loss
aux_loss_weight: weight corresponding to the auxiliary loss, also called the cloning loss
"""
if self.clip_return is None:
self.clip_return = np.inf
self.create_actor_critic = import_function(self.network_class)
input_shapes = dims_to_shapes(self.input_dims)
self.dimo = self.input_dims['o']
self.dimg = self.input_dims['g']
self.dimu = self.input_dims['u']
# Prepare staging area for feeding data to the model.
stage_shapes = OrderedDict()
for key in sorted(self.input_dims.keys()):
if key.startswith('info_'):
continue
stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
stage_shapes[key + '_2'] = stage_shapes[key]
stage_shapes['r'] = (None,)
self.stage_shapes = stage_shapes
# Create network.
with tf.variable_scope(self.scope):
self.staging_tf = StagingArea(
dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
shapes=list(self.stage_shapes.values()))
self.buffer_ph_tf = [
tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
self._create_network(reuse=reuse)
# Configure the replay buffer.
buffer_shapes = {key: (self.T-1 if key != 'o' else self.T, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
buffer_shapes['ag'] = (self.T, self.dimg)
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
global DEMO_BUFFER
DEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions) #initialize the demo buffer; in the same way as the primary data buffer
def _random_action(self, n):
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
def _preprocess_og(self, o, ag, g):
if self.relative_goals:
g_shape = g.shape
g = g.reshape(-1, self.dimg)
ag = ag.reshape(-1, self.dimg)
g = self.subtract_goals(g, ag)
g = g.reshape(*g_shape)
o = np.clip(o, -self.clip_obs, self.clip_obs)
g = np.clip(g, -self.clip_obs, self.clip_obs)
return o, g
def step(self, obs):
actions = self.get_actions(obs['observation'], obs['achieved_goal'], obs['desired_goal'])
return actions, None, None, None
def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
compute_Q=False):
o, g = self._preprocess_og(o, ag, g)
policy = self.target if use_target_net else self.main
# values to compute
vals = [policy.pi_tf]
if compute_Q:
vals += [policy.Q_pi_tf]
# feed
feed = {
policy.o_tf: o.reshape(-1, self.dimo),
policy.g_tf: g.reshape(-1, self.dimg),
policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
}
ret = self.sess.run(vals, feed_dict=feed)
# action postprocessing
u = ret[0]
noise = noise_eps * self.max_u * np.random.randn(*u.shape) # gaussian noise
u += noise
u = np.clip(u, -self.max_u, self.max_u)
u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u) # eps-greedy
if u.shape[0] == 1:
u = u[0]
u = u.copy()
ret[0] = u
if len(ret) == 1:
return ret[0]
else:
return ret
def init_demo_buffer(self, demoDataFile, update_stats=True): #function that initializes the demo buffer
demoData = np.load(demoDataFile) #load the demonstration data from data file
info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
demo_data_obs = demoData['obs']
demo_data_acs = demoData['acs']
demo_data_info = demoData['info']
for epsd in range(self.num_demo): # we initialize the whole demo buffer at the start of the training
obs, acts, goals, achieved_goals = [], [] ,[] ,[]
i = 0
for transition in range(self.T - 1):
obs.append([demo_data_obs[epsd][transition].get('observation')])
acts.append([demo_data_acs[epsd][transition]])
goals.append([demo_data_obs[epsd][transition].get('desired_goal')])
achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])
for idx, key in enumerate(info_keys):
info_values[idx][transition, i] = demo_data_info[epsd][transition][key]
obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])
achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(info_keys, info_values):
episode['info_{}'.format(key)] = value
episode = convert_episode_to_batch_major(episode)
global DEMO_BUFFER
DEMO_BUFFER.store_episode(episode) # create the observation dict and append them into the demonstration buffer
logger.debug("Demo buffer size currently ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
if update_stats:
# add transitions to normalizer to normalize the demo data as well
episode['o_2'] = episode['o'][:, 1:, :]
episode['ag_2'] = episode['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode)
transitions = self.sample_transitions(episode, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
episode.clear()
logger.info("Demo buffer size: ", DEMO_BUFFER.get_current_size()) #print out the demonstration buffer size
def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
self.buffer.store_episode(episode_batch)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
o, g, ag = transitions['o'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
# No need to preprocess the o_2 and g_2 since this is only used for stats
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
def get_current_buffer_size(self):
return self.buffer.get_current_size()
def _sync_optimizers(self):
self.Q_adam.sync()
self.pi_adam.sync()
def _grads(self):
# Avoid feed_dict here for performance!
critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
self.Q_loss_tf,
self.main.Q_pi_tf,
self.Q_grad_tf,
self.pi_grad_tf
])
return critic_loss, actor_loss, Q_grad, pi_grad
def _update(self, Q_grad, pi_grad):
self.Q_adam.update(Q_grad, self.Q_lr)
self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self):
if self.bc_loss: #use demonstration buffer to sample as well if bc_loss flag is set TRUE
transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
global DEMO_BUFFER
transitions_demo = DEMO_BUFFER.sample(self.demo_batch_size) #sample from the demo buffer
for k, values in transitions_demo.items():
rolloutV = transitions[k].tolist()
for v in values:
rolloutV.append(v.tolist())
transitions[k] = np.array(rolloutV)
else:
transitions = self.buffer.sample(self.batch_size) #otherwise only sample from primary buffer
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
ag, ag_2 = transitions['ag'], transitions['ag_2']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
return transitions_batch
def stage_batch(self, batch=None):
if batch is None:
batch = self.sample_batch()
assert len(self.buffer_ph_tf) == len(batch)
self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def train(self, stage=True):
if stage:
self.stage_batch()
critic_loss, actor_loss, Q_grad, pi_grad = self._grads()
self._update(Q_grad, pi_grad)
return critic_loss, actor_loss
def _init_target_net(self):
self.sess.run(self.init_target_net_op)
def update_target_net(self):
self.sess.run(self.update_target_net_op)
def clear_buffer(self):
self.buffer.clear_buffer()
def _vars(self, scope):
res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
assert len(res) > 0
return res
def _global_vars(self, scope):
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
return res
def _create_network(self, reuse=False):
logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
self.sess = tf_util.get_session()
# running averages
with tf.variable_scope('o_stats') as vs:
if reuse:
vs.reuse_variables()
self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
with tf.variable_scope('g_stats') as vs:
if reuse:
vs.reuse_variables()
self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
# mini-batch sampling.
batch = self.staging_tf.get()
batch_tf = OrderedDict([(key, batch[i])
for i, key in enumerate(self.stage_shapes.keys())])
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
#choose only the demo buffer samples
mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis = 0)
# networks
with tf.variable_scope('main') as vs:
if reuse:
vs.reuse_variables()
self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
vs.reuse_variables()
with tf.variable_scope('target') as vs:
if reuse:
vs.reuse_variables()
target_batch_tf = batch_tf.copy()
target_batch_tf['o'] = batch_tf['o_2']
target_batch_tf['g'] = batch_tf['g_2']
self.target = self.create_actor_critic(
target_batch_tf, net_type='target', **self.__dict__)
vs.reuse_variables()
assert len(self._vars("main")) == len(self._vars("target"))
# loss functions
target_Q_pi_tf = self.target.Q_pi_tf
clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
if self.bc_loss ==1 and self.q_filter == 1 : # train with demonstrations and use bc_loss and q_filter both
maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1]) #where is the demonstrator action better than actor action according to the critic? choose those samples only
#define the cloning loss on the actor's actions only on the samples which adhere to the above masks
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) #primary loss scaled by its respective weight prm_loss_weight
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u)) #L2 loss on action values scaled by the same weight prm_loss_weight
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf #adding the cloning loss to the actor loss as an auxiliary loss scaled by its weight aux_loss_weight
elif self.bc_loss == 1 and self.q_filter == 0: # train with demonstrations without q_filter
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
else: #If not training with demonstrations
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
assert len(self._vars('main/Q')) == len(Q_grads_tf)
assert len(self._vars('main/pi')) == len(pi_grads_tf)
self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
# optimizers
self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
# polyak averaging
self.main_vars = self._vars('main/Q') + self._vars('main/pi')
self.target_vars = self._vars('target/Q') + self._vars('target/pi')
self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
self.init_target_net_op = list(
map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
# initialize all variables
tf.variables_initializer(self._global_vars('')).run()
self._sync_optimizers()
self._init_target_net()
def logs(self, prefix=''):
logs = []
logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def __getstate__(self):
"""Our policies can be loaded from pkl, but after unpickling you cannot continue training.
"""
excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
'main', 'target', 'lock', 'env', 'sample_transitions',
'stage_shapes', 'create_actor_critic']
state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
state['buffer_size'] = self.buffer_size
state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
return state
def __setstate__(self, state):
if 'sample_transitions' not in state:
# We don't need this for playing the policy.
state['sample_transitions'] = None
self.__init__(**state)
# set up stats (they are overwritten in __init__)
for k, v in state.items():
if k[-6:] == '_stats':
self.__dict__[k] = v
# load TF variables
vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
assert(len(vars) == len(state["tf"]))
node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
self.sess.run(node)
def save(self, save_path):
tf_util.save_variables(save_path)
| 21,980 | 47.955457 | 212 | py |
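A NumPy sketch of the action post-processing that get_actions applies above (additive Gaussian exploration noise, clipping, then epsilon-greedy replacement); shapes and hyperparameter values here are illustrative, not library defaults:

# Mirror of the exploration logic in DDPG.get_actions, in plain NumPy.
import numpy as np

max_u, noise_eps, random_eps = 1.0, 0.2, 0.3
u = np.random.uniform(-max_u, max_u, size=(5, 4))            # pretend policy actions
u = u + noise_eps * max_u * np.random.randn(*u.shape)        # additive Gaussian noise
u = np.clip(u, -max_u, max_u)                                # keep actions in range
random_u = np.random.uniform(-max_u, max_u, size=u.shape)    # fully random actions
take_random = np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1)
u = u + take_random * (random_u - u)                         # eps-greedy mixing
print(u.shape)  # -> (5, 4)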
baselines | baselines-master/baselines/her/normalizer.py |
import threading
import numpy as np
from mpi4py import MPI
import tensorflow as tf
from baselines.her.util import reshape_for_broadcasting
class Normalizer:
def __init__(self, size, eps=1e-2, default_clip_range=np.inf, sess=None):
"""A normalizer that ensures that observations are approximately distributed according to
a standard Normal distribution (i.e. have mean zero and variance one).
Args:
size (int): the size of the observation to be normalized
eps (float): a small constant that avoids underflows
default_clip_range (float): normalized observations are clipped to be in
[-default_clip_range, default_clip_range]
sess (object): the TensorFlow session to be used
"""
self.size = size
self.eps = eps
self.default_clip_range = default_clip_range
self.sess = sess if sess is not None else tf.get_default_session()
self.local_sum = np.zeros(self.size, np.float32)
self.local_sumsq = np.zeros(self.size, np.float32)
self.local_count = np.zeros(1, np.float32)
self.sum_tf = tf.get_variable(
initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum',
trainable=False, dtype=tf.float32)
self.sumsq_tf = tf.get_variable(
initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq',
trainable=False, dtype=tf.float32)
self.count_tf = tf.get_variable(
initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count',
trainable=False, dtype=tf.float32)
self.mean = tf.get_variable(
initializer=tf.zeros_initializer(), shape=(self.size,), name='mean',
trainable=False, dtype=tf.float32)
self.std = tf.get_variable(
initializer=tf.ones_initializer(), shape=(self.size,), name='std',
trainable=False, dtype=tf.float32)
self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)
self.update_op = tf.group(
self.count_tf.assign_add(self.count_pl),
self.sum_tf.assign_add(self.sum_pl),
self.sumsq_tf.assign_add(self.sumsq_pl)
)
self.recompute_op = tf.group(
tf.assign(self.mean, self.sum_tf / self.count_tf),
tf.assign(self.std, tf.sqrt(tf.maximum(
tf.square(self.eps),
self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)
))),
)
self.lock = threading.Lock()
def update(self, v):
v = v.reshape(-1, self.size)
with self.lock:
self.local_sum += v.sum(axis=0)
self.local_sumsq += (np.square(v)).sum(axis=0)
self.local_count[0] += v.shape[0]
def normalize(self, v, clip_range=None):
if clip_range is None:
clip_range = self.default_clip_range
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return tf.clip_by_value((v - mean) / std, -clip_range, clip_range)
def denormalize(self, v):
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return mean + v * std
def _mpi_average(self, x):
buf = np.zeros_like(x)
MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
buf /= MPI.COMM_WORLD.Get_size()
return buf
def synchronize(self, local_sum, local_sumsq, local_count, root=None):
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
return local_sum, local_sumsq, local_count
def recompute_stats(self):
with self.lock:
# Copy over results.
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
# Reset.
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
# We perform the synchronization outside of the lock to keep the critical section as short
# as possible.
synced_sum, synced_sumsq, synced_count = self.synchronize(
local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)
self.sess.run(self.update_op, feed_dict={
self.count_pl: synced_count,
self.sum_pl: synced_sum,
self.sumsq_pl: synced_sumsq,
})
self.sess.run(self.recompute_op)
class IdentityNormalizer:
def __init__(self, size, std=1.):
self.size = size
self.mean = tf.zeros(self.size, tf.float32)
self.std = std * tf.ones(self.size, tf.float32)
def update(self, x):
pass
def normalize(self, x, clip_range=None):
return x / self.std
def denormalize(self, x):
return self.std * x
def synchronize(self):
pass
def recompute_stats(self):
pass
| 5,304 | 36.624113 | 98 | py |
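A NumPy sketch of the running statistics that Normalizer accumulates above (sum, sum of squares, and count, from which a mean and an eps-floored standard deviation are derived); the data here is synthetic:

# Running-statistics computation as in Normalizer.recompute_stats, in NumPy.
import numpy as np

eps = 1e-2
data = np.random.randn(1000, 3) * 2.0 + 5.0          # pretend observations of size 3
total_sum = data.sum(axis=0)
total_sumsq = np.square(data).sum(axis=0)
count = data.shape[0]
mean = total_sum / count
std = np.sqrt(np.maximum(np.square(eps), total_sumsq / count - np.square(mean)))
normalized = np.clip((data - mean) / std, -5.0, 5.0)  # clipped, normalized observations
print(mean.round(2), std.round(2))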
baselines | baselines-master/baselines/her/actor_critic.py |
import tensorflow as tf
from baselines.her.util import store_args, nn
class ActorCritic:
@store_args
def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers,
**kwargs):
"""The actor-critic network and related training code.
Args:
inputs_tf (dict of tensors): all necessary inputs for the network: the
observation (o), the goal (g), and the action (u)
dimo (int): the dimension of the observations
dimg (int): the dimension of the goals
dimu (int): the dimension of the actions
max_u (float): the maximum magnitude of actions; action outputs will be scaled
accordingly
o_stats (baselines.her.Normalizer): normalizer for observations
g_stats (baselines.her.Normalizer): normalizer for goals
hidden (int): number of hidden units that should be used in hidden layers
layers (int): number of hidden layers
"""
self.o_tf = inputs_tf['o']
self.g_tf = inputs_tf['g']
self.u_tf = inputs_tf['u']
# Prepare inputs for actor and critic.
o = self.o_stats.normalize(self.o_tf)
g = self.g_stats.normalize(self.g_tf)
input_pi = tf.concat(axis=1, values=[o, g]) # for actor
# Networks.
with tf.variable_scope('pi'):
self.pi_tf = self.max_u * tf.tanh(nn(
input_pi, [self.hidden] * self.layers + [self.dimu]))
with tf.variable_scope('Q'):
# for policy training
input_Q = tf.concat(axis=1, values=[o, g, self.pi_tf / self.max_u])
self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [1])
# for critic training
input_Q = tf.concat(axis=1, values=[o, g, self.u_tf / self.max_u])
self._input_Q = input_Q # exposed for tests
self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [1], reuse=True)
| 1,996 | 43.377778 | 92 | py |
baselines | baselines-master/baselines/her/her.py |
import os
import click
import numpy as np
import json
from mpi4py import MPI
from baselines import logger
from baselines.common import set_global_seeds, tf_util
from baselines.common.mpi_moments import mpi_moments
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
def mpi_average(value):
if not isinstance(value, list):
value = [value]
if not any(value):
value = [0.]
return mpi_moments(np.array(value))[0]
def train(*, policy, rollout_worker, evaluator,
n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval,
save_path, demo_file, **kwargs):
rank = MPI.COMM_WORLD.Get_rank()
if save_path:
latest_policy_path = os.path.join(save_path, 'policy_latest.pkl')
best_policy_path = os.path.join(save_path, 'policy_best.pkl')
periodic_policy_path = os.path.join(save_path, 'policy_{}.pkl')
logger.info("Training...")
best_success_rate = -1
if policy.bc_loss == 1: policy.init_demo_buffer(demo_file) #initialize demo buffer if training with demonstrations
# num_timesteps = n_epochs * n_cycles * rollout_length * number of rollout workers
for epoch in range(n_epochs):
# train
rollout_worker.clear_history()
for _ in range(n_cycles):
episode = rollout_worker.generate_rollouts()
policy.store_episode(episode)
for _ in range(n_batches):
policy.train()
policy.update_target_net()
# test
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
# record logs
logger.record_tabular('epoch', epoch)
for key, val in evaluator.logs('test'):
logger.record_tabular(key, mpi_average(val))
for key, val in rollout_worker.logs('train'):
logger.record_tabular(key, mpi_average(val))
for key, val in policy.logs():
logger.record_tabular(key, mpi_average(val))
if rank == 0:
logger.dump_tabular()
# save the policy if it's better than the previous ones
success_rate = mpi_average(evaluator.current_success_rate())
if rank == 0 and success_rate >= best_success_rate and save_path:
best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
evaluator.save_policy(best_policy_path)
evaluator.save_policy(latest_policy_path)
if rank == 0 and policy_save_interval > 0 and epoch % policy_save_interval == 0 and save_path:
policy_path = periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
evaluator.save_policy(policy_path)
# make sure that different threads have different seeds
local_uniform = np.random.uniform(size=(1,))
root_uniform = local_uniform.copy()
MPI.COMM_WORLD.Bcast(root_uniform, root=0)
if rank != 0:
assert local_uniform[0] != root_uniform[0]
return policy
def learn(*, network, env, total_timesteps,
seed=None,
eval_env=None,
replay_strategy='future',
policy_save_interval=5,
clip_return=True,
demo_file=None,
override_params=None,
load_path=None,
save_path=None,
**kwargs
):
override_params = override_params or {}
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
num_cpu = MPI.COMM_WORLD.Get_size()
# Seed everything.
rank_seed = seed + 1000000 * rank if seed is not None else None
set_global_seeds(rank_seed)
# Prepare params.
params = config.DEFAULT_PARAMS
env_name = env.spec.id
params['env_name'] = env_name
params['replay_strategy'] = replay_strategy
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params.update(**override_params) # makes it possible to override any parameter
with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
json.dump(params, f)
params = config.prepare_params(params)
params['rollout_batch_size'] = env.num_envs
if demo_file is not None:
params['bc_loss'] = 1
params.update(kwargs)
config.log_params(params, logger=logger)
if num_cpu == 1:
logger.warn()
logger.warn('*** Warning ***')
logger.warn(
'You are running HER with just a single MPI worker. This will work, but the ' +
'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) ' +
'were obtained with --num_cpu 19. This makes a significant difference and if you ' +
'are looking to reproduce those results, be aware of this. Please also refer to ' +
'https://github.com/openai/baselines/issues/314 for further details.')
logger.warn('****************')
logger.warn()
dims = config.configure_dims(params)
policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)
if load_path is not None:
tf_util.load_variables(load_path)
rollout_params = {
'exploit': False,
'use_target_net': False,
'use_demo_states': True,
'compute_Q': False,
'T': params['T'],
}
eval_params = {
'exploit': True,
'use_target_net': params['test_with_polyak'],
'use_demo_states': False,
'compute_Q': True,
'T': params['T'],
}
for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
rollout_params[name] = params[name]
eval_params[name] = params[name]
eval_env = eval_env or env
rollout_worker = RolloutWorker(env, policy, dims, logger, monitor=True, **rollout_params)
evaluator = RolloutWorker(eval_env, policy, dims, logger, **eval_params)
n_cycles = params['n_cycles']
n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size
return train(
save_path=save_path, policy=policy, rollout_worker=rollout_worker,
evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'],
n_cycles=params['n_cycles'], n_batches=params['n_batches'],
policy_save_interval=policy_save_interval, demo_file=demo_file)
@click.command()
@click.option('--env', type=str, default='FetchReach-v1', help='the name of the OpenAI Gym environment that you want to train on')
@click.option('--total_timesteps', type=int, default=int(5e5), help='the number of timesteps to run')
@click.option('--seed', type=int, default=0, help='the random seed used to seed both the environment and the training code')
@click.option('--policy_save_interval', type=int, default=5, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
@click.option('--replay_strategy', type=click.Choice(['future', 'none']), default='future', help='the HER replay strategy to be used. "future" uses HER, "none" disables HER.')
@click.option('--clip_return', type=int, default=1, help='whether or not returns should be clipped')
@click.option('--demo_file', type=str, default = 'PATH/TO/DEMO/DATA/FILE.npz', help='demo data file path')
def main(**kwargs):
learn(**kwargs)
if __name__ == '__main__':
main()
| 7,498 | 37.654639 | 180 | py |
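A worked example of how learn() above turns total_timesteps into n_epochs; the per-run numbers below are illustrative rather than library defaults:

# n_epochs = total_timesteps // n_cycles // T // rollout_batch_size
total_timesteps = 500_000
n_cycles = 50            # training cycles per epoch
T = 50                   # rollout horizon
rollout_batch_size = 2   # parallel rollouts per worker
n_epochs = total_timesteps // n_cycles // T // rollout_batch_size
print(n_epochs)  # -> 100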
baselines | baselines-master/baselines/her/util.py |
import os
import subprocess
import sys
import importlib
import inspect
import functools
import tensorflow as tf
import numpy as np
from baselines.common import tf_util as U
def store_args(method):
"""Stores provided method args as instance attributes.
"""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
def import_function(spec):
"""Import a function identified by a string like "pkg.module:fn_name".
"""
mod_name, fn_name = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn
def flatten_grads(var_list, grads):
"""Flattens a variables and their gradients.
"""
return tf.concat([tf.reshape(grad, [U.numel(v)])
for (v, grad) in zip(var_list, grads)], 0)
def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
"""Creates a simple neural network
"""
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes) - 1 else None
input = tf.layers.dense(inputs=input,
units=size,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
reuse=reuse,
name=name + '_' + str(i))
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input
def install_mpi_excepthook():
import sys
from mpi4py import MPI
old_hook = sys.excepthook
def new_hook(a, b, c):
old_hook(a, b, c)
sys.stdout.flush()
sys.stderr.flush()
MPI.COMM_WORLD.Abort()
sys.excepthook = new_hook
def mpi_fork(n, extra_mpi_args=[]):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n <= 1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
# "-bind-to core" is crucial for good performance
args = ["mpirun", "-np", str(n)] + \
extra_mpi_args + \
[sys.executable]
args += sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
install_mpi_excepthook()
return "child"
def convert_episode_to_batch_major(episode):
"""Converts an episode to have the batch dimension in the major (first)
dimension.
"""
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
# make inputs batch-major instead of time-major
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch
def transitions_in_episode_batch(episode_batch):
"""Number of transitions in a given episode batch.
"""
shape = episode_batch['u'].shape
return shape[0] * shape[1]
def reshape_for_broadcasting(source, target):
"""Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI.
"""
dim = len(target.get_shape())
shape = ([1] * (dim - 1)) + [-1]
return tf.reshape(tf.cast(source, target.dtype), shape)
| 4,038 | 27.64539 | 90 | py |
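A small usage sketch of the store_args decorator defined above, assuming baselines (and its TensorFlow 1.x dependency) is importable; the Agent class is invented for illustration:

# store_args copies constructor arguments (including defaults and keyword-only
# arguments) onto self before the method body runs.
from baselines.her.util import store_args

class Agent:
    @store_args
    def __init__(self, lr, hidden=256, *, clip=5.0, **kwargs):
        pass  # self.lr, self.hidden and self.clip are already set here

a = Agent(1e-3, clip=10.0)
print(a.lr, a.hidden, a.clip)  # -> 0.001 256 10.0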
baselines | baselines-master/baselines/her/__init__.py |  | 0 | 0 | 0 | py |
baselines | baselines-master/baselines/her/replay_buffer.py |
import threading
import numpy as np
class ReplayBuffer:
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):
"""Creates a replay buffer.
Args:
buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
buffer
size_in_transitions (int): the size of the buffer, measured in transitions
T (int): the time horizon for episodes
sample_transitions (function): a function that samples from the replay buffer
"""
self.buffer_shapes = buffer_shapes
self.size = size_in_transitions // T
self.T = T
self.sample_transitions = sample_transitions
# self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
self.buffers = {key: np.empty([self.size, *shape])
for key, shape in buffer_shapes.items()}
# memory management
self.current_size = 0
self.n_transitions_stored = 0
self.lock = threading.Lock()
@property
def full(self):
with self.lock:
return self.current_size == self.size
def sample(self, batch_size):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['o_2'] = buffers['o'][:, 1:, :]
buffers['ag_2'] = buffers['ag'][:, 1:, :]
transitions = self.sample_transitions(buffers, batch_size)
for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
assert key in transitions, "key %s missing from transitions" % key
return transitions
def store_episode(self, episode_batch):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
assert np.all(np.array(batch_sizes) == batch_sizes[0])
batch_size = batch_sizes[0]
with self.lock:
idxs = self._get_storage_idx(batch_size)
# load inputs into buffers
for key in self.buffers.keys():
self.buffers[key][idxs] = episode_batch[key]
self.n_transitions_stored += batch_size * self.T
def get_current_episode_size(self):
with self.lock:
return self.current_size
def get_current_size(self):
with self.lock:
return self.current_size * self.T
def get_transitions_stored(self):
with self.lock:
return self.n_transitions_stored
def clear_buffer(self):
with self.lock:
self.current_size = 0
def _get_storage_idx(self, inc=None):
inc = inc or 1 # size increment
assert inc <= self.size, "Batch committed to replay is too large!"
# go consecutively until you hit the end, and then go randomly.
if self.current_size+inc <= self.size:
idx = np.arange(self.current_size, self.current_size+inc)
elif self.current_size < self.size:
overflow = inc - (self.size - self.current_size)
idx_a = np.arange(self.current_size, self.size)
idx_b = np.random.randint(0, self.current_size, overflow)
idx = np.concatenate([idx_a, idx_b])
else:
idx = np.random.randint(0, self.size, inc)
# update replay size
self.current_size = min(self.size, self.current_size+inc)
if inc == 1:
idx = idx[0]
return idx
| 3,669 | 32.669725 | 95 | py |
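A hypothetical usage sketch of ReplayBuffer above, with a trivial uniform sampler standing in for the HER sampler; the shapes, sizes, and dummy rewards are made up for illustration:

# Store one episode batch and draw a sampled mini-batch of transitions.
import numpy as np
from baselines.her.replay_buffer import ReplayBuffer

T, dim_o, dim_g, dim_u = 10, 4, 2, 3

def uniform_sampler(episode_batch, batch_size):
    # Pick a random (episode, timestep) pair per sample; add a dummy reward.
    n_eps, horizon = episode_batch['u'].shape[:2]
    ep = np.random.randint(n_eps, size=batch_size)
    t = np.random.randint(horizon, size=batch_size)
    out = {k: v[ep, t] for k, v in episode_batch.items()}
    out['r'] = np.zeros(batch_size)  # placeholder reward for the sketch
    return out

shapes = {'o': (T + 1, dim_o), 'ag': (T + 1, dim_g), 'g': (T, dim_g), 'u': (T, dim_u)}
buf = ReplayBuffer(shapes, size_in_transitions=1000, T=T, sample_transitions=uniform_sampler)
episode = {k: np.zeros((2, *shape)) for k, shape in shapes.items()}  # 2 zero episodes
buf.store_episode(episode)
batch = buf.sample(32)
print(batch['o'].shape, batch['r'].shape)  # -> (32, 4) (32,)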
baselines | baselines-master/baselines/her/rollout.py |
from collections import deque
import numpy as np
import pickle
from baselines.her.util import convert_episode_to_batch_major, store_args
class RolloutWorker:
@store_args
def __init__(self, venv, policy, dims, logger, T, rollout_batch_size=1,
exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,
random_eps=0, history_len=100, render=False, monitor=False, **kwargs):
"""Rollout worker generates experience by interacting with one or many environments.
Args:
venv: vectorized gym environments.
policy (object): the policy that is used to act
dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)
logger (object): the logger that is used by the rollout worker
rollout_batch_size (int): the number of parallel rollouts that should be used
exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
current policy without any exploration
use_target_net (boolean): whether or not to use the target net for rollouts
compute_Q (boolean): whether or not to compute the Q values alongside the actions
noise_eps (float): scale of the additive Gaussian noise
random_eps (float): probability of selecting a completely random action
history_len (int): length of history for statistics smoothing
render (boolean): whether or not to render the rollouts
"""
assert self.T > 0
self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]
self.success_history = deque(maxlen=history_len)
self.Q_history = deque(maxlen=history_len)
self.n_episodes = 0
self.reset_all_rollouts()
self.clear_history()
def reset_all_rollouts(self):
self.obs_dict = self.venv.reset()
self.initial_o = self.obs_dict['observation']
self.initial_ag = self.obs_dict['achieved_goal']
self.g = self.obs_dict['desired_goal']
def generate_rollouts(self):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts()
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
# generate episodes
obs, achieved_goals, acts, goals, successes = [], [], [], [], []
dones = []
info_values = [np.empty((self.T - 1, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
for t in range(self.T):
policy_output = self.policy.get_actions(
o, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=self.random_eps if not self.exploit else 0.,
use_target_net=self.use_target_net)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
# compute new states and observations
obs_dict_new, _, done, info = self.venv.step(u)
o_new = obs_dict_new['observation']
ag_new = obs_dict_new['achieved_goal']
success = np.array([i.get('is_success', 0.0) for i in info])
if any(done):
# here we assume all environments are done in ~the same number of steps, so we terminate rollouts whenever any of the envs returns done
# trick with using vecenvs is not to add the obs from the environments that are "done", because those are already observations
# after a reset
break
for i, info_dict in enumerate(info):
for idx, key in enumerate(self.info_keys):
info_values[idx][t, i] = info[i][key]
if np.isnan(o_new).any():
self.logger.warn('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
dones.append(done)
obs.append(o.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
obs.append(o.copy())
achieved_goals.append(ag.copy())
episode = dict(o=obs,
u=acts,
g=goals,
ag=achieved_goals)
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
successful = np.array(successes)[-1, :]
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
return convert_episode_to_batch_major(episode)
def clear_history(self):
"""Clears all histories that are used for statistics
"""
self.success_history.clear()
self.Q_history.clear()
def current_success_rate(self):
return np.mean(self.success_history)
def current_mean_Q(self):
return np.mean(self.Q_history)
def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f)
def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
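# A minimal, hypothetical evaluation loop (not part of the original module). It relies only on the
# RolloutWorker API defined above; the names `worker` and `n_rollouts` are illustrative.
def _example_evaluate(worker, n_rollouts=10):
    worker.clear_history()
    for _ in range(n_rollouts):
        worker.generate_rollouts()
    # returns e.g. [('test/success_rate', 0.9), ('test/episode', 20), ...] depending on compute_Q
    return worker.logs(prefix='test')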
| 6,782 | 38.9 | 147 |
py
|
baselines
|
baselines-master/baselines/her/her_sampler.py
|
import numpy as np
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
"""Creates a sample function that can be used for HER experience replay.
Args:
replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
regular DDPG experience replay is used
replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
as many HER replays as regular replays are used)
reward_fun (function): function to re-compute the reward with substituted goals
"""
if replay_strategy == 'future':
future_p = 1 - (1. / (1 + replay_k))
else: # 'replay_strategy' == 'none'
future_p = 0
def _sample_her_transitions(episode_batch, batch_size_in_transitions):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
for key in episode_batch.keys()}
# Select future time indexes proportional with probability future_p. These
# will be used for HER replay by substituting in future goals.
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# Replace goal with achieved goal but only for the previously-selected
# HER transitions (as defined by her_indexes). For the other transitions,
# keep the original goal.
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# Reconstruct info dictionary for reward computation.
info = {}
for key, value in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
# Re-compute reward since we may have substituted the goal.
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
transitions['r'] = reward_fun(**reward_params)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
for k in transitions.keys()}
assert(transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions
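# A minimal, hypothetical usage sketch (not part of the original module). The toy episode batch and
# sparse reward below are made up; real batches come from the HER replay buffer and the env's
# compute_reward. With replay_k=4, future_p = 1 - 1/(1+4) = 0.8, i.e. roughly 80% of the sampled
# transitions get a future achieved goal substituted for their desired goal.
def _example_her_sampling():
    B, T, dg, du = 2, 5, 3, 4  # 2 episodes of length 5, 3-dim goals, 4-dim actions
    episode_batch = {
        'u': np.zeros((B, T, du), np.float32),
        'g': np.zeros((B, T, dg), np.float32),
        'ag': np.zeros((B, T + 1, dg), np.float32),
        'ag_2': np.zeros((B, T, dg), np.float32),
    }
    # toy sparse reward: 0 when the achieved goal matches the (possibly substituted) goal, else -1
    reward_fun = lambda ag_2, g, info: -(np.linalg.norm(ag_2 - g, axis=-1) > 1e-3).astype(np.float32)
    sample = make_sample_her_transitions('future', replay_k=4, reward_fun=reward_fun)
    return sample(episode_batch, batch_size_in_transitions=8)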
| 2,822 | 43.109375 | 96 |
py
|
baselines
|
baselines-master/baselines/her/experiment/play.py
|
# DEPRECATED, use --play flag to baselines.run instead
import click
import numpy as np
import pickle
from baselines import logger
from baselines.common import set_global_seeds
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=10)
@click.option('--render', type=int, default=1)
def main(policy_file, seed, n_test_rollouts, render):
set_global_seeds(seed)
# Load policy.
with open(policy_file, 'rb') as f:
policy = pickle.load(f)
env_name = policy.info['env_name']
# Prepare params.
params = config.DEFAULT_PARAMS
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params['env_name'] = env_name
params = config.prepare_params(params)
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
eval_params = {
'exploit': True,
'use_target_net': params['test_with_polyak'],
'compute_Q': True,
'rollout_batch_size': 1,
'render': bool(render),
}
for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
eval_params[name] = params[name]
evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
evaluator.seed(seed)
# Run evaluation.
evaluator.clear_history()
for _ in range(n_test_rollouts):
evaluator.generate_rollouts()
# record logs
for key, val in evaluator.logs('test'):
logger.record_tabular(key, np.mean(val))
logger.dump_tabular()
if __name__ == '__main__':
main()
| 1,775 | 27.645161 | 94 |
py
|
baselines
|
baselines-master/baselines/her/experiment/config.py
|
import os
import numpy as np
import gym
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.her.her_sampler import make_sample_her_transitions
from baselines.bench.monitor import Monitor
DEFAULT_ENV_PARAMS = {
'FetchReach-v1': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class': 'baselines.her.actor_critic:ActorCritic',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
'buffer_size': int(1E6), # for experience replay
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg', # can be tweaked for testing
'relative_goals': False,
# training
'n_cycles': 50, # per epoch
'rollout_batch_size': 2, # per mpi thread
'n_batches': 40, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# HER
'replay_strategy': 'future', # supported modes: future, none
    'replay_k': 4,  # number of additional goals used for replay, only used if replay_strategy=future
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
    'norm_clip': 5,  # normalized observations are clipped to this value
    'bc_loss': 0, # whether or not to use the behavior cloning loss as an auxiliary loss
'q_filter': 0, # whether or not a Q value filter should be used on the Actor outputs
'num_demo': 100, # number of expert demo episodes
    'demo_batch_size': 128, # number of samples to be used from the demonstrations buffer, per mpi thread 128/1024 or 32/256
    'prm_loss_weight': 0.001, # weight corresponding to the primary loss
    'aux_loss_weight': 0.0078, # weight corresponding to the auxiliary (behavior cloning) loss
}
CACHED_ENVS = {}
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
    its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
# DDPG params
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env(subrank=None):
env = gym.make(env_name)
if subrank is not None and logger.get_dir() is not None:
try:
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
MPI = None
mpi_rank = 0
                logger.warn('Running with a single MPI process. This should work, but the results may differ from the ones published in Plappert et al.')
max_episode_steps = env._max_episode_steps
env = Monitor(env,
os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),
allow_early_resets=True)
# hack to re-expose _max_episode_steps (ideally should replace reliance on it downstream)
env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)
return env
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']
kwargs['gamma'] = 1. - 1. / kwargs['T']
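    # e.g. an episode horizon of T = 50 steps gives gamma = 1 - 1/50 = 0.98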
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers',
'network_class',
'polyak',
'batch_size', 'Q_lr', 'pi_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
def reward_fun(ag_2, g, info): # vectorized
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
# Prepare configuration for HER.
her_params = {
'reward_fun': reward_fun,
}
for name in ['replay_strategy', 'replay_k']:
her_params[name] = params[name]
params['_' + name] = her_params[name]
del params[name]
sample_her_transitions = make_sample_her_transitions(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
sample_her_transitions = configure_her(params)
# Extract relevant parameters.
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
input_dims = dims.copy()
# DDPG agent
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims, # agent takes an input observations
'T': params['T'],
'clip_pos_returns': True, # clip positive returns
'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'sample_transitions': sample_her_transitions,
'gamma': gamma,
'bc_loss': params['bc_loss'],
'q_filter': params['q_filter'],
'num_demo': params['num_demo'],
'demo_batch_size': params['demo_batch_size'],
'prm_loss_weight': params['prm_loss_weight'],
'aux_loss_weight': params['aux_loss_weight'],
})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
obs, _, _, info = env.step(env.action_space.sample())
dims = {
'o': obs['observation'].shape[0],
'u': env.action_space.shape[0],
'g': obs['desired_goal'].shape[0],
}
for key, value in info.items():
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
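# A minimal, hypothetical wiring sketch (not part of the original module). It assumes a goal-based
# gym environment such as 'FetchReach-v1' is installed and mirrors what the HER training entrypoint does.
def _example_configure(env_name='FetchReach-v1'):
    params = dict(DEFAULT_PARAMS, env_name=env_name)  # copy so prepare_params can pop keys safely
    params.update(DEFAULT_ENV_PARAMS.get(env_name, {}))
    params = prepare_params(params)
    dims = configure_dims(params)
    policy = configure_ddpg(dims=dims, params=params, clip_return=True)
    return policy, params, dims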
| 7,705 | 37.148515 | 152 |
py
|
baselines
|
baselines-master/baselines/her/experiment/plot.py
|
# DEPRECATED, use baselines.common.plot_util instead
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns; sns.set()
import glob2
import argparse
def smooth_reward_curve(x, y):
halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution
k = halfwidth
xsmoo = x
ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
mode='same')
return xsmoo, ysmoo
def load_results(file):
if not os.path.exists(file):
return None
with open(file, 'r') as f:
lines = [line for line in f]
if len(lines) < 2:
return None
keys = [name.strip() for name in lines[0].split(',')]
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
if data.ndim == 1:
data = data.reshape(1, -1)
assert data.ndim == 2
assert data.shape[-1] == len(keys)
result = {}
for idx, key in enumerate(keys):
result[key] = data[:, idx]
return result
def pad(xs, value=np.nan):
    maxlen = np.max([len(x) for x in xs])
    padded_xs = []
    for x in xs:
        if x.shape[0] >= maxlen:
            padded_xs.append(x)
            continue  # already at full length; without this, the longest run would be appended twice
        padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
        x_padded = np.concatenate([x, padding], axis=0)
        assert x_padded.shape[1:] == x.shape[1:]
        assert x_padded.shape[0] == maxlen
        padded_xs.append(x_padded)
    return np.array(padded_xs)
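# e.g. pad([np.arange(3.), np.arange(5.)]) has shape (2, 5); the shorter row is padded with two NaNs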
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--smooth', type=int, default=1)
args = parser.parse_args()
# Load all data.
data = {}
paths = [os.path.abspath(os.path.join(path, '..')) for path in glob2.glob(os.path.join(args.dir, '**', 'progress.csv'))]
for curr_path in paths:
if not os.path.isdir(curr_path):
continue
results = load_results(os.path.join(curr_path, 'progress.csv'))
if not results:
print('skipping {}'.format(curr_path))
continue
print('loading {} ({})'.format(curr_path, len(results['epoch'])))
with open(os.path.join(curr_path, 'params.json'), 'r') as f:
params = json.load(f)
success_rate = np.array(results['test/success_rate'])
epoch = np.array(results['epoch']) + 1
env_id = params['env_name']
replay_strategy = params['replay_strategy']
if replay_strategy == 'future':
config = 'her'
else:
config = 'ddpg'
if 'Dense' in env_id:
config += '-dense'
else:
config += '-sparse'
env_id = env_id.replace('Dense', '')
# Process and smooth data.
assert success_rate.shape == epoch.shape
x = epoch
y = success_rate
if args.smooth:
x, y = smooth_reward_curve(epoch, success_rate)
assert x.shape == y.shape
if env_id not in data:
data[env_id] = {}
if config not in data[env_id]:
data[env_id][config] = []
data[env_id][config].append((x, y))
# Plot data.
for env_id in sorted(data.keys()):
print('exporting {}'.format(env_id))
plt.clf()
for config in sorted(data[env_id].keys()):
xs, ys = zip(*data[env_id][config])
xs, ys = pad(xs), pad(ys)
assert xs.shape == ys.shape
plt.plot(xs[0], np.nanmedian(ys, axis=0), label=config)
plt.fill_between(xs[0], np.nanpercentile(ys, 25, axis=0), np.nanpercentile(ys, 75, axis=0), alpha=0.25)
plt.title(env_id)
plt.xlabel('Epoch')
plt.ylabel('Median Success Rate')
plt.legend()
plt.savefig(os.path.join(args.dir, 'fig_{}.png'.format(env_id)))
| 3,611 | 28.85124 | 120 |
py
|
baselines
|
baselines-master/baselines/her/experiment/__init__.py
| 0 | 0 | 0 |
py
|
|
baselines
|
baselines-master/baselines/her/experiment/data_generation/fetch_data_generation.py
|
import gym
import numpy as np
"""Data generation for the case of a single block pick and place in Fetch Env"""
actions = []
observations = []
infos = []
def main():
env = gym.make('FetchPickAndPlace-v1')
numItr = 100
initStateSpace = "random"
env.reset()
print("Reset!")
while len(actions) < numItr:
obs = env.reset()
print("ITERATION NUMBER ", len(actions))
goToGoal(env, obs)
fileName = "data_fetch"
fileName += "_" + initStateSpace
fileName += "_" + str(numItr)
fileName += ".npz"
np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file
def goToGoal(env, lastObs):
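    # Scripted pick-and-place controller with four phases: (1) move the gripper slightly above the
    # object, (2) descend and close the gripper on it, (3) carry it to the desired goal, and
    # (4) keep the gripper closed until the episode's time limit is reached.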
goal = lastObs['desired_goal']
objectPos = lastObs['observation'][3:6]
object_rel_pos = lastObs['observation'][6:9]
episodeAcs = []
episodeObs = []
episodeInfo = []
object_oriented_goal = object_rel_pos.copy()
object_oriented_goal[2] += 0.03 # first make the gripper go slightly above the object
timeStep = 0 #count the total number of timesteps
episodeObs.append(lastObs)
while np.linalg.norm(object_oriented_goal) >= 0.005 and timeStep <= env._max_episode_steps:
env.render()
action = [0, 0, 0, 0]
object_oriented_goal = object_rel_pos.copy()
object_oriented_goal[2] += 0.03
for i in range(len(object_oriented_goal)):
action[i] = object_oriented_goal[i]*6
action[len(action)-1] = 0.05 #open
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
while np.linalg.norm(object_rel_pos) >= 0.005 and timeStep <= env._max_episode_steps :
env.render()
action = [0, 0, 0, 0]
for i in range(len(object_rel_pos)):
action[i] = object_rel_pos[i]*6
action[len(action)-1] = -0.005
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
while np.linalg.norm(goal - objectPos) >= 0.01 and timeStep <= env._max_episode_steps :
env.render()
action = [0, 0, 0, 0]
for i in range(len(goal - objectPos)):
action[i] = (goal - objectPos)[i]*6
action[len(action)-1] = -0.005
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
while True: #limit the number of timesteps in the episode to a fixed duration
env.render()
action = [0, 0, 0, 0]
action[len(action)-1] = -0.005 # keep the gripper closed
obsDataNew, reward, done, info = env.step(action)
timeStep += 1
episodeAcs.append(action)
episodeInfo.append(info)
episodeObs.append(obsDataNew)
objectPos = obsDataNew['observation'][3:6]
object_rel_pos = obsDataNew['observation'][6:9]
if timeStep >= env._max_episode_steps: break
actions.append(episodeAcs)
observations.append(episodeObs)
infos.append(episodeInfo)
if __name__ == "__main__":
main()
| 3,603 | 27.377953 | 95 |
py
|
baselines
|
baselines-master/baselines/ppo1/run_robotics.py
|
#!/usr/bin/env python3
from mpi4py import MPI
from baselines.common import set_global_seeds
from baselines import logger
from baselines.common.cmd_util import make_robotics_env, robotics_arg_parser
import mujoco_py
def train(env_id, num_timesteps, seed):
from baselines.ppo1 import mlp_policy, pposgd_simple
import baselines.common.tf_util as U
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
mujoco_py.ignore_mujoco_warnings().__enter__()
workerseed = seed + 10000 * rank
set_global_seeds(workerseed)
env = make_robotics_env(env_id, workerseed, rank=rank)
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=256, num_hid_layers=3)
pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=5, optim_stepsize=3e-4, optim_batchsize=256,
gamma=0.99, lam=0.95, schedule='linear',
)
env.close()
def main():
args = robotics_arg_parser().parse_args()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
| 1,293 | 30.560976 | 84 |
py
|
baselines
|
baselines-master/baselines/ppo1/run_atari.py
|
#!/usr/bin/env python3
from mpi4py import MPI
from baselines.common import set_global_seeds
from baselines import bench
import os.path as osp
from baselines import logger
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.cmd_util import atari_arg_parser
def train(env_id, num_timesteps, seed):
from baselines.ppo1 import pposgd_simple, cnn_policy
import baselines.common.tf_util as U
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = make_atari(env_id)
def policy_fn(name, ob_space, ac_space): #pylint: disable=W0613
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
env.seed(workerseed)
env = wrap_deepmind(env)
env.seed(workerseed)
pposgd_simple.learn(env, policy_fn,
max_timesteps=int(num_timesteps * 1.1),
timesteps_per_actorbatch=256,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
gamma=0.99, lam=0.95,
schedule='linear'
)
env.close()
def main():
args = atari_arg_parser().parse_args()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
| 1,583 | 31.326531 | 87 |
py
|
baselines
|
baselines-master/baselines/ppo1/run_humanoid.py
|
#!/usr/bin/env python3
import os
from baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from baselines.common import tf_util as U
from baselines import logger
import gym
def train(num_timesteps, seed, model_path=None):
env_id = 'Humanoid-v2'
from baselines.ppo1 import mlp_policy, pposgd_simple
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
env = make_mujoco_env(env_id, seed)
# parameters below were the best found in a simple random search
# these are good enough to make humanoid walk, but whether those are
# an absolute best or not is not certain
env = RewScale(env, 0.1)
    logger.log("NOTE: reward will be scaled down by a factor of 10 in logged stats. Check the monitor for unscaled reward.")
pi = pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.1, entcoeff=0.0,
optim_epochs=10,
optim_stepsize=1e-4,
optim_batchsize=64,
gamma=0.99,
lam=0.95,
schedule='constant',
)
env.close()
if model_path:
U.save_state(model_path)
return pi
class RewScale(gym.RewardWrapper):
def __init__(self, env, scale):
gym.RewardWrapper.__init__(self, env)
self.scale = scale
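        # e.g. RewScale(env, 0.1) makes the learner see rewards divided by 10; the Monitor inside
        # make_mujoco_env still records the unscaled rewards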
def reward(self, r):
return r * self.scale
def main():
logger.configure()
parser = mujoco_arg_parser()
parser.add_argument('--model-path', default=os.path.join(logger.get_dir(), 'humanoid_policy'))
parser.set_defaults(num_timesteps=int(5e7))
args = parser.parse_args()
if not args.play:
# train the model
train(num_timesteps=args.num_timesteps, seed=args.seed, model_path=args.model_path)
else:
# construct the model object, load pre-trained model and render
pi = train(num_timesteps=1, seed=args.seed)
U.load_state(args.model_path)
env = make_mujoco_env('Humanoid-v2', seed=0)
ob = env.reset()
while True:
action = pi.act(stochastic=False, ob=ob)[0]
ob, _, done, _ = env.step(action)
env.render()
if done:
ob = env.reset()
if __name__ == '__main__':
main()
| 2,434 | 31.905405 | 120 |
py
|
baselines
|
baselines-master/baselines/ppo1/cnn_policy.py
|
import baselines.common.tf_util as U
import tensorflow as tf
import gym
from baselines.common.distributions import make_pdtype
class CnnPolicy(object):
recurrent = False
def __init__(self, name, ob_space, ac_space, kind='large'):
with tf.variable_scope(name):
self._init(ob_space, ac_space, kind)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, kind):
assert isinstance(ob_space, gym.spaces.Box)
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
x = ob / 255.0
if kind == 'small': # from A3C paper
x = tf.nn.relu(U.conv2d(x, 16, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 32, "l2", [4, 4], [2, 2], pad="VALID"))
x = U.flattenallbut0(x)
x = tf.nn.relu(tf.layers.dense(x, 256, name='lin', kernel_initializer=U.normc_initializer(1.0)))
elif kind == 'large': # Nature DQN
x = tf.nn.relu(U.conv2d(x, 32, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 64, "l2", [4, 4], [2, 2], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 64, "l3", [3, 3], [1, 1], pad="VALID"))
x = U.flattenallbut0(x)
x = tf.nn.relu(tf.layers.dense(x, 512, name='lin', kernel_initializer=U.normc_initializer(1.0)))
else:
raise NotImplementedError
logits = tf.layers.dense(x, pdtype.param_shape()[0], name='logits', kernel_initializer=U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(logits)
self.vpred = tf.layers.dense(x, 1, name='value', kernel_initializer=U.normc_initializer(1.0))[:,0]
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
        ac = self.pd.sample() # XXX: the `stochastic` placeholder is ignored here -- actions are always sampled
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
| 2,417 | 41.421053 | 121 |
py
|
baselines
|
baselines-master/baselines/ppo1/run_mujoco.py
|
#!/usr/bin/env python3
from baselines.common.cmd_util import make_mujoco_env, mujoco_arg_parser
from baselines.common import tf_util as U
from baselines import logger
def train(env_id, num_timesteps, seed):
from baselines.ppo1 import mlp_policy, pposgd_simple
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
env = make_mujoco_env(env_id, seed)
pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
gamma=0.99, lam=0.95, schedule='linear',
)
env.close()
def main():
args = mujoco_arg_parser().parse_args()
logger.configure()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)
if __name__ == '__main__':
main()
| 1,025 | 33.2 | 84 |
py
|
baselines
|
baselines-master/baselines/ppo1/mlp_policy.py
|
from baselines.common.mpi_running_mean_std import RunningMeanStd
import baselines.common.tf_util as U
import tensorflow as tf
import gym
from baselines.common.distributions import make_pdtype
class MlpPolicy(object):
recurrent = False
def __init__(self, name, *args, **kwargs):
with tf.variable_scope(name):
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
assert isinstance(ob_space, gym.spaces.Box)
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
with tf.variable_scope('vf'):
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name="fc%i"%(i+1), kernel_initializer=U.normc_initializer(1.0)))
self.vpred = tf.layers.dense(last_out, 1, name='final', kernel_initializer=U.normc_initializer(1.0))[:,0]
with tf.variable_scope('pol'):
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name='fc%i'%(i+1), kernel_initializer=U.normc_initializer(1.0)))
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
mean = tf.layers.dense(last_out, pdtype.param_shape()[0]//2, name='final', kernel_initializer=U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
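                # logstd is state-independent; "mean * 0.0 + logstd" below just broadcasts it to the batch shape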
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
else:
pdparam = tf.layers.dense(last_out, pdtype.param_shape()[0], name='final', kernel_initializer=U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
| 2,842 | 44.854839 | 138 |
py
|
baselines
|
baselines-master/baselines/ppo1/pposgd_simple.py
|
from baselines.common import Dataset, explained_variance, fmt_row, zipsame
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from baselines.common.mpi_adam import MpiAdam
from baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
def traj_segment_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
ep_rets = [] # returns of completed episodes in this segment
ep_lens = [] # lengths of ...
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens}
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
ob, rew, new, _ = env.step(ac)
rews[i] = rew
cur_ep_ret += rew
cur_ep_len += 1
if new:
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
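# A minimal numeric sketch (hypothetical numbers, not part of the original module): with zero value
# predictions, constant reward 1 and no terminals, GAE reduces to a discounted sum with factor
# gamma * lam, so adv ~= [2.825, 1.94, 1.0] for gamma=0.99, lam=0.95.
def _example_gae():
    seg = {"new": np.zeros(3, 'int32'), "vpred": np.zeros(3, 'float32'),
           "nextvpred": 0.0, "rew": np.ones(3, 'float32')}
    add_vtarg_and_adv(seg, gamma=0.99, lam=0.95)
    return seg["adv"], seg["tdlamret"]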
def learn(env, policy_fn, *,
timesteps_per_actorbatch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant' # annealing for stepsize parameters (epsilon and adam)
):
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_fn("pi", ob_space, ac_space) # Construct network for new policy
oldpi = policy_fn("oldpi", ob_space, ac_space) # Network for old policy
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
pol_entpen = (-entcoeff) * meanent
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold
surr1 = ratio * atarg # surrogate from conservative policy iteration
surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #
pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
total_loss = pol_surr + pol_entpen + vf_loss
losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
var_list = pi.get_trainable_variables()
lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])
adam = MpiAdam(var_list, epsilon=adam_epsilon)
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
U.initialize()
adam.sync()
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
elif max_seconds and time.time() - tstart >= max_seconds:
break
if schedule == 'constant':
cur_lrmult = 1.0
elif schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
else:
raise NotImplementedError
logger.log("********** Iteration %i ************"%iters_so_far)
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
        vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), deterministic=pi.recurrent)
optim_batchsize = optim_batchsize or ob.shape[0]
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
assign_old_eq_new() # set old parameter values to new parameter values
logger.log("Optimizing...")
logger.log(fmt_row(13, loss_names))
# Here we do a bunch of optimization epochs over the data
for _ in range(optim_epochs):
losses = [] # list of tuples, each of which gives the loss for a minibatch
for batch in d.iterate_once(optim_batchsize):
*newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
adam.update(g, optim_stepsize * cur_lrmult)
losses.append(newlosses)
logger.log(fmt_row(13, np.mean(losses, axis=0)))
logger.log("Evaluating losses...")
losses = []
for batch in d.iterate_once(optim_batchsize):
newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
losses.append(newlosses)
meanlosses,_,_ = mpi_moments(losses, axis=0)
logger.log(fmt_row(13, meanlosses))
for (lossval, name) in zipsame(meanlosses, loss_names):
logger.record_tabular("loss_"+name, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if MPI.COMM_WORLD.Get_rank()==0:
logger.dump_tabular()
return pi
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
| 9,432 | 42.270642 | 120 |
py
|
baselines
|
baselines-master/baselines/ppo1/__init__.py
| 0 | 0 | 0 |
py
|
|
baselines
|
baselines-master/baselines/acer/acer.py
|
import time
import functools
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds
from baselines.common.policies import build_policy
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.a2c.utils import batch_to_seq, seq_to_batch
from baselines.a2c.utils import cat_entropy_softmax
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.utils import EpisodeStats
from baselines.a2c.utils import get_by_index, check_shape, avg_norm, gradient_add, q_explained_variance
from baselines.acer.buffer import Buffer
from baselines.acer.runner import Runner
# remove last step
def strip(var, nenvs, nsteps, flat = False):
vars = batch_to_seq(var, nenvs, nsteps + 1, flat)
return seq_to_batch(vars[:-1], flat)
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
"""
Calculates q_retrace targets
:param R: Rewards
:param D: Dones
:param q_i: Q values for actions taken
:param v: V values
:param rho_i: Importance weight for each action
:return: Q_retrace values
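    Computed backwards in time with truncated importance weights rho_bar = min(1, rho):
        Q_ret(t) = r_t + gamma * (1 - d_t) * [rho_bar_{t+1} * (Q_ret(t+1) - Q(t+1)) + V(t+1)]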
"""
rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) # list of len steps, shape [nenvs]
rs = batch_to_seq(R, nenvs, nsteps, True) # list of len steps, shape [nenvs]
ds = batch_to_seq(D, nenvs, nsteps, True) # list of len steps, shape [nenvs]
q_is = batch_to_seq(q_i, nenvs, nsteps, True)
vs = batch_to_seq(v, nenvs, nsteps + 1, True)
v_final = vs[-1]
qret = v_final
qrets = []
for i in range(nsteps - 1, -1, -1):
check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6)
qret = rs[i] + gamma * qret * (1.0 - ds[i])
qrets.append(qret)
qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]
qrets = qrets[::-1]
qret = seq_to_batch(qrets, flat=True)
return qret
# For ACER with PPO clipping instead of trust region
# def clip(ratio, eps_clip):
# # assume 0 <= eps_clip <= 1
# return tf.minimum(1 + eps_clip, tf.maximum(1 - eps_clip, ratio))
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, ent_coef, q_coef, gamma, max_grad_norm, lr,
rprop_alpha, rprop_epsilon, total_timesteps, lrschedule,
c, trust_region, alpha, delta):
sess = get_session()
nact = ac_space.n
nbatch = nenvs * nsteps
A = tf.placeholder(tf.int32, [nbatch]) # actions
D = tf.placeholder(tf.float32, [nbatch]) # dones
R = tf.placeholder(tf.float32, [nbatch]) # rewards, not returns
MU = tf.placeholder(tf.float32, [nbatch, nact]) # mu's
LR = tf.placeholder(tf.float32, [])
eps = 1e-6
step_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs,) + ob_space.shape)
train_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs*(nsteps+1),) + ob_space.shape)
with tf.variable_scope('acer_model', reuse=tf.AUTO_REUSE):
step_model = policy(nbatch=nenvs, nsteps=1, observ_placeholder=step_ob_placeholder, sess=sess)
train_model = policy(nbatch=nbatch, nsteps=nsteps, observ_placeholder=train_ob_placeholder, sess=sess)
params = find_trainable_variables("acer_model")
print("Params {}".format(len(params)))
for var in params:
print(var)
# create polyak averaged model
ema = tf.train.ExponentialMovingAverage(alpha)
ema_apply_op = ema.apply(params)
def custom_getter(getter, *args, **kwargs):
v = ema.average(getter(*args, **kwargs))
print(v.name)
return v
with tf.variable_scope("acer_model", custom_getter=custom_getter, reuse=True):
polyak_model = policy(nbatch=nbatch, nsteps=nsteps, observ_placeholder=train_ob_placeholder, sess=sess)
        # Notation: (var) = batch variable, (var)s = sequence variable, (var)_i = variable indexed by the action at step i
# action probability distributions according to train_model, polyak_model and step_model
        # policy.pi is the probability distribution parameters; to obtain a distribution that sums to 1, take the softmax
train_model_p = tf.nn.softmax(train_model.pi)
polyak_model_p = tf.nn.softmax(polyak_model.pi)
step_model_p = tf.nn.softmax(step_model.pi)
v = tf.reduce_sum(train_model_p * train_model.q, axis = -1) # shape is [nenvs * (nsteps + 1)]
# strip off last step
f, f_pol, q = map(lambda var: strip(var, nenvs, nsteps), [train_model_p, polyak_model_p, train_model.q])
# Get pi and q values for actions taken
f_i = get_by_index(f, A)
q_i = get_by_index(q, A)
# Compute ratios for importance truncation
rho = f / (MU + eps)
rho_i = get_by_index(rho, A)
# Calculate Q_retrace targets
qret = q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma)
# Calculate losses
# Entropy
# entropy = tf.reduce_mean(strip(train_model.pd.entropy(), nenvs, nsteps))
entropy = tf.reduce_mean(cat_entropy_softmax(f))
# Policy Graident loss, with truncated importance sampling & bias correction
v = strip(v, nenvs, nsteps, True)
check_shape([qret, v, rho_i, f_i], [[nenvs * nsteps]] * 4)
        check_shape([rho, f, q], [[nenvs * nsteps, nact]] * 3)
# Truncated importance sampling
adv = qret - v
logf = tf.log(f_i + eps)
gain_f = logf * tf.stop_gradient(adv * tf.minimum(c, rho_i)) # [nenvs * nsteps]
loss_f = -tf.reduce_mean(gain_f)
# Bias correction for the truncation
adv_bc = (q - tf.reshape(v, [nenvs * nsteps, 1])) # [nenvs * nsteps, nact]
logf_bc = tf.log(f + eps) # / (f_old + eps)
check_shape([adv_bc, logf_bc], [[nenvs * nsteps, nact]]*2)
gain_bc = tf.reduce_sum(logf_bc * tf.stop_gradient(adv_bc * tf.nn.relu(1.0 - (c / (rho + eps))) * f), axis = 1) #IMP: This is sum, as expectation wrt f
loss_bc= -tf.reduce_mean(gain_bc)
loss_policy = loss_f + loss_bc
# Value/Q function loss, and explained variance
check_shape([qret, q_i], [[nenvs * nsteps]]*2)
ev = q_explained_variance(tf.reshape(q_i, [nenvs, nsteps]), tf.reshape(qret, [nenvs, nsteps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i)*0.5)
# Net loss
check_shape([loss_policy, loss_q, entropy], [[]] * 3)
loss = loss_policy + q_coef * loss_q - ent_coef * entropy
if trust_region:
g = tf.gradients(- (loss_policy - ent_coef * entropy) * nsteps * nenvs, f) #[nenvs * nsteps, nact]
# k = tf.gradients(KL(f_pol || f), f)
k = - f_pol / (f + eps) #[nenvs * nsteps, nact] # Directly computed gradient of KL divergence wrt f
k_dot_g = tf.reduce_sum(k * g, axis=-1)
adj = tf.maximum(0.0, (tf.reduce_sum(k * g, axis=-1) - delta) / (tf.reduce_sum(tf.square(k), axis=-1) + eps)) #[nenvs * nsteps]
# Calculate stats (before doing adjustment) for logging.
avg_norm_k = avg_norm(k)
avg_norm_g = avg_norm(g)
avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
avg_norm_adj = tf.reduce_mean(tf.abs(adj))
g = g - tf.reshape(adj, [nenvs * nsteps, 1]) * k
            grads_f = -g/(nenvs*nsteps) # These are trust region adjusted gradients wrt f, i.e. the statistics of policy pi
grads_policy = tf.gradients(f, params, grads_f)
grads_q = tf.gradients(loss_q * q_coef, params)
grads = [gradient_add(g1, g2, param) for (g1, g2, param) in zip(grads_policy, grads_q, params)]
avg_norm_grads_f = avg_norm(grads_f) * (nsteps * nenvs)
norm_grads_q = tf.global_norm(grads_q)
norm_grads_policy = tf.global_norm(grads_policy)
else:
grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            grads, norm_grads = tf.clip_by_global_norm(grads, max_grad_norm)
        else:
            norm_grads = tf.global_norm(grads)  # still logged below even when no clipping is applied
        grads = list(zip(grads, params))
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=rprop_alpha, epsilon=rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)
# so when you call _train, you first do the gradient step, then you apply ema
with tf.control_dependencies([_opt_op]):
_train = tf.group(ema_apply_op)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
# Ops/Summaries to run, and their names for logging
run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, ev, norm_grads]
names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance',
'norm_grads']
if trust_region:
run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k, avg_norm_g, avg_norm_k_dot_g,
avg_norm_adj]
names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k', 'avg_norm_g',
'avg_norm_k_dot_g', 'avg_norm_adj']
def train(obs, actions, rewards, dones, mus, states, masks, steps):
cur_lr = lr.value_steps(steps)
td_map = {train_model.X: obs, polyak_model.X: obs, A: actions, R: rewards, D: dones, MU: mus, LR: cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
td_map[polyak_model.S] = states
td_map[polyak_model.M] = masks
return names_ops, sess.run(run_ops, td_map)[1:] # strip off _train
def _step(observation, **kwargs):
return step_model._evaluate([step_model.action, step_model_p, step_model.state], observation, **kwargs)
self.train = train
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
self.train_model = train_model
self.step_model = step_model
self._step = _step
self.step = self.step_model.step
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=sess)
class Acer():
def __init__(self, runner, model, buffer, log_interval):
self.runner = runner
self.model = model
self.buffer = buffer
self.log_interval = log_interval
self.tstart = None
self.episode_stats = EpisodeStats(runner.nsteps, runner.nenv)
self.steps = None
def call(self, on_policy):
runner, model, buffer, steps = self.runner, self.model, self.buffer, self.steps
if on_policy:
enc_obs, obs, actions, rewards, mus, dones, masks = runner.run()
self.episode_stats.feed(rewards, dones)
if buffer is not None:
buffer.put(enc_obs, actions, rewards, mus, dones, masks)
else:
# get obs, actions, rewards, mus, dones from buffer.
obs, actions, rewards, mus, dones, masks = buffer.get()
# reshape stuff correctly
obs = obs.reshape(runner.batch_ob_shape)
actions = actions.reshape([runner.nbatch])
rewards = rewards.reshape([runner.nbatch])
mus = mus.reshape([runner.nbatch, runner.nact])
dones = dones.reshape([runner.nbatch])
masks = masks.reshape([runner.batch_ob_shape[0]])
names_ops, values_ops = model.train(obs, actions, rewards, dones, mus, model.initial_state, masks, steps)
if on_policy and (int(steps/runner.nbatch) % self.log_interval == 0):
logger.record_tabular("total_timesteps", steps)
logger.record_tabular("fps", int(steps/(time.time() - self.tstart)))
# IMP: In EpisodicLife env, during training, we get done=True at each loss of life, not just at the terminal state.
# Thus, this is mean until end of life, not end of episode.
# For true episode rewards, see the monitor files in the log folder.
logger.record_tabular("mean_episode_length", self.episode_stats.mean_length())
logger.record_tabular("mean_episode_reward", self.episode_stats.mean_reward())
for name, val in zip(names_ops, values_ops):
logger.record_tabular(name, float(val))
logger.dump_tabular()
def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99,
log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0,
trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs):
'''
Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf)
Train an agent with given network architecture on a given environment using ACER.
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel) (default: 20)
nstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension
(last image dimension) (default: 4)
total_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M)
q_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods)
ent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01)
max_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10),
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
rprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
rprop_alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting factor (default: 0.99)
log_interval: int, number of updates between logging events (default: 100)
buffer_size: int, size of the replay buffer (default: 50k)
    replay_ratio: int, how many (on average) batches of data to sample from the replay buffer for each batch collected from the environment (default: 4)
replay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k)
c: float, importance weight clipping factor (default: 10)
    trust_region: bool, whether or not the algorithm estimates the KL divergence between the old and updated policy and uses it to determine the step size (default: True)
delta: float, max KL divergence between the old policy and updated policy (default: 1)
alpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99)
load_path: str, path to load the model from (default: None)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
print("Running Acer Simple")
print(locals())
set_global_seeds(seed)
if not isinstance(env, VecFrameStack):
env = VecFrameStack(env, 1)
policy = build_policy(env, network, estimate_q=True, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nstack = env.nstack
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,
ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,
max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon,
total_timesteps=total_timesteps, lrschedule=lrschedule, c=c,
trust_region=trust_region, alpha=alpha, delta=delta)
if load_path is not None:
model.load(load_path)
runner = Runner(env=env, model=model, nsteps=nsteps)
if replay_ratio > 0:
buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size)
else:
buffer = None
nbatch = nenvs*nsteps
acer = Acer(runner, model, buffer, log_interval)
acer.tstart = time.time()
for acer.steps in range(0, total_timesteps, nbatch): #nbatch samples, 1 on_policy call and multiple off-policy calls
acer.call(on_policy=True)
if replay_ratio > 0 and buffer.has_atleast(replay_start):
n = np.random.poisson(replay_ratio)
for _ in range(n):
acer.call(on_policy=False) # no simulation steps in this
return model
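# A minimal, hypothetical invocation sketch (not part of the original module). It assumes a
# discrete-action vectorized env built elsewhere, e.g. with baselines.common.cmd_util.make_vec_env:
#
#   venv = make_vec_env('PongNoFrameskip-v4', 'atari', num_env=4, seed=0)
#   model = learn(network='cnn', env=venv, seed=0, nsteps=20, total_timesteps=int(1e6))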
| 18,596 | 47.683246 | 179 |
py
|
baselines
|
baselines-master/baselines/acer/buffer.py
|
import numpy as np
class Buffer(object):
# gets obs, actions, rewards, mu's, (states, masks), dones
def __init__(self, env, nsteps, size=50000):
self.nenv = env.num_envs
self.nsteps = nsteps
# self.nh, self.nw, self.nc = env.observation_space.shape
self.obs_shape = env.observation_space.shape
self.obs_dtype = env.observation_space.dtype
self.ac_dtype = env.action_space.dtype
self.nc = self.obs_shape[-1]
self.nstack = env.nstack
self.nc //= self.nstack
self.nbatch = self.nenv * self.nsteps
self.size = size // (self.nsteps) # Each loc contains nenv * nsteps frames, thus total buffer is nenv * size frames
# Memory
self.enc_obs = None
self.actions = None
self.rewards = None
self.mus = None
self.dones = None
self.masks = None
# Size indexes
self.next_idx = 0
self.num_in_buffer = 0
def has_atleast(self, frames):
# Frames per env, so total (nenv * frames) Frames needed
# Each buffer loc has nenv * nsteps frames
return self.num_in_buffer >= (frames // self.nsteps)
def can_sample(self):
return self.num_in_buffer > 0
# Generate stacked frames
def decode(self, enc_obs, dones):
# enc_obs has shape [nenvs, nsteps + nstack, nh, nw, nc]
# dones has shape [nenvs, nsteps]
# returns stacked obs of shape [nenv, (nsteps + 1), nh, nw, nstack*nc]
return _stack_obs(enc_obs, dones,
nsteps=self.nsteps)
def put(self, enc_obs, actions, rewards, mus, dones, masks):
# enc_obs [nenv, (nsteps + nstack), nh, nw, nc]
# actions, rewards, dones [nenv, nsteps]
# mus [nenv, nsteps, nact]
if self.enc_obs is None:
self.enc_obs = np.empty([self.size] + list(enc_obs.shape), dtype=self.obs_dtype)
self.actions = np.empty([self.size] + list(actions.shape), dtype=self.ac_dtype)
self.rewards = np.empty([self.size] + list(rewards.shape), dtype=np.float32)
self.mus = np.empty([self.size] + list(mus.shape), dtype=np.float32)
self.dones = np.empty([self.size] + list(dones.shape), dtype=np.bool)
self.masks = np.empty([self.size] + list(masks.shape), dtype=np.bool)
self.enc_obs[self.next_idx] = enc_obs
self.actions[self.next_idx] = actions
self.rewards[self.next_idx] = rewards
self.mus[self.next_idx] = mus
self.dones[self.next_idx] = dones
self.masks[self.next_idx] = masks
self.next_idx = (self.next_idx + 1) % self.size
self.num_in_buffer = min(self.size, self.num_in_buffer + 1)
def take(self, x, idx, envx):
nenv = self.nenv
out = np.empty([nenv] + list(x.shape[2:]), dtype=x.dtype)
for i in range(nenv):
out[i] = x[idx[i], envx[i]]
return out
def get(self):
# returns
# obs [nenv, (nsteps + 1), nh, nw, nstack*nc]
# actions, rewards, dones [nenv, nsteps]
# mus [nenv, nsteps, nact]
nenv = self.nenv
assert self.can_sample()
        # Sample exactly one buffer index per env. Sampling across envs instead would give higher correlation between samples from the same env.
idx = np.random.randint(0, self.num_in_buffer, nenv)
envx = np.arange(nenv)
        take = lambda x: self.take(x, idx, envx)
dones = take(self.dones)
enc_obs = take(self.enc_obs)
obs = self.decode(enc_obs, dones)
actions = take(self.actions)
rewards = take(self.rewards)
mus = take(self.mus)
masks = take(self.masks)
return obs, actions, rewards, mus, dones, masks
def _stack_obs_ref(enc_obs, dones, nsteps):
nenv = enc_obs.shape[0]
nstack = enc_obs.shape[1] - nsteps
nh, nw, nc = enc_obs.shape[2:]
obs_dtype = enc_obs.dtype
obs_shape = (nh, nw, nc*nstack)
mask = np.empty([nsteps + nstack - 1, nenv, 1, 1, 1], dtype=np.float32)
obs = np.zeros([nstack, nsteps + nstack, nenv, nh, nw, nc], dtype=obs_dtype)
x = np.reshape(enc_obs, [nenv, nsteps + nstack, nh, nw, nc]).swapaxes(1, 0) # [nsteps + nstack, nenv, nh, nw, nc]
mask[nstack-1:] = np.reshape(1.0 - dones, [nenv, nsteps, 1, 1, 1]).swapaxes(1, 0) # keep
mask[:nstack-1] = 1.0
# y = np.reshape(1 - dones, [nenvs, nsteps, 1, 1, 1])
for i in range(nstack):
obs[-(i + 1), i:] = x
# obs[:,i:,:,:,-(i+1),:] = x
x = x[:-1] * mask
mask = mask[1:]
return np.reshape(obs[:, (nstack-1):].transpose((2, 1, 3, 4, 0, 5)), (nenv, (nsteps + 1)) + obs_shape)
def _stack_obs(enc_obs, dones, nsteps):
nenv = enc_obs.shape[0]
nstack = enc_obs.shape[1] - nsteps
nc = enc_obs.shape[-1]
obs_ = np.zeros((nenv, nsteps + 1) + enc_obs.shape[2:-1] + (enc_obs.shape[-1] * nstack, ), dtype=enc_obs.dtype)
mask = np.ones((nenv, nsteps+1), dtype=enc_obs.dtype)
mask[:, 1:] = 1.0 - dones
mask = mask.reshape(mask.shape + tuple(np.ones(len(enc_obs.shape)-2, dtype=np.uint8)))
for i in range(nstack-1, -1, -1):
obs_[..., i * nc : (i + 1) * nc] = enc_obs[:, i : i + nsteps + 1, :]
if i < nstack-1:
obs_[..., i * nc : (i + 1) * nc] *= mask
mask[:, 1:, ...] *= mask[:, :-1, ...]
return obs_
def test_stack_obs():
nstack = 7
nenv = 1
nsteps = 5
obs_shape = (2, 3, nstack)
enc_obs_shape = (nenv, nsteps + nstack) + obs_shape[:-1] + (1,)
enc_obs = np.random.random(enc_obs_shape)
dones = np.random.randint(low=0, high=2, size=(nenv, nsteps))
stacked_obs_ref = _stack_obs_ref(enc_obs, dones, nsteps=nsteps)
stacked_obs_test = _stack_obs(enc_obs, dones, nsteps=nsteps)
np.testing.assert_allclose(stacked_obs_ref, stacked_obs_test)
| 5,881 | 36.464968 | 124 |
py
|
baselines
|
baselines-master/baselines/acer/defaults.py
|
def atari():
return dict(
lrschedule='constant'
)
| 66 | 12.4 | 29 |
py
|
baselines
|
baselines-master/baselines/acer/runner.py
|
import numpy as np
from baselines.common.runners import AbstractEnvRunner
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from gym import spaces
class Runner(AbstractEnvRunner):
def __init__(self, env, model, nsteps):
super().__init__(env=env, model=model, nsteps=nsteps)
assert isinstance(env.action_space, spaces.Discrete), 'This ACER implementation works only with discrete action spaces!'
assert isinstance(env, VecFrameStack)
self.nact = env.action_space.n
nenv = self.nenv
self.nbatch = nenv * nsteps
self.batch_ob_shape = (nenv*(nsteps+1),) + env.observation_space.shape
self.obs = env.reset()
self.obs_dtype = env.observation_space.dtype
self.ac_dtype = env.action_space.dtype
self.nstack = self.env.nstack
self.nc = self.batch_ob_shape[-1] // self.nstack
def run(self):
# enc_obs = np.split(self.obs, self.nstack, axis=3) # so now list of obs steps
enc_obs = np.split(self.env.stackedobs, self.env.nstack, axis=-1)
mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []
for _ in range(self.nsteps):
actions, mus, states = self.model._step(self.obs, S=self.states, M=self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_mus.append(mus)
mb_dones.append(self.dones)
obs, rewards, dones, _ = self.env.step(actions)
            # states information for stateful models like LSTM
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
enc_obs.append(obs[..., -self.nc:])
mb_obs.append(np.copy(self.obs))
mb_dones.append(self.dones)
enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=self.ac_dtype).swapaxes(1, 0)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)
        mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(1, 0)
        mb_masks = mb_dones # Used for stateful models like LSTMs to mask state when done
mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards
# shapes are now [nenv, nsteps, []]
# When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy.
return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
| 2,689 | 42.387097 | 128 |
py
|
baselines
|
baselines-master/baselines/acer/policies.py
|
import numpy as np
import tensorflow as tf
from baselines.common.policies import nature_cnn
from baselines.a2c.utils import fc, batch_to_seq, seq_to_batch, lstm, sample
class AcerCnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False):
nbatch = nenv * nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc * nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) # obs
with tf.variable_scope("model", reuse=reuse):
h = nature_cnn(X)
pi_logits = fc(h, 'pi', nact, init_scale=0.01)
pi = tf.nn.softmax(pi_logits)
q = fc(h, 'q', nact)
a = sample(tf.nn.softmax(pi_logits)) # could change this to use self.pi instead
self.initial_state = [] # not stateful
self.X = X
self.pi = pi # actual policy params now
self.pi_logits = pi_logits
self.q = q
self.vf = q
def step(ob, *args, **kwargs):
# returns actions, mus, states
a0, pi0 = sess.run([a, pi], {X: ob})
return a0, pi0, [] # dummy state
def out(ob, *args, **kwargs):
pi0, q0 = sess.run([pi, q], {X: ob})
return pi0, q0
def act(ob, *args, **kwargs):
return sess.run(a, {X: ob})
self.step = step
self.out = out
self.act = act
class AcerLstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False, nlstm=256):
nbatch = nenv * nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc * nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) # obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=reuse):
h = nature_cnn(X)
# lstm
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
pi_logits = fc(h5, 'pi', nact, init_scale=0.01)
pi = tf.nn.softmax(pi_logits)
q = fc(h5, 'q', nact)
a = sample(pi_logits) # could change this to use self.pi instead
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
self.X = X
self.M = M
self.S = S
self.pi = pi # actual policy params now
self.q = q
def step(ob, state, mask, *args, **kwargs):
# returns actions, mus, states
a0, pi0, s = sess.run([a, pi, snew], {X: ob, S: state, M: mask})
return a0, pi0, s
self.step = step
| 2,807 | 33.243902 | 95 |
py
|
baselines
|
baselines-master/baselines/acer/__init__.py
| 0 | 0 | 0 |
py
|
|
mesa-contrib
|
mesa-contrib-main/hooks/cmd_line_args.py
|
#!/usr/bin/env python3
#
# Generates the command-line argument hook for MESA, which is saved to
# `$MESA_CONTRIB_DIR/hooks/cmd_line_args.inc`.
#
# You can add or remove the parameters you'd like to control from the
# list `args`, below.
#
# To use the command line arguments in MESA, add the variable
# declarations
#
# integer :: i_arg
# character(len=32) :: arg
#
# to the preamble of the `extras_controls` function and
#
# include `cmd_line_args.inc`
#
# after the call to `star_ptr` in your `run_star_extras.f90`. In your
# work folder, run `./clean` and `./mk` and then run MESA with, e.g.
#
# $ ./star inlist --initial-mass 2.0
#
# Added by @warrickball following the simple example by @jschwab on
# the mailing list.
#
# https://lists.mesastar.org/pipermail/mesa-users/2020-January/011068.html
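#
# For illustration, with the default `args` list below the generated include
# contains one `case` block per control; for `initial_mass` the excerpt written
# by this script looks like (Fortran, as emitted by the loop further down):
#
#           case ('--initial-mass')
#              i_arg = i_arg + 1
#              call GET_COMMAND_ARGUMENT(i_arg, arg)
#              read(arg, *) s% initial_mass
#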
import os
# Change this list to include the controls you'd like to vary from the
# command line.
args = ['initial_mass', 'initial_z', 'initial_y', 'mixing_length_alpha']
with open(os.environ['MESA_CONTRIB_DIR'] + '/hooks/cmd_line_args.inc', 'wt') as f:
f.write('\n'.join([
" ! Add these two variables to extras_controls preamble",
" ! integer :: i_arg",
" ! character(len=32) :: arg",
" ! then include this after the star pointer is set.",
"",
" i_arg = 2 ! first argument is filename of inlist",
" call GET_COMMAND_ARGUMENT(i_arg, arg)",
" do while (arg /= ' ')",
" select case (arg)",
""]))
for arg in args:
f.write('\n'.join([
" case ('--%s')" % arg.replace('_', '-'),
" i_arg = i_arg + 1",
" call GET_COMMAND_ARGUMENT(i_arg, arg)",
" read(arg, *) s%% %s" % arg,
""]))
f.write('\n'.join([
" case default",
" stop 'invalid command-line argument: '//trim(arg)",
" end select",
"",
" i_arg = i_arg + 1",
" call GET_COMMAND_ARGUMENT(i_arg, arg)",
" end do",
""]))
| 2,202 | 32.892308 | 82 |
py
|
Noisy_Neighbours
|
Noisy_Neighbours-main/Global_Fit_Correction/Section_6_3/LISA_utils.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 08:55:10 2020
@author: aantonelli
LISA utils
"""
import numpy as np
"""
Define the LISA response function -- IMPORTANT: Doppler Shift missing here.
"""
def d_plus(alpha,theta,phi,lam):
sqrt3_64 = np.sqrt(3)/64 #
A = -36 * np.sin(theta)**2 * np.sin(2 * alpha - 2*lam)
B = (3 + np.cos(2*theta))*( np.cos(2*phi)*(9 * np.sin(2*lam) - np.sin(4*alpha - 2*lam)) + \
np.sin(2*phi)*(np.cos(4*alpha - 2*lam) - 9*np.cos(2*lam)))
C = -4*np.sqrt(3)*np.sin(2*theta)*(np.sin(3*alpha - 2*lam - phi) - 3*np.sin(alpha - 2*lam + phi))
return sqrt3_64 * (A + B + C)
def d_cross(alpha,theta,phi,lam):
A = np.sqrt(3)*np.cos(theta)*(9*np.cos(2*lam - 2*phi) - np.cos(4*alpha - 2*lam - 2*phi))
B = -6*np.sin(theta)*(np.cos(3*alpha - 2*lam - 2*phi) +3*np.cos(alpha - 2*lam + phi))
return (A + B)/16
def F_plus(theta,phi,psi,lam,alpha):
return 0.5*(np.cos(2*psi)*d_plus(alpha,theta,phi,lam) - np.sin(2*psi)*d_cross(alpha,theta,phi,lam))
def F_cross(theta,phi,psi,lam,alpha):
return 0.5*(np.sin(2*psi)*d_plus(alpha,theta,phi,lam) + np.cos(2*psi)*d_cross(alpha,theta,phi,lam))
"""
Define the LISA PSD
"""
def PowerSpectralDensity(f):
"""
From https://arxiv.org/pdf/1803.01944.pdf. This version of the PSD includes
the sky-averaging position 'penalty', which takes into account the fact that, for some
LISA sources, the wavelength of the GWs is shorter than LISA's arms.
"""
L = 2.5*10**9 # Length of LISA arm
f0 = 19.09*10**-3
Poms = ((1.5*10**-11)**2)*(1 + ((2*10**-3)/f)**4) # Optical Metrology Sensor
Pacc = (3*10**-15)**2*(1 + (4*10**-3/(10*f))**2)*(1 + (f/(8*10**-3))**4) # Acceleration Noise
Sc = 9*10**(-45)*f**(-7/3)*np.exp(-f**0.171 + 292*f*np.sin(1020*f)) * (1 \
+ np.tanh(1680*(0.00215 - f)))
PSD = ((10/(3*L**2))*(Poms + (4*Pacc)/((2*np.pi*f))**4)*(1 + 0.6*(f/f0)**2) + Sc) # PSD
where_are_NaNs = np.isnan(PSD) #In case there are nans,
PSD[where_are_NaNs] = 1e100 #set the PSD value for them to something very high and let's be done with it.
return PSD
"""
Define inner product and SNR.
"""
# Derivation of this result in personal notes.
def inner_product(FD_signal_1_fft,FD_signal_2_fft,delta_t,PSD,n_t):
""" The FD signals here are the discretized FD signals. """
return 4*delta_t*np.real(sum(FD_signal_1_fft * np.conj(FD_signal_2_fft) / (n_t * PSD))) #note: n_t in denom.
def SNR2(h_discrete_fft, delta_t, PSD, n_t):
return inner_product(h_discrete_fft, h_discrete_fft, delta_t, PSD, n_t)
"""
Zero padding
"""
def zero_pad(data):
N = len(data)
pow_2 = np.ceil(np.log2(N))
return np.pad(data,(0,int((2**pow_2)-N)),'constant')
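# Minimal usage sketch (illustrative, not part of the original module): evaluate the
# antenna patterns and the PSD, then compute the SNR of a toy sinusoid with the
# Whittle-type inner product above. All numerical values are arbitrary placeholders.
if __name__ == "__main__":
    Fp = F_plus(theta=0.3, phi=1.2, psi=0.5, lam=0.7, alpha=0.0)
    Fx = F_cross(theta=0.3, phi=1.2, psi=0.5, lam=0.7, alpha=0.0)
    print("F_plus, F_cross:", Fp, Fx)
    delta_t = 10.0                                   # sampling interval [s]
    h_t = zero_pad(np.sin(2 * np.pi * 1e-3 * delta_t * np.arange(3000)))
    n_t = len(h_t)
    freqs = np.fft.rfftfreq(n_t, delta_t)[1:]        # drop f = 0 before evaluating the PSD
    h_f = np.fft.rfft(h_t)[1:]
    print("SNR^2 =", SNR2(h_f, delta_t, PowerSpectralDensity(freqs), n_t))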
| 2,877 | 28.670103 | 112 |
py
|
Noisy_Neighbours
|
Noisy_Neighbours-main/Global_Fit_Correction/Section_6_3/MS_func.py
|
import numpy as np
def units():
GM_sun = 1.3271244*1e20
c =2.9979246*1e8
M_sun =1.9884099*1e30
G = 6.6743*1e-11
pc= 3.0856776*1e16
pi = np.pi
Mpc = (10**6) * pc
return GM_sun, c, M_sun, G, Mpc, pi
def PowerSpectralDensity(f):
"""
From https://arxiv.org/pdf/1803.01944.pdf. This version of the PSD includes
the sky-averaging position 'penalty', which takes into account the fact that, for some
LISA sources, the wavelength of the GWs is shorter than LISA's arms.
"""
L = 2.5*10**9 # Length of LISA arm
f0 = 19.09*10**-3
Poms = ((1.5*10**-11)**2)*(1 + ((2*10**-3)/f)**4) # Optical Metrology Sensor
Pacc = (3*10**-15)**2*(1 + (4*10**-3/(10*f))**2)*(1 + (f/(8*10**-3))**4) # Acceleration Noise
Sc = 9*10**(-45)*f**(-7/3)*np.exp(-f**0.171 + 292*f*np.sin(1020*f)) * (1 \
+ np.tanh(1680*(0.00215 - f))) # Confusion noise
alpha = 0.171
beta = 292
k =1020
gamma = 1680
f_k = 0.00215
PSD = ((10/(3*L*L))*(Poms + (4*Pacc)/(np.power(2*np.pi*f,4)))*(1 + 0.6*(f/f0)*(f/f0)) + Sc) # PSD
where_are_NaNs = np.isnan(PSD) #In case there are nans,
PSD[where_are_NaNs] = 1e100 #set the PSD value for them to something very high and let's be done with it.
return PSD
def htilde_GR(f,eps,params):
"""
    Here we calculate a TaylorF2 model up to 3.5PN which takes as input the following
    set of parameters: (log of chirp mass, symmetric mass ratio, beta, effective distance).
This can easily be changed in the first few lines where the parameters are loaded.
The main reference is https://arxiv.org/pdf/gr-qc/0509116.pdf [Eqs (3.4)].
Note on distance:
Notice that the effective distance contains information about the angular dependence
of the binary. The model can thus be used for all detectors, as long as this distance
parameter is chosen consistently.
Note on spin:
The spin parameter beta is defined in Eq.(2.3a) in [arxiv:0411129].
    Notice that this quantity is constructed in such a way that it is less than or
    equal to 9.4, and it ranges from 0 (no spins) to this upper value.
The coefficient enters the phase as in Eq.(2.2) in the same paper.
"""
GM_sun, c, M_sun, G, Mpc, pi = units()
t0 =1.
phi0 =0.
# Load the parameters
Mchirp_true = M_sun * np.exp(params[0])
eta_true = params[1]
beta_true = params[2]
Deff = params[3]
theta = -11831/9240 #in PN coefficients!
delta = -1987/3080 #in PN coefficients!
# PN expansion parameter (velocity).
v = (pi*G*Mchirp_true*eta_true**(-3/5)/(c**3) * f)**(1/3)
# Amplitude explicitly given in terms of units and frequency.
# Notice that lowest PN order here is fine. Biggest contributions from phase.
amplitude_1 = - (Mpc/Deff)*np.sqrt((5/(24*pi)))*(GM_sun/(c**2 *Mpc))
amplitude_2 = (pi*GM_sun/(c**3))**(-1/6) * (Mchirp_true/M_sun)**(5/6)
amplitude = amplitude_1*amplitude_2 * f**(-7/6)
# Phase: add or remove PN orders here as you see fit.
psi_const = 2*pi*f*t0 - 2*phi0 - pi/4
psi1PN = (3715/756 + (55/9)*eta_true)*v**(-3)
psi1_5PN_tails = -16*pi*v**(-2)
psi1_5PN_spin = 4*beta_true*v**(-2)
psi2PN = (15293365/508032+(27145/504)*eta_true+(3085/72)*eta_true**2)*v**(-1)
psi25PNlog = pi*(38645/252- (65/3) *eta_true)* np.log(v)
psi3PN = v*(11583231236531/4694215680 - (640/3) * (pi**2) -6848/21 *np.euler_gamma
+ eta_true*(-15335597827/3048192 + (2255/12) * (pi**2) - 1760/3 * theta - 12320/9 * delta)
+ (eta_true**2) *76055/1728 - (eta_true**3) * 127825/1296 - 6848/21 * np.log(4))
psi3PNlog = - 6848/21 *v * np.log(v)
psi35PN = pi * v**2 * (77096675./254016 + (378515./1512) *eta_true - 74045./756 * (eta_true**2)* (1-eps))
psi_fullPN = (3/(128*eta_true))*(v**(-5)+psi1PN+psi1_5PN_tails+psi1_5PN_spin+psi2PN
+ psi25PNlog + psi3PN + psi3PNlog + psi35PN)
psi = psi_const + psi_fullPN
return amplitude* np.exp(-1j*psi)
def htilde_GB(f,params):
"""
    Here we calculate a TaylorF2-type model (only the leading-order phase term is kept;
    the higher PN orders are commented out below) which takes as input the following
    set of parameters: (log of chirp mass, symmetric mass ratio, effective distance).
This can easily be changed in the first few lines where the parameters are loaded.
The main reference is https://arxiv.org/pdf/gr-qc/0509116.pdf [Eqs (3.4)].
Note on distance:
Notice that the effective distance contains information about the angular dependence
of the binary. The model can thus be used for all detectors, as long as this distance
parameter is chosen consistently.
Note on spin:
The spin parameter beta is defined in Eq.(2.3a) in [arxiv:0411129].
    Notice that this quantity is constructed in such a way that it is less than or
    equal to 9.4, and it ranges from 0 (no spins) to this upper value.
The coefficient enters the phase as in Eq.(2.2) in the same paper.
"""
# Units
# Load the parameters
t0 =1.
phi0 =0.
GM_sun, c, M_sun, G, Mpc, pi = units()
Mchirp_true = M_sun * np.exp(params[0])
eta_true = params[1]
Deff = params[2]
# PN expansion parameter (velocity).
v = (pi*G*Mchirp_true*eta_true**(-3/5)/(c**3) * f)**(1/3)
# Amplitude explicitly given in terms of units and frequency.
# Notice that lowest PN order here is fine. Biggest contributions from phase.
amplitude_1 = - (1/Deff)*np.sqrt((5/(24*pi)))*(GM_sun/(c**2 ))
amplitude_2 = (pi*GM_sun/(c**3))**(-1/6) * (Mchirp_true/M_sun)**(5/6)
amplitude = amplitude_1*amplitude_2 * f**(-7/6)
new_amplitude = -np.sqrt(5*np.pi/24)*(G*Mchirp_true/(c**3))*(G*Mchirp_true/(Deff*c**2))*(f*np.pi*G*Mchirp_true/(c**3))**(-7/6)
# Phase: add or remove PN orders here as you see fit.
psi_const = 2*pi*f*t0 - 2*phi0 - pi/4
# psi1PN = (3715/756 + (55/9)*eta_true)*v**(-3)
# psi1_5PN_tails = -16*pi*v**(-2)
# psi1_5PN_spin = 4*beta_true*v**(-2)
# psi2PN = (15293365/508032+(27145/504)*eta_true+(3085/72)*eta_true**2)*v**(-1)
# psi25PNlog = pi*(38645/252- (65/3) *eta_true)* np.log(v)
# psi3PN = v*(11583231236531/4694215680 - (640/3) * (pi**2) -6848/21 *np.euler_gamma
# + eta_true*(-15335597827/3048192 + (2255/12) * (pi**2) - 1760/3 * theta - 12320/9 * delta)
# + (eta_true**2) *76055/1728 - (eta_true**3) * 127825/1296 - 6848/21 * np.log(4))
# psi3PNlog = - 6848/21 *v * np.log(v)
# psi35PN = pi * v**2 * (77096675./254016 + (378515./1512) *eta_true - 74045./756 * (eta_true**2)* (1-eps))
psi_fullPN = (3/(128*eta_true))*(v**(-5) )
#+psi1PN+psi1_5PN_tails+psi1_5PN_spin+psi2PN
#+ psi25PNlog + psi3PN + psi3PNlog + psi35PN)
psi = psi_const + psi_fullPN
return amplitude_1,amplitude_2,np.exp(-1j*psi),new_amplitude* np.exp(-1j*psi)
def htilde_AP(f,params):
"""
    Here we calculate a TaylorF2 model up to 3.5PN which takes as input the following
    set of parameters: (log of chirp mass, symmetric mass ratio, beta, effective distance).
This can easily be changed in the first few lines where the parameters are loaded.
The main reference is https://arxiv.org/pdf/gr-qc/0509116.pdf [Eqs (3.4)].
Note on distance:
Notice that the effective distance contains information about the angular dependence
of the binary. The model can thus be used for all detectors, as long as this distance
parameter is chosen consistently.
Note on spin:
The spin parameter beta is defined in Eq.(2.3a) in [arxiv:0411129].
    Notice that this quantity is constructed in such a way that it is less than or
    equal to 9.4, and it ranges from 0 (no spins) to this upper value.
The coefficient enters the phase as in Eq.(2.2) in the same paper.
"""
# Units
# GM_sun = 1.3271244*1e20
# c =2.9979246*1e8
# M_sun =1.9884099*1e30
# G = 6.6743*1e-11
# pc= 3.0856776*1e16
# pi = np.pi
# Mpc = 10**6 * pc
# Load the parameters
t0 =1.
phi0 =0.
GM_sun, c, M_sun, G, Mpc, pi = units()
Mchirp_true = M_sun * np.exp(params[0])
eta_true = params[1]
beta_true = params[2]
Deff = params[3]
theta = -11831/9240 #in PN coefficients!
delta = -1987/3080 #in PN coefficients!
# PN expansion parameter (velocity).
v = (pi*G*Mchirp_true*eta_true**(-3/5)/(c**3) * f)**(1/3)
# Amplitude explicitly given in terms of units and frequency.
# Notice that lowest PN order here is fine. Biggest contributions from phase.
amplitude_1 = - (Mpc/Deff)*np.sqrt((5/(24*pi)))*(GM_sun/(c**2 *Mpc))
amplitude_2 = (pi*GM_sun/(c**3))**(-1/6) * (Mchirp_true/M_sun)**(5/6)
amplitude = amplitude_1*amplitude_2 * f**(-7/6)
# Phase: add or remove PN orders here as you see fit.
psi_const = 2*pi*f*t0 - 2*phi0 - pi/4
psi1PN = (3715/756+55/9*eta_true)*v**(-3)
psi1_5PN_tails = -16*pi*v**(-2)
psi1_5PN_spin = 4*beta_true*v**(-2)
psi2PN = (15293365/508032+27145/504*eta_true+3085/72*eta_true**2)*v**(-1)
psi25PNlog = pi*(38645/252- 65/3 *eta_true)* np.log(v)
psi3PN = v*(11583231236531/4694215680 -640/3 * pi**2 -6848/21 *np.euler_gamma
+ eta_true*(-15335597827/3048192+2255/12 * pi**2-1760/3 * theta - 12320/9 * delta)
+ eta_true**2 *76055/1728 - eta_true**3 * 127825/1296 - 6848/21 * np.log(4))
psi3PNlog = - 6848/21 *v * np.log(v)
psi35PN = pi * v**2 * (77096675./254016 + 378515./1512 *eta_true - 74045./756 * eta_true**2)
psi_fullPN = 3/(128*eta_true)*(v**(-5)+psi1PN+psi1_5PN_tails+psi1_5PN_spin+psi2PN
+ psi25PNlog + psi3PN + psi3PNlog + psi35PN)
psi = psi_const + psi_fullPN
return amplitude* np.exp(-1j*psi)
def T_chirp(fmin,M_chirp,eta):
t0 =1.
phi0 =0.
GM_sun, c, M_sun, G, Mpc, pi = units()
M_chirp *= M_sun
M = M_chirp*eta**(-3/5)
v_low = (pi*G*M_chirp*eta**(-3/5)/(c**3) * fmin)**(1/3)
theta = -11831/9240 #in PN coefficients!
delta = -1987/3080 #in PN coefficients!
gamma = np.euler_gamma
pre_fact = ((5/(256*eta)) * G*M/(c**3))
first_term = (v_low**(-8) + (743/252 + (11/3) * eta ) * (v_low **(-6)) - (32*np.pi/5)*v_low**(-5)
+(3058673/508032 + (5429/504)*eta + (617/72)*eta**2)*v_low**(-4)
+(13*eta/3 - 7729/252)*np.pi*v_low**-3)
second_term = (6848*gamma/105 - 10052469856691/23471078400 + 128*pi**2/3 + (
3147553127/3048192 - 451*(pi**2)/12)*eta - (15211*eta**2)/1728 + (2555*eta**3 / 1296) +
(6848/105)*np.log(4*v_low))*v_low**-2
third_term = ((14809/378)*eta**2 - (75703/756) * eta - 15419335/127008)*pi*v_low**-1
return pre_fact * (first_term + second_term + third_term)
def final_frequency(M_chirp,eta):
GM_sun, c, M_sun, G, Mpc, pi = units()
M_tot = M_chirp*eta**(-3/5) * M_sun
return (c**3)/(6*np.sqrt(6)*np.pi*G*M_tot)
def inner_prod(sig1_f,sig2_f,PSD,delta_f):
"""
Wiener Product with constant PSD. Here we use Parseval's theorem. Note the definition of the SNR.
"""
return (4*delta_f) * np.real(sum(sig1_f*np.conjugate(sig2_f)/PSD))
def numerical_derivs(freq_bin,pars):
logMchirp_1 = pars[0];eta_1 = pars[1];beta_1 = pars[2]; Deff_1 = pars[3]
logMchirp_delta = 1e-5
params_1_p = [logMchirp_1 + logMchirp_delta,eta_1,beta_1,Deff_1]
params_1_m = [logMchirp_1 - logMchirp_delta,eta_1,beta_1,Deff_1]
deriv_log_Mchirp_1 = (htilde_AP(freq_bin,params_1_p) - htilde_AP(freq_bin,params_1_m))/(2*logMchirp_delta)
eta_delta = 1e-6
params_1_p = [logMchirp_1,eta_1 + eta_delta,beta_1,Deff_1]
params_1_m = [logMchirp_1,eta_1 - eta_delta,beta_1,Deff_1]
deriv_log_eta_1 = (htilde_AP(freq_bin,params_1_p) - htilde_AP(freq_bin,params_1_m))/(2*eta_delta)
beta_delta = 1e-6
params_1_p = [logMchirp_1,eta_1,beta_1 + beta_delta,Deff_1]
params_1_m = [logMchirp_1,eta_1,beta_1 - beta_delta,Deff_1]
deriv_log_beta_1 = (htilde_AP(freq_bin,params_1_p) - htilde_AP(freq_bin,params_1_m))/(2*beta_delta)
diff_vec = [deriv_log_Mchirp_1,deriv_log_eta_1,deriv_log_beta_1]
return diff_vec
def fish_matrix(diff_vec,PSD,delta_f):
N = len(diff_vec)
fish_mix = np.eye(N)
for i in range(0,N):
for j in range(0,N):
fish_mix[i,j] = inner_prod(diff_vec[i],diff_vec[j],PSD,delta_f)
import mpmath as mp
mp.dps = 4000;
fish_mix_prec = mp.matrix(fish_mix)
fish_mix_inv = fish_mix_prec**-1
Cov_Matrix = np.eye(N)
for i in range(0,N):
for j in range(0,N):
Cov_Matrix[i,j] = float(fish_mix_inv[i,j])
return Cov_Matrix
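# Illustrative sketch (not part of the original analysis): evaluate the waveform, its
# SNR and the numerical derivatives used to build the Fisher matrix, for an arbitrary
# massive-black-hole-binary-like parameter set. All values below are placeholders.
if __name__ == "__main__":
    GM_sun, c, M_sun, G, Mpc, pi = units()
    pars = [np.log(3.0e5), 0.24, 2.0, 1.0e3 * Mpc]   # placeholder [log(Mchirp/Msun), eta, beta, Deff]
    f_max = final_frequency(3.0e5, 0.24)             # approximate ISCO cut-off frequency
    f = np.linspace(1e-4, f_max, 2**14)
    delta_f = f[1] - f[0]
    psd = PowerSpectralDensity(f)
    h = htilde_AP(f, pars)
    print("SNR =", np.sqrt(inner_prod(h, h, psd, delta_f)))
    diff_vec = numerical_derivs(f, pars)             # derivatives w.r.t. (log Mchirp, eta, beta)
    # Cov_Matrix = fish_matrix(diff_vec, psd, delta_f)   # inverse Fisher matrix (needs mpmath)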
# MCMC
# MCMC
"""
Created on Mon Nov 25 23:53:26 2019
@author: Ollie
"""
import numpy as np
import scipy as sp
import random as rd
import matplotlib.pyplot as plt
def llike(pdgrm, variances):
"""
Computes log (Whittle) likelihood
"""
return -0.5 * sum(pdgrm / variances)
def lprior_logM_chirp(logM_chirp,logM_chirp_low, logM_chirp_high):
"""
    Prior on the log chirp mass - uniform
"""
if logM_chirp < logM_chirp_low or logM_chirp > logM_chirp_high:
print('rejected logM_chirp')
return -1e100
else:
return 0
def lprior_eta(eta,eta_low, eta_high):
"""
    Prior on the symmetric mass ratio - uniform
"""
if eta < eta_low or eta > eta_high:
print('rejected eta')
return -1e100
else:
return 0
def lprior_beta(beta,beta_low, beta_high):
"""
    Prior on the spin parameter beta - uniform
"""
if beta < beta_low or beta > beta_high:
print('rejected beta')
return -1e100
else:
return 0
def lpost_full(pdgrm, variances,
logM_chirp_1,logM_chirp_2, logM_chirp_low,logM_chirp_high,
eta_1, eta_2, eta_low, eta_high,
beta_1, beta_2,beta_low, beta_high):
'''
Compute log posterior
'''
return(lprior_logM_chirp(logM_chirp_1,logM_chirp_low, logM_chirp_high) +
lprior_logM_chirp(logM_chirp_2,logM_chirp_low, logM_chirp_high) +
lprior_eta(eta_1,eta_low, eta_high) +
lprior_eta(eta_2,eta_low, eta_high) +
lprior_beta(beta_1,beta_low, beta_high) +
lprior_beta(beta_2,beta_low, beta_high) +
           llike(pdgrm, variances))
def accept_reject(lp_prop, lp_prev):
'''
Compute log acceptance probability (minimum of 0 and log acceptance rate)
Decide whether to accept (1) or reject (0)
'''
u = np.random.uniform(size = 1) # U[0, 1]
r = np.minimum(0, lp_prop - lp_prev) # log acceptance probability
if np.log(u) < r:
return(1) # Accept
else:
return(0) # Reject
def accept_rate(parameter):
'''
Compute acceptance rate for a specific parameter
Used to adapt the proposal variance in a MH sampler
Input: parameter (sequence of samples of a parameter)
'''
rejections = 0
for i in range(len(parameter) - 1): # Count rejections
rejections = rejections + (parameter[i + 1] == parameter[i])
reject_rate = rejections / (len(parameter) - 1) # Rejection rate
return(1 - reject_rate) # Return acceptance rate
#####
#####
def MCMC_full(data_f,f, true_vals,D_vec,Cov_Matrix,
variances,
logM_chirp_high,logM_chirp_low,
eta_high, eta_low,
beta_high, beta_low,
Ntotal,
burnin,
printerval = 50):
np.random.seed(2) # Set the seed
logM_chirp_1 = [] # Initialise empty vectors
eta_1 = []
beta_1 = []
Deff_1 = []
logM_chirp_2 = [] # Initialise empty vectors
eta_2 = []
beta_2 = []
Deff_2 = []
logM_chirp_1.append(true_vals[0])
eta_1.append(true_vals[1])
beta_1.append(true_vals[2])
Deff_1.append(D_vec[0])
logM_chirp_2.append(true_vals[3])
eta_2.append(true_vals[4])
beta_2.append(true_vals[5])
Deff_2.append(D_vec[1])
delta_f = f[1] - f[0] # Extract sampling interval
params_1 = [logM_chirp_1[0],eta_1[0],beta_1[0],Deff_1[0]]
params_2 = [logM_chirp_2[0],eta_2[0],beta_2[0],Deff_2[0]]
signal_init_f_1 = htilde_AP(f,params_1)
signal_init_f_2 = htilde_AP(f,params_2)
signal_f_init_tot = signal_init_f_1 + signal_init_f_2
# Compute periodogram
pdgrm = abs(data_f - signal_f_init_tot)**2
print(pdgrm)
if pdgrm[0] == 0:
print('There will be no bias')
else:
print('Prepare for bias')
# Initial value for log posterior
lp = []
lp.append(lpost_full(pdgrm, variances,
logM_chirp_1[0], logM_chirp_2[0],logM_chirp_low, logM_chirp_high,
eta_1[0], eta_2[0], eta_low, eta_high,
beta_1[0], beta_2[0], beta_low, beta_high))
lp_store = lp[0] # Create log posterior storage to be overwritten
accept_reject_count = [0]
#####
# Run MCMC
#####
for i in range(1, Ntotal):
if i % printerval == 0:
print("i = ", i) # Iteration and Acceptance/Rejection ratio
print("acceptance_reject ratio", 100*sum(accept_reject_count)/len(accept_reject_count),'percent')
####
#####
        # Step 1: Jointly propose new parameters (multivariate-normal random walk)
#####
lp_prev = lp_store # Call previous stored log posterior
        # Previously used hardcoded proposal standard deviations, kept below for reference:
# logM_chirp_prop = logM_chirp[i - 1] + np.random.normal(0,1.94471368e-05)
# eta_prop = eta[i - 1] + np.random.normal(0,6.51506233e-04)
# beta_prop = beta[i - 1] + np.random.normal(0,6.17458158e-03)
prev_vec = [logM_chirp_1[i - 1], eta_1[i - 1], beta_1[i - 1],
logM_chirp_2[i - 1], eta_2[i - 1], beta_2[i - 1]]
prop_vec = np.random.multivariate_normal(prev_vec, (1/2)*Cov_Matrix)
# print(prop_vec)
logM_chirp_prop_1 = prop_vec[0]
eta_prop_1 = prop_vec[1]
beta_prop_1 = prop_vec[2]
logM_chirp_prop_2 = prop_vec[3]
eta_prop_2 = prop_vec[4]
beta_prop_2 = prop_vec[5]
# print(eta_prop_1,eta_prop_2,eta_prop_3,eta_prop_4)
param_1_prop = [logM_chirp_prop_1, eta_prop_1, beta_prop_1, Deff_1[0]]
param_2_prop = [logM_chirp_prop_2, eta_prop_2, beta_prop_2, Deff_2[0]]
signal_prop_f_1 = htilde_AP(f,param_1_prop) # New proposed signal
signal_prop_f_2 = htilde_AP(f,param_2_prop) # New proposed signal
signal_prop_f_tot = signal_prop_f_1 + signal_prop_f_2
        pdgrm_prop = abs(data_f - signal_prop_f_tot)**2 # Compute periodogram
# Compute log posterior
lp_prop = lpost_full(pdgrm_prop, variances,
logM_chirp_prop_1,logM_chirp_prop_2,logM_chirp_low,logM_chirp_high,
eta_prop_1,eta_prop_2, eta_low, eta_high,
beta_prop_1, beta_prop_2, beta_low, beta_high) # Compute proposed log posterior
if accept_reject(lp_prop, lp_prev) == 1: # Accept
logM_chirp_1.append(logM_chirp_prop_1)
eta_1.append(eta_prop_1)
beta_1.append(beta_prop_1)
logM_chirp_2.append(logM_chirp_prop_2)
eta_2.append(eta_prop_2)
beta_2.append(beta_prop_2)
accept_reject_count.append(1) # Add one to counter
lp_store = lp_prop # Overwrite lp_store
else: # Reject
logM_chirp_1.append(logM_chirp_1[i - 1])
eta_1.append(eta_1[i - 1])
beta_1.append(beta_1[i - 1])
logM_chirp_2.append(logM_chirp_2[i - 1])
eta_2.append(eta_2[i - 1])
beta_2.append(beta_2[i - 1])
accept_reject_count.append(0) # Add 0 to counter
lp.append(lp_store) # Add log posterior value
return logM_chirp_1,eta_1,beta_1,logM_chirp_2,eta_2,beta_2,lp
def CV_Calc(deltaH,noise_f,waveform_errors,diff_vec,Cov_Matrix,PSD,delta_f):
N = len(diff_vec)
deltah = noise_f + waveform_errors + deltaH
b_vec_n = [inner_prod(diff_vec[i],noise_f,PSD,delta_f) for i in range(0,N)]
b_vec_waveform_errors = [inner_prod(diff_vec[i],waveform_errors,PSD,delta_f) for i in range(0,N)]
b_vec_unresolved_signals = [inner_prod(diff_vec[i],deltaH,PSD,delta_f) for i in range(0,N)]
biases_pred_n = np.matmul(Cov_Matrix,b_vec_n)
biases_pred_waveform_errors = np.matmul(Cov_Matrix,b_vec_waveform_errors)
biases_pred_unresolved = np.matmul(Cov_Matrix,b_vec_unresolved_signals)
biases_pred_total = (biases_pred_waveform_errors + biases_pred_unresolved +
biases_pred_n )
return biases_pred_n,biases_pred_waveform_errors,biases_pred_unresolved,biases_pred_total
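# CV_Calc above implements the standard linearised bias estimate: each residual
# contribution delta_h (detector noise, waveform modelling error, unresolved signals)
# is projected onto the waveform derivatives and mapped through the parameter
# covariance,
#     Delta theta_i ~= sum_j Cov_ij * ( d h / d theta_j | delta_h ),
# with ( . | . ) the noise-weighted inner product inner_prod above. Illustrative call,
# assuming diff_vec and Cov_Matrix come from numerical_derivs / fish_matrix:
#     b_n, b_wf, b_unres, b_tot = CV_Calc(deltaH, noise_f, waveform_errors,
#                                         diff_vec, Cov_Matrix, PSD, delta_f)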
def lpost(pdgrm, variances,
logM_chirp_1,logM_chirp_low,logM_chirp_high,
eta_1, eta_low, eta_high,
beta_1, beta_low, beta_high):
'''
Compute log posterior
'''
return(lprior_logM_chirp(logM_chirp_1,logM_chirp_low, logM_chirp_high) +
lprior_eta(eta_1,eta_low, eta_high) +
lprior_beta(beta_1,beta_low, beta_high) +
llike(pdgrm, variances))
def MCMC_1_sig(data_f,f, true_vals,D_vec,Cov_Matrix,
variances,
logM_chirp_high,logM_chirp_low,
eta_high, eta_low,
beta_high, beta_low,
Ntotal,
burnin,
printerval = 50):
np.random.seed(2) # Set the seed
logM_chirp_1 = [] # Initialise empty vectors
eta_1 = []
beta_1 = []
Deff_1 = []
logM_chirp_1.append(true_vals[0])
eta_1.append(true_vals[1])
beta_1.append(true_vals[2])
Deff_1.append(D_vec[0])
delta_f = f[1] - f[0] # Extract sampling interval
params_1 = [logM_chirp_1[0],eta_1[0],beta_1[0],Deff_1[0]]
signal_init_f_1 = htilde_AP(f,params_1)
signal_f_init_tot = signal_init_f_1
# Compute periodogram
pdgrm = abs(data_f - signal_f_init_tot)**2
if pdgrm[0] == 0:
print('There will be no bias')
else:
print('Prepare for bias')
# Initial value for log posterior
lp = []
lp.append(lpost(pdgrm, variances,
logM_chirp_1[0], logM_chirp_low, logM_chirp_high,
eta_1[0], eta_low, eta_high,
beta_1[0], beta_low, beta_high))
lp_store = lp[0] # Create log posterior storage to be overwritten
accept_reject_count = [0]
#####
# Run MCMC
#####
for i in range(1, Ntotal):
if i % printerval == 0:
print("i = ", i) # Iteration and Acceptance/Rejection ratio
print("acceptance_reject ratio", 100*sum(accept_reject_count)/len(accept_reject_count),'percent')
####
#####
        # Step 1: Jointly propose new parameters (multivariate-normal random walk)
#####
lp_prev = lp_store # Call previous stored log posterior
        # Previously used hardcoded proposal standard deviations, kept below for reference:
# logM_chirp_prop = logM_chirp[i - 1] + np.random.normal(0,1.94471368e-05)
# eta_prop = eta[i - 1] + np.random.normal(0,6.51506233e-04)
# beta_prop = beta[i - 1] + np.random.normal(0,6.17458158e-03)
prev_vec = [logM_chirp_1[i - 1], eta_1[i - 1], beta_1[i - 1]]
prop_vec = np.random.multivariate_normal(prev_vec, Cov_Matrix)
# print(prop_vec)
logM_chirp_prop_1 = prop_vec[0]
eta_prop_1 = prop_vec[1]
beta_prop_1 = prop_vec[2]
# print(eta_prop_1,eta_prop_2,eta_prop_3,eta_prop_4)
param_1_prop = [logM_chirp_prop_1, eta_prop_1, beta_prop_1, Deff_1[0]]
signal_prop_f_1 = htilde_AP(f,param_1_prop) # New proposed signal
signal_prop_f_tot = signal_prop_f_1
        pdgrm_prop = abs(data_f - signal_prop_f_tot)**2 # Compute periodogram
# Compute log posterior
lp_prop = lpost(pdgrm_prop, variances,
logM_chirp_prop_1,logM_chirp_low,logM_chirp_high,
eta_prop_1, eta_low, eta_high,
beta_prop_1, beta_low, beta_high) # Compute proposed log posterior
if accept_reject(lp_prop, lp_prev) == 1: # Accept
logM_chirp_1.append(logM_chirp_prop_1)
eta_1.append(eta_prop_1)
beta_1.append(beta_prop_1)
accept_reject_count.append(1) # Add one to counter
lp_store = lp_prop # Overwrite lp_store
else: # Reject
logM_chirp_1.append(logM_chirp_1[i - 1])
eta_1.append(eta_1[i - 1])
beta_1.append(beta_1[i - 1])
accept_reject_count.append(0) # Add 0 to counter
lp.append(lp_store) # Add log posterior value
return logM_chirp_1,eta_1,beta_1,lp
| 26,129 | 32.414322 | 130 |
py
|
Input-Specific-Certification
|
Input-Specific-Certification-main/zipdata.py
|
import multiprocessing
import os.path as op
from threading import local
from zipfile import ZipFile, BadZipFile
from PIL import Image
from io import BytesIO
import torch.utils.data as data
_VALID_IMAGE_TYPES = ['.jpg', '.jpeg', '.tiff', '.bmp', '.png']
class ZipData(data.Dataset):
_IGNORE_ATTRS = {'_zip_file'}
def __init__(self, path, map_file,
transform=None, target_transform=None,
extensions=None):
self._path = path
if not extensions:
extensions = _VALID_IMAGE_TYPES
self._zip_file = ZipFile(path)
self.zip_dict = {}
self.samples = []
self.transform = transform
self.target_transform = target_transform
self.class_to_idx = {}
with open(map_file, 'r') as f:
for line in iter(f.readline, ""):
line = line.strip()
if not line:
continue
cls_idx = [l for l in line.split('\t') if l]
if not cls_idx:
continue
assert len(cls_idx) >= 2, "invalid line: {}".format(line)
idx = int(cls_idx[1])
cls = cls_idx[0]
del cls_idx
at_idx = cls.find('@')
assert at_idx >= 0, "invalid class: {}".format(cls)
cls = cls[at_idx + 1:]
if cls.startswith('/'):
# Python ZipFile expects no root
cls = cls[1:]
assert cls, "invalid class in line {}".format(line)
prev_idx = self.class_to_idx.get(cls)
assert prev_idx is None or prev_idx == idx, "class: {} idx: {} previously had idx: {}".format(
cls, idx, prev_idx
)
self.class_to_idx[cls] = idx
for fst in self._zip_file.infolist():
fname = fst.filename
target = self.class_to_idx.get(fname)
if target is None:
continue
if fname.endswith('/') or fname.startswith('.') or fst.file_size == 0:
continue
ext = op.splitext(fname)[1].lower()
if ext in extensions:
self.samples.append((fname, target))
assert len(self), "No images found in: {} with map: {}".format(self._path, map_file)
def __repr__(self):
return 'ZipData({}, size={})'.format(self._path, len(self))
def __getstate__(self):
return {
key: val if key not in self._IGNORE_ATTRS else None
            for key, val in self.__dict__.items()  # .iteritems() is Python 2 only
}
def __getitem__(self, index):
proc = multiprocessing.current_process()
pid = proc.pid # get pid of this process.
if pid not in self.zip_dict:
self.zip_dict[pid] = ZipFile(self._path)
zip_file = self.zip_dict[pid]
if index >= len(self) or index < 0:
raise KeyError("{} is invalid".format(index))
path, target = self.samples[index]
try:
sample = Image.open(BytesIO(zip_file.read(path))).convert('RGB')
except BadZipFile:
print("bad zip file")
return None, None
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
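# Minimal usage sketch (paths are placeholders; the archive and its tab-separated map
# file, with lines of the form "<prefix>@<path-inside-zip>\t<label>", must already exist):
#
#     from torchvision import transforms
#     ds = ZipData('/data/train.zip', '/data/train_map.txt',
#                  transform=transforms.ToTensor())
#     image, label = ds[0]    # image decoded from the zip and passed through the transform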
| 3,481 | 35.270833 | 110 |
py
|
Input-Specific-Certification
|
Input-Specific-Certification-main/certify_iss.py
|
# evaluate a smoothed classifier on a dataset
import argparse
from time import time
from model import resnet110
from datasets import get_dataset, DATASETS, get_num_classes
import numpy as np
from scipy.stats import norm
from statsmodels.stats.proportion import proportion_confint
import torch
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Certify with FastCertify')
# prepare data, model, output file
parser.add_argument("dataset", default='cifar10', help="which dataset")
parser.add_argument("base_classifier", type=str, help="path to saved pytorch model of base classifier")
parser.add_argument("sigma", type=float, help="noise hyperparameter")
parser.add_argument("outfile", type=str, help="output file")
# hyperparameters for sample size planning
parser.add_argument("--loss_type", choices=['absolute', 'relative'], help="loss type")
parser.add_argument("--max_loss", type=float, default=0.01, help="the tolerable loss of certified radius")
parser.add_argument("--batch_size", type=int, default=200, help="batch size")
parser.add_argument("--max_size", type=int, default=200, help="the maximum sample size")
parser.add_argument('--n0', type=int, default=10, help='the sample size for FastCertify')
parser.add_argument("--alpha", type=float, default=0.001, help="failure probability")
# hyperparameters for the input iteration
parser.add_argument("--skip", type=int, default=1, help="how many examples to skip")
parser.add_argument("--max", type=int, default=-1, help="stop after this many examples")
parser.add_argument("--split", choices=["train", "test"], default="test", help="train or test set")
args = parser.parse_args()
print(args)
def _sample_noise(base_classifier, num_classes, x: torch.tensor, sigma: float, batchnum: int, batch_size) -> np.ndarray:
with torch.no_grad():
counts = np.zeros(num_classes, dtype=int)
for _ in range(batchnum):
batch = x.repeat((batch_size, 1, 1, 1))
noise = torch.randn_like(batch, device='cuda') * sigma
predictions = base_classifier(batch + noise).argmax(1)
counts += _count_arr(predictions.cpu().numpy(), num_classes)
return counts
def _count_arr(arr: np.ndarray, length: int) -> np.ndarray:
counts = np.zeros(length, dtype=int)
for idx in arr:
counts[idx] += 1
return counts
def _lower_confidence_bound(NA, N, alpha: float) -> float:
return proportion_confint(NA, N, alpha=2 * alpha, method="beta")[0]
def generate_iss(loss, batch_size, upper, sigma, alpha, loss_type) -> dict:
iss = {}
max_sample_size = upper * batch_size
if loss_type=='absolute':
pre=0
for pa in list(np.arange(500 + 1) * 0.001+0.5):
iss[pa] = upper
opt_radius = sigma * norm.ppf(
_lower_confidence_bound(max_sample_size * pa, max_sample_size, alpha))
standard = opt_radius - loss
if standard <= 0:
iss[pa] = 0
else:
for num in range(pre,upper + 1):
sample_size = num * batch_size
if sigma * norm.ppf(_lower_confidence_bound(sample_size * pa, sample_size, alpha)) >= standard:
iss[pa] = num
pre=num
break
if loss_type=='relative':
for pa in list(np.arange(500 + 1) * 0.001+0.5):
iss[pa] = upper
opt_radius = sigma * norm.ppf(
_lower_confidence_bound(max_sample_size * pa, max_sample_size, alpha))
standard = opt_radius*(1- loss)
if standard <= 0:
iss[pa] = 0
else:
for num in range(upper + 1):
sample_size = num * batch_size
if sigma * norm.ppf(_lower_confidence_bound(sample_size * pa, sample_size, alpha)) >= standard:
iss[pa] = num
break
return iss
def find_opt_batchnum(iss, pa_lower, pa_upper):
list_p = list(iss.keys())
pa_lower = np.clip(pa_lower, 0.0, 1.0)
pa_upper = np.clip(pa_upper, 0.0, 1.0)
for i, p in enumerate(list_p):
if pa_lower <= p:
opt_batchnum = max(iss[list_p[max(0,i - 1)]], iss[p])
break
for i, p in enumerate(list_p):
if pa_upper <= p:
opt_batchnum = max(opt_batchnum, iss[list_p[max(0,i - 1)]], iss[p])
break
return opt_batchnum
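# Illustrative use of the two helpers above (numbers are placeholders): generate_iss
# precomputes, for a grid of candidate pA values, the smallest number of batches whose
# certified radius is close enough (per `loss` and `loss_type`) to the radius attainable
# with the maximum sample size; find_opt_batchnum then looks that table up with the
# Clopper-Pearson interval on pA obtained from the n0 pilot samples.
#
#     iss = generate_iss(loss=0.01, batch_size=200, upper=50, sigma=0.25,
#                        alpha=0.001, loss_type='absolute')
#     batches = find_opt_batchnum(iss, pa_lower=0.62, pa_upper=0.71)
#     sample_size = batches * 200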
if __name__ == "__main__":
t_start = time()
batch_size = args.batch_size
n0 = args.n0//batch_size
alpha = args.alpha
num_classes = 10
sigma = args.sigma
loss = args.max_loss
upper = args.max_size//batch_size
loss_type = args.loss_type
model = resnet110().cuda()
# load the base classifier
checkpoint = torch.load(args.base_classifier)
if checkpoint is not None:
print('==> Resuming from checkpoint..')
model.load_state_dict(checkpoint['net'])
model.eval()
# prepare output file
f = open(args.outfile, 'w')
print("idx\tlabel\tpredict\tpA\tsamplesize\tradius\tcorrect\ttime_forward", file=f, flush=True)
# iterate through the dataset
dataset = get_dataset(args.dataset, args.split)
certify_num_total = 0
sample_size_total = 0
radius_total = 0
grid = [0.25, 0.50, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25]
    cnt_grid_hard = np.zeros((len(grid) + 1,), dtype=int)
t1=time()
iss=generate_iss(loss, batch_size, upper, sigma, alpha, loss_type)
t2=time()
time_iss=t2-t1
for i in tqdm(range(len(dataset))):
# only certify every args.skip examples, and stop after args.max examples
if i % args.skip != 0:
continue
if i == args.max:
break
(x, label) = dataset[i]
certify_num_total += 1
t1 = time()
# certify the prediction of g around x
x = x.cuda()
counts_prediction = _sample_noise(model, num_classes, x, sigma, n0, batch_size)
# use these samples to take a guess at the top class
prediction_uncertain = counts_prediction.argmax().item()
pa_lower, pa_upper = proportion_confint(counts_prediction[prediction_uncertain].item(), n0 * batch_size, alpha,
method="beta")
# compute the optimal batchnum
opt_batchnum = find_opt_batchnum(iss, pa_lower, pa_upper)
sample_size = opt_batchnum * batch_size
# forward
if sample_size != 0:
# draw more samples of f(x + epsilon)
counts_certification = counts_prediction
counts_certification += _sample_noise(model, num_classes, x, sigma, opt_batchnum - n0, batch_size)
# use these samples to estimate a lower bound on pA
nA = counts_certification[prediction_uncertain].item()
pABar = _lower_confidence_bound(nA, sample_size, alpha)
if pABar < 0.5:
prediction = -1
radius = 0
else:
prediction = prediction_uncertain
radius = sigma * norm.ppf(pABar)
else:
pABar=pa_lower
prediction = -1
radius = 0
t2 = time()
sample_size_total += sample_size
correct = int(prediction == label)
if correct == 1:
cnt_grid_hard[0] += 1
radius_total += radius
for j in range(len(grid)):
if radius >= grid[j]:
cnt_grid_hard[j + 1] += 1
time_forward = t2 - t1
print(f"{i}\t{label}\t{prediction}\t{pABar:.3f}\t{sample_size}\t{radius:.3f}\t{correct}\t{time_forward:.3f}",
file=f, flush=True)
t_end = time()
print(f'===Certification Summary({loss_type})===', file=f, flush=True)
print(
f"image number={certify_num_total}, total time={t_end - t_start}, time_iss={time_iss}, total sample size={sample_size_total}, loss control={100 * loss:.2f}%, average radius={radius_total/certify_num_total:.3f}",
file=f, flush=True)
print('Radius: 0.0 Number: {} Acc: {}%'.format(
cnt_grid_hard[0], cnt_grid_hard[0] / certify_num_total * 100),file=f, flush=True)
for j in range(len(grid)):
print('Radius: {} Number: {} Acc: {}%'.format(
grid[j], cnt_grid_hard[j + 1], cnt_grid_hard[j + 1] / certify_num_total * 100),file=f, flush=True)
print('ACR: {}'.format(radius_total / certify_num_total), file=f, flush=True)
f.close()
| 8,532 | 38.50463 | 219 |
py
|
Input-Specific-Certification
|
Input-Specific-Certification-main/model.py
|
'''
ResNet110 for Cifar-10
References:
[1] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.
[2] K. He, X. Zhang, S. Ren, and J. Sun. Identity mappings in deep residual networks. In ECCV, 2016.
'''
import torch.nn as nn
import torch.nn.functional as F
import math
def conv3x3(in_planes, out_planes, stride=1):
" 3x3 convolution with padding "
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet_Cifar(nn.Module):
def __init__(self, block, layers, width=1, num_classes=10):
super(ResNet_Cifar, self).__init__()
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16 * width, layers[0])
self.layer2 = self._make_layer(block, 32 * width, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64 * width, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion * width, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet110(**kwargs):
model = ResNet_Cifar(BasicBlock, [18, 18, 18], width=1, **kwargs)
return model
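# Quick shape check (illustrative; CIFAR-10-sized input, runs on CPU):
if __name__ == "__main__":
    import torch
    net = resnet110()
    logits = net(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([2, 10])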
| 3,129 | 27.198198 | 101 |
py
|
Input-Specific-Certification
|
Input-Specific-Certification-main/datasets.py
|
import bisect
import os
import pickle
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
from torchvision.datasets.utils import check_integrity
from typing import *
from zipdata import ZipData
# set this environment variable to the location of your imagenet directory if you want to read ImageNet data.
# make sure your val directory is preprocessed to look like the train directory, e.g. by running this script
# https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh
IMAGENET_LOC_ENV = "IMAGENET_DIR"
IMAGENET_ON_PHILLY_DIR = "/hdfs/public/imagenet/2012/"
# list of all datasets
DATASETS = ["imagenet", "imagenet32", "cifar10"]
def get_dataset(dataset: str, split: str) -> Dataset:
"""Return the dataset as a PyTorch Dataset object"""
if dataset == "imagenet":
if "PT_DATA_DIR" in os.environ: #running on Philly
return _imagenet_on_philly(split)
else:
return _imagenet(split)
elif dataset == "imagenet32":
return _imagenet32(split)
elif dataset == "cifar10":
return _cifar10(split)
def get_num_classes(dataset: str):
"""Return the number of classes in the dataset. """
if dataset == "imagenet":
return 1000
elif dataset == "cifar10":
return 10
def get_normalize_layer(dataset: str) -> torch.nn.Module:
"""Return the dataset's normalization layer"""
if dataset == "imagenet":
return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
elif dataset == "cifar10":
return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
elif dataset == "imagenet32":
return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
def get_input_center_layer(dataset: str) -> torch.nn.Module:
"""Return the dataset's Input Centering layer"""
if dataset == "imagenet":
return InputCenterLayer(_IMAGENET_MEAN)
elif dataset == "cifar10":
return InputCenterLayer(_CIFAR10_MEAN)
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STDDEV = [0.229, 0.224, 0.225]
_CIFAR10_MEAN = [0.4914, 0.4822, 0.4465]
_CIFAR10_STDDEV = [0.2023, 0.1994, 0.2010]
def _cifar10(split: str) -> Dataset:
dataset_path = os.path.join('datasets', 'dataset_cache')
if split == "train":
return datasets.CIFAR10(dataset_path, train=True, download=True, transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
]))
elif split == "test":
return datasets.CIFAR10(dataset_path, train=False, download=True, transform=transforms.ToTensor())
elif split in ["mini_labelled", "mini_unlabelled", "mini_test"]:
return HybridCifarDataset(split)
# return MiniCifarDataset(split)
else:
raise Exception("Unknown split name.")
def _imagenet_on_philly(split: str) -> Dataset:
trainpath = os.path.join(IMAGENET_ON_PHILLY_DIR, 'train.zip')
train_map = os.path.join(IMAGENET_ON_PHILLY_DIR, 'train_map.txt')
valpath = os.path.join(IMAGENET_ON_PHILLY_DIR, 'val.zip')
val_map = os.path.join(IMAGENET_ON_PHILLY_DIR, 'val_map.txt')
if split == "train":
return ZipData(trainpath, train_map,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]))
elif split == "test":
return ZipData(valpath, val_map,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
]))
def _imagenet(split: str) -> Dataset:
if not IMAGENET_LOC_ENV in os.environ:
raise RuntimeError("environment variable for ImageNet directory not set")
dir = os.environ[IMAGENET_LOC_ENV]
if split == "train":
subdir = os.path.join(dir, "train")
transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
elif split == "test":
subdir = os.path.join(dir, "val")
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
return datasets.ImageFolder(subdir, transform)
def _imagenet32(split: str) -> Dataset:
dataset_path = os.path.join('datasets', 'Imagenet32')
if split == "train":
return ImageNetDS(dataset_path, 32, train=True, transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()]))
elif split == "test":
return ImageNetDS(dataset_path, 32, train=False, transform=transforms.ToTensor())
class NormalizeLayer(torch.nn.Module):
"""Standardize the channels of a batch of images by subtracting the dataset mean
and dividing by the dataset standard deviation.
In order to certify radii in original coordinates rather than standardized coordinates, we
add the Gaussian noise _before_ standardizing, which is why we have standardization be the first
layer of the classifier rather than as a part of preprocessing as is typical.
"""
def __init__(self, means: List[float], sds: List[float]):
"""
:param means: the channel means
:param sds: the channel standard deviations
"""
super(NormalizeLayer, self).__init__()
self.means = torch.tensor(means).cuda()
self.sds = torch.tensor(sds).cuda()
def forward(self, input: torch.tensor):
(batch_size, num_channels, height, width) = input.shape
means = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
sds = self.sds.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
return (input - means)/sds
class InputCenterLayer(torch.nn.Module):
"""Centers the channels of a batch of images by subtracting the dataset mean.
In order to certify radii in original coordinates rather than standardized coordinates, we
add the Gaussian noise _before_ standardizing, which is why we have standardization be the first
layer of the classifier rather than as a part of preprocessing as is typical.
"""
def __init__(self, means: List[float]):
"""
:param means: the channel means
:param sds: the channel standard deviations
"""
super(InputCenterLayer, self).__init__()
self.means = torch.tensor(means).cuda()
def forward(self, input: torch.tensor):
(batch_size, num_channels, height, width) = input.shape
means = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
return input - means
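# As the docstrings above explain, these layers are meant to be the *first* layer of the
# classifier so that Gaussian noise can be added in the original [0, 1] image coordinates.
# Typical composition (sketch; `base_model` is a placeholder):
#
#     model = torch.nn.Sequential(get_normalize_layer("cifar10"), base_model)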
# from https://github.com/hendrycks/pre-training
class ImageNetDS(Dataset):
"""`Downsampled ImageNet <https://patrykchrabaszcz.github.io/Imagenet32/>`_ Datasets.
Args:
root (string): Root directory of dataset where directory
``ImagenetXX_train`` exists.
img_size (int): Dimensions of the images: 64,32,16,8
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
base_folder = 'Imagenet{}_train'
train_list = [
['train_data_batch_1', ''],
['train_data_batch_2', ''],
['train_data_batch_3', ''],
['train_data_batch_4', ''],
['train_data_batch_5', ''],
['train_data_batch_6', ''],
['train_data_batch_7', ''],
['train_data_batch_8', ''],
['train_data_batch_9', ''],
['train_data_batch_10', '']
]
test_list = [
['val_data', ''],
]
def __init__(self, root, img_size, train=True, transform=None, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.img_size = img_size
self.base_folder = self.base_folder.format(img_size)
# if not self._check_integrity():
# raise RuntimeError('Dataset not found or corrupted.') # TODO
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(self.root, self.base_folder, f)
with open(file, 'rb') as fo:
entry = pickle.load(fo)
self.train_data.append(entry['data'])
self.train_labels += [label - 1 for label in entry['labels']]
self.mean = entry['mean']
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((self.train_data.shape[0], 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
else:
f = self.test_list[0][0]
file = os.path.join(self.root, f)
fo = open(file, 'rb')
entry = pickle.load(fo)
self.test_data = entry['data']
self.test_labels = [label - 1 for label in entry['labels']]
fo.close()
self.test_data = self.test_data.reshape((self.test_data.shape[0], 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
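# Usage sketch (assumes the downsampled-ImageNet pickle batches are present under
# datasets/Imagenet32, as expected by _imagenet32 above):
#
#     train_set = ImageNetDS('datasets/Imagenet32', 32, train=True,
#                            transform=transforms.ToTensor())
#     img, target = train_set[0]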
## To use this dataset, please contact the authors of https://arxiv.org/pdf/1905.13736.pdf
# to get access to this pickle file (ti_top_50000_pred_v3.1.pickle) containing the dataset.
class TiTop50KDataset(Dataset):
"""500K images closest to the CIFAR-10 dataset from
the 80 Millon Tiny Images Datasets"""
def __init__(self):
super(TiTop50KDataset, self).__init__()
dataset_path = os.path.join('datasets', 'ti_top_50000_pred_v3.1.pickle')
self.dataset_dict = pickle.load(open(dataset_path,'rb'))
#{'data', 'extrapolated_targets', 'ti_index',
# 'prediction_model', 'prediction_model_epoch'}
self.length = len(self.dataset_dict['data'])
self.transforms = transforms.Compose([
transforms.Resize((32,32)),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
def __getitem__(self, index):
img = self.dataset_dict['data'][index]
target = self.dataset_dict['extrapolated_targets'][index]
img = Image.fromarray(img)
img = self.transforms(img)
return img, target
def __len__(self):
return self.length
class MultiDatasetsDataLoader(object):
"""Dataloader to alternate between batches from multiple dataloaders
"""
def __init__(self, task_data_loaders, equal_num_batch=True, start_iteration=0):
if equal_num_batch:
lengths = [len(task_data_loaders[0]) for i,_ in enumerate(task_data_loaders)]
else:
lengths = [len(data_loader) for data_loader in task_data_loaders]
self.task_data_loaders = task_data_loaders
self.start_iteration = start_iteration
self.length = sum(lengths)
self.dataloader_indices = np.hstack([
np.full(task_length, loader_id)
for loader_id, task_length in enumerate(lengths)
])
def __iter__(self):
self.task_data_iters = [iter(data_loader)
for data_loader in self.task_data_loaders]
self.cur_idx = self.start_iteration
# synchronizing the task sequence on each of the worker processes
# for distributed training. The data will still be different, but
# will come from the same task on each GPU.
# np.random.seed(22)
np.random.shuffle(self.dataloader_indices)
# np.random.seed()
return self
def __next__(self):
if self.cur_idx == len(self.dataloader_indices):
raise StopIteration
loader_id = self.dataloader_indices[self.cur_idx]
self.cur_idx += 1
return next(self.task_data_iters[loader_id]), loader_id
next = __next__ # Python 2 compatibility
def __len__(self):
return self.length
@property
def num_tasks(self):
return len(self.task_data_iters)
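# Usage sketch for MultiDatasetsDataLoader (the loaders are placeholders): each
# iteration yields a (batch, loader_id) pair, so the training loop can tell which
# dataset the current batch came from.
#
#     labelled_loader = DataLoader(_cifar10("train"), batch_size=128, shuffle=True)
#     extra_loader = DataLoader(TiTop50KDataset(), batch_size=128, shuffle=True)
#     loader = MultiDatasetsDataLoader([labelled_loader, extra_loader])
#     for (images, targets), loader_id in loader:
#         pass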
| 14,566 | 36.836364 | 109 |
py
|
ENCAS
|
ENCAS-main/nat_api.py
|
import pickle
import numpy as np
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.ofa_mbv3_my import OFAMobileNetV3My
from networks.proxyless_my import OFAProxylessNASNetsMy
from search_space.ensemble_ss import EnsembleSearchSpace
from utils import get_metric_complement, get_net_info, SupernetworkWrapper
class NatAPI():
def __init__(self, filename):
super().__init__()
self.use_cache = True
# some variables are unused, kept for backwards compatibility
predictor, n_classes, supernet_paths, archive_path, sec_obj, _, alphabet_name, n_image_channels, dataset, \
search_space_name, ensemble_ss_names, _ = pickle.load(open(filename, 'rb'))
self.search_space = EnsembleSearchSpace(ensemble_ss_names,
[{'alphabet':alphabet_name_cur, 'ensemble_size': len(alphabet_name)}
for alphabet_name_cur in alphabet_name])
self.predictor = predictor
self.sec_obj = sec_obj
self.n_image_channels = n_image_channels
if search_space_name == 'ensemble':
# assume supernet_paths is a list of paths, 1 per supernet
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
classes_to_use = [ss_name_to_class[ss_name] for ss_name in ensemble_ss_names]
self.evaluators = [SupernetworkWrapper(n_classes=n_classes, model_path=supernet_path,
engine_class_to_use=encoder_class,
n_image_channels=self.n_image_channels, if_ignore_decoder=False, dataset=dataset,
search_space_name=ss_name, decoder_name='')
for supernet_path, ss_name, encoder_class in zip(supernet_paths, ensemble_ss_names, classes_to_use)]
def fitness(self, solution):
solution = [int(x) for x in solution]
config = self.search_space.decode(solution)
sec_objs = []
for conf, evaluator in zip(config, self.evaluators):
subnet, _ = evaluator.sample({'ks': conf['ks'], 'e': conf['e'], 'd': conf['d'], 'w': conf['w']})
info = get_net_info(subnet, (self.n_image_channels, conf['r'], conf['r']),
measure_latency=self.sec_obj, print_info=False, clean=True)
sec_objs.append(info[self.sec_obj])
if 'position' not in conf:
obj1_proper_form = -sum(sec_objs)
top1_err = self.predictor.predict(np.array(solution)[np.newaxis, :])[0]
obj0_proper_form = get_metric_complement(top1_err[0])
else:
input_acc = np.array(solution)[np.newaxis, :]
solution_reencoded_sep = self.search_space.encode(config, if_return_separate=True)
input_flops = np.concatenate([sol_sep[-2:] for sol_sep in solution_reencoded_sep] + [[int(f) for f in sec_objs]])[np.newaxis, :]
top1_err = self.predictor.predict({'for_acc': input_acc, 'for_flops': input_flops})[0]
obj0_proper_form = get_metric_complement(top1_err[0])
obj1_proper_form = -top1_err[1]
# third objective was removed during code clean-up, but want to return 3 values for backward compatibility
return (obj0_proper_form, obj1_proper_form, 0)
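# Hypothetical usage sketch (the pickle path below is an assumption; in the real pipeline the
# file is produced by the search code and the genome comes from the MO-GOMEA executable):
#     api = NatAPI('logs/my_run/iter_10/save_for_c_api')
#     acc_obj, neg_sec_obj, _ = api.fitness([0, 1, 0, 2])  # flat integer genome of the right length
# fitness() decodes the genome into one config per supernetwork, measures the secondary objective
# (e.g. FLOPs) of every ensemble member and queries the surrogate predictor, returning the
# objectives in "higher is better" form (accuracy-like value, negated complexity) so the external
# searcher can maximise both.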
| 3,511 | 53.030769 | 140 |
py
|
ENCAS
|
ENCAS-main/evaluate.py
|
import time
from collections import defaultdict
import json
import torch
import numpy as np
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.ofa_mbv3_my import OFAMobileNetV3My
from networks.proxyless_my import OFAProxylessNASNetsMy
from run_manager import get_run_config
from ofa.imagenet_classification.elastic_nn.modules.dynamic_op import DynamicSeparableConv2d
DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
from run_manager.run_manager_my import RunManagerMy
from utils import set_seed, get_net_info, SupernetworkWrapper
def evaluate_many_configs(supernet_folder_path, configs, if_test=False, config_msunas=None, **kwargs):
accs = []
args = {k: v[0] for k, v in default_kwargs.items()}
if config_msunas is not None:
for key in ['data', 'dataset', 'n_classes', 'trn_batch_size', 'vld_batch_size',
'vld_size', 'n_workers', 'sec_obj']:
args[key] = config_msunas[key]
args['pass_subnet_config_directly'] = True
args['test'] = if_test
args['cutout_size'] = config_msunas.get('cutout_size', 32)
args['reset_running_statistics'] = True
args.update(kwargs)
if 'thresholds' not in args:
args['thresholds'] = None
info_keys_to_return = []
if 'info_keys_to_return' in kwargs:
info_keys_to_return = kwargs['info_keys_to_return']
info_keys_to_return_2_values = defaultdict(list)
args['info_keys_to_return'] = info_keys_to_return
args['supernet_path'] = supernet_folder_path
args['search_space_name'] = kwargs['search_space_name']
args['ensemble_ss_names'] = kwargs['ensemble_ss_names']
# a hack for speed: reuse run_config for all the subnetworks evaluated
# it should change nothing because the subnet architecture is the only
# thing changing in the _for_ loop
run_config = kwargs.get('run_config', None)
for config in configs:
args['subnet'] = config
args['run_config'] = run_config
info = _evaluate_one_config(args)
top1_error = info['top1']
run_config = info['run_config']
accs.append(top1_error)
for key in info_keys_to_return:
info_keys_to_return_2_values[key].append(info[key])
if len(info_keys_to_return) > 0:
return accs, info_keys_to_return_2_values
return accs
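# A hedged usage sketch (paths and keyword values are assumptions, not taken from the project
# configs): evaluate_many_configs is typically given the msunas config dict plus the search-space
# names, and returns one top-1 error per evaluated subnet config; internally it reuses the
# run_config returned by the first evaluation so the dataloaders are built only once.
#     errs = evaluate_many_configs(['logs/run/supernet_w1.0'], [subnet_config],
#                                  if_test=False, config_msunas=config_msunas,
#                                  search_space_name='ensemble', ensemble_ss_names=['ofa'])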
def _evaluate_one_config(args):
set_seed(args['random_seed'])
preproc_alphanet = False
if args['pass_subnet_config_directly']:
config = args['subnet']
else:
config = json.load(open(args['subnet']))
if args['search_space_name'] == 'reproduce_nat':
if config['w'] == 1.0:
evaluator = SupernetworkWrapper(n_classes=args['n_classes'], model_path=args['supernet_path'][0],
engine_class_to_use=OFAMobileNetV3My, dataset=args['dataset'],
search_space_name='ofa')
else:
evaluator = SupernetworkWrapper(n_classes=args['n_classes'], model_path=args['supernet_path'][1],
engine_class_to_use=OFAMobileNetV3My, dataset=args['dataset'],
search_space_name='ofa')
subnet, _ = evaluator.sample(config)
subnet = subnet.cuda()
resolution = config['r']
elif args['search_space_name'] == 'ensemble':
ensemble_ss_names = args['ensemble_ss_names']
supernet_paths = args['supernet_path']
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
# some ensembles have missing members which are represented by config that is None
# for ENCAS, also need to remove thresholds
if args['thresholds'] is None:
filtered = filter(lambda conf_p_e: conf_p_e[0] is not None, zip(config, supernet_paths, ensemble_ss_names))
config, supernet_paths, ensemble_ss_names = list(zip(*filtered))
else:
filtered = filter(lambda conf_p_e_t: conf_p_e_t[0] is not None, zip(config, supernet_paths, ensemble_ss_names, args['thresholds']))
config, supernet_paths, ensemble_ss_names, thresholds = list(zip(*filtered))
args['thresholds'] = thresholds
print(f'{supernet_paths=}')
classes_to_use = [ss_name_to_class[ss_name] for ss_name in ensemble_ss_names]
evaluators = [SupernetworkWrapper(n_classes=args['n_classes'], model_path=supernet_path,
engine_class_to_use=encoder_class, dataset=args['dataset'],
search_space_name=ss_name)
for supernet_path, ss_name, encoder_class in zip(supernet_paths, ensemble_ss_names, classes_to_use)]
subnet = [e.sample(c)[0] for e, c in zip(evaluators, config)]
resolution = [conf['r'] for conf in config]
# If normal ENCAS, thresholds are already provided. Otherwise:
if args['thresholds'] is None:
if 'threshold' in config[0]: # ENCAS-joint
# (but the condition is also satisfied if ENCAS was run on subnets extracted from ENCAS-joint)
# (that was a bug, now ENCAS won't execute this code no matter which subnets it uses)
thresholds = [c['threshold'] for c in config]
positions = [c['position'] for c in config]
idx = np.argsort(positions)[::-1]
                # no need to sort the positions themselves, only the ordering they induce is used
thresholds = np.array(thresholds)[idx].tolist()
resolution = np.array(resolution)[idx].tolist()
subnet = np.array(subnet)[idx].tolist()
args['thresholds'] = thresholds
else: # not a cascade => can rearrange order
idx = np.argsort(resolution)[::-1]
resolution = np.array(resolution)[idx].tolist()
subnet = np.array(subnet)[idx].tolist()
        preproc_alphanet = 'alphanet' in ensemble_ss_names
return _evaluate_one_model(
subnet, data_path=args['data'], dataset=args['dataset'], resolution=resolution,
trn_batch_size=args['trn_batch_size'], vld_batch_size=args['vld_batch_size'], num_workers=args['n_workers'],
valid_size=args['vld_size'], is_test=args['test'], measure_latency=args['latency'],
no_logs=(not args['verbose']), reset_running_statistics=args['reset_running_statistics'],
run_config=args.get('run_config', None), sec_obj=args['sec_obj'],
info_keys_to_return=args['info_keys_to_return'], cutout_size=args['cutout_size'], thresholds=args['thresholds'],
if_use_logit_gaps=args['if_use_logit_gaps'], preproc_alphanet=preproc_alphanet)
def _evaluate_one_model(subnet, data_path, dataset='imagenet', resolution=224, trn_batch_size=128,
vld_batch_size=250, num_workers=4, valid_size=None, is_test=True,
measure_latency=None, no_logs=False, reset_running_statistics=True,
run_config=None, sec_obj='flops', info_keys_to_return=(), cutout_size=None, thresholds=None, if_use_logit_gaps=False,
preproc_alphanet=False):
info = get_net_info(subnet, (3, resolution, resolution), measure_latency=measure_latency,
print_info=False, clean=True, if_dont_sum=thresholds is not None)
print(f"{info['flops']=}")
if_return_logit_gaps = 'logit_gaps' in info_keys_to_return
validation_kwargs = {'if_return_outputs': 'output_distr' in info_keys_to_return,
'if_return_logit_gaps': if_return_logit_gaps}
resolution_is_list = type(resolution) is list
if resolution_is_list:
validation_kwargs['resolutions_list'] = resolution_list = resolution
resolution = max(resolution) # collators need max resolution; will downsample in the val loop
# Actually, collators need the first resolution, which in a cascade won't be the largest one
validation_kwargs['thresholds'] = thresholds
if thresholds is not None:
resolution = resolution_list[0]
validation_kwargs['if_use_logit_gaps'] = if_use_logit_gaps
if run_config is None:
run_config = get_run_config(dataset=dataset, data_path=data_path, image_size=resolution, n_epochs=0,
train_batch_size=trn_batch_size, test_batch_size=vld_batch_size, n_worker=num_workers,
valid_size=valid_size, total_epochs=0, dataset_name=dataset, cutout_size=cutout_size,
preproc_alphanet=preproc_alphanet)
data_provider = run_config.data_provider
data_provider.collator_train.set_resolutions([resolution])
data_provider.collator_subtrain.set_resolutions([resolution])
run_config.valid_loader.collate_fn.set_resolutions([resolution])
run_config.test_loader.collate_fn.set_resolutions([resolution])
data_provider.assign_active_img_size(resolution)
run_manager = RunManagerMy(subnet, run_config, no_gpu=False, sec_obj=sec_obj)
if reset_running_statistics:
# same subset size & batch size as during evaluation in training
if not run_manager.is_ensemble:
data_provider.collator_subtrain.set_resolutions([resolution])
data_loader = run_config.random_sub_train_loader(2304 * 6, vld_batch_size, resolution)
set_running_statistics(subnet, data_loader)
else:
for i_net, net_cur in enumerate(subnet):
print(f'Resetting BNs for network {i_net}')
st = time.time()
data_provider.collator_subtrain.set_resolutions([resolution_list[i_net]])
mul_due_to_logit_gaps = 6 # logit gaps differ a lot when comparing 3 and 1
data_loader = run_config.random_sub_train_loader(2304 * mul_due_to_logit_gaps, vld_batch_size,
resolution_list[i_net])
net_cur.cuda()
if hasattr(net_cur, 'reset_running_stats_for_calibration'): # alphanet & attentiveNAS
with torch.no_grad(), torch.cuda.amp.autocast():
net_cur.set_bn_param(0.1, 1e-5)
net_cur.eval()
net_cur.reset_running_stats_for_calibration()
for images, _ in data_loader:
images = images.cuda(non_blocking=True)
out = net_cur(images)
images.cpu(), out.cpu()
del images, out
else:
set_running_statistics(net_cur, data_loader)
ed = time.time()
print(f'BN resetting time for {i_net}: {ed - st}')
print('BNs reset')
loss, dict_of_metrics = run_manager.validate(net=subnet, is_test=is_test, no_logs=no_logs, **validation_kwargs)
top1 = dict_of_metrics['top1']
info['loss'], info['top1'] = loss, top1
if thresholds is not None:
n_not_predicted_per_stage = dict_of_metrics['n_not_predicted_per_stage']
flops_per_stage = info['flops']
if is_test:
data_loader = run_config.test_loader
else:
data_loader = run_config.valid_loader
n_images_total = len(data_loader.dataset)
print(f'{n_images_total=}, {n_not_predicted_per_stage=}')
true_flops = flops_per_stage[0] + sum([n_not_predicted / n_images_total * flops for (n_not_predicted, flops) in
zip(n_not_predicted_per_stage, flops_per_stage[1:])])
info['flops'] = true_flops
print(f'{thresholds=}')
print(info)
info['run_config'] = run_config # a hack
for k in info_keys_to_return:
if k not in info:
info[k] = dict_of_metrics[k]
return info
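# Worked example of the expected-FLOPs bookkeeping in _evaluate_one_model (numbers are made up):
# with flops_per_stage = [100, 200, 400], 10000 images in total and
# n_not_predicted_per_stage = [3000, 500] images falling through to stages 1 and 2, the cascade
# costs 100 + 3000/10000 * 200 + 500/10000 * 400 = 180 FLOPs-units per image on average,
# instead of the 700 it would cost to run every stage on every image.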
# these are mostly irrelevant, will be overwritten. TODO: remove
default_kwargs = {
'n_gpus': [1, 'total number of available gpus'],
'gpu': [1, 'number of gpus per evaluation job'],
'data': ['/export/scratch3/aleksand/data/CIFAR/', 'location of the data corpus'],
'dataset': ['cifar10', 'name of the dataset [imagenet, cifar10, cifar100, ...]'],
'n_classes': [10, 'number of classes of the given dataset'],
'n_workers': [8, 'number of workers for dataloaders'],
'vld_size': [5000, 'validation set size, randomly sampled from training set'],
'trn_batch_size': [96, 'train batch size for training'],
'vld_batch_size': [96, 'validation batch size'],
'n_epochs': [0, 'n epochs to train'],
'drop_rate': [0.2, 'dropout rate'],
'drop_connect_rate': [0.0, ''],
'resolution': [224, 'resolution'],
'supernet_path': ['/export/scratch3/aleksand/nsganetv2/data/ofa_mbv3_d234_e346_k357_w1.0',
'path to supernet'],
    'subnet': ['', 'location of a json file with ks, e, d, w (and r)'],
'pass_subnet_config_directly': [False, 'Pass config as object instead of file path'],
'config': ['', 'location of a json file of specific model declaration; not relevant for me'],
'init': [None, 'location of initial weight to load'],
'test': [False, 'if evaluate on test set'],
'verbose': [True, ''],
'save': [None, ''],
'reset_running_statistics': [False, 'reset_running_statistics for BN'],
'latency': [None, 'latency measurement settings (gpu64#cpu)'],
'random_seed': [42, 'random seed'],
'teacher_model': [None, ''],
}
| 13,651 | 49.940299 | 143 |
py
|
ENCAS
|
ENCAS-main/nat.py
|
import itertools
import os
import time
from concurrent.futures.process import ProcessPoolExecutor
from pathlib import Path
import torch
import torch.nn.functional as F
import torchvision.transforms.functional
from torch.cuda.amp import GradScaler
from ofa.utils import AverageMeter, accuracy
from tqdm import tqdm
from matplotlib import pyplot as plt
import utils
from networks.ofa_mbv3_my import OFAMobileNetV3My
from run_manager import get_run_config
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.proxyless_my import OFAProxylessNASNetsMy
from utils import validate_config, get_net_info
from searcher_wrappers.mo_gomea_wrapper import MoGomeaWrapper
from searcher_wrappers.nsga3_wrapper import Nsga3Wrapper
from searcher_wrappers.random_search_wrapper import RandomSearchWrapper
import subset_selectors
import gc
from filelock import FileLock
import dill
from utils_train import CutMixCrossEntropyLoss, LabelSmoothing
os.environ['MKL_THREADING_LAYER'] = 'GNU'
import json
import shutil
import numpy as np
from utils import get_correlation, alphabet_dict, get_metric_complement, setup_logging
from search_space import OFASearchSpace
from search_space.ensemble_ss import EnsembleSearchSpace
from acc_predictor.factory import get_acc_predictor
from pymoo.visualization.scatter import Scatter
plt.rcParams.update({'font.size': 16})
from collections import defaultdict
from utils import set_seed
import re
import yaml
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = True
class NAT:
def __init__(self, kwargs):
kwargs_copy = dict(kwargs)
plt.rcParams['axes.grid'] = True
def get_from_kwargs_or_default_kwargs(key_name):
return kwargs.pop(key_name, default_kwargs[key_name][0])
self.random_seed = kwargs.pop('random_seed', default_kwargs['random_seed'][0])
set_seed(self.random_seed)
# 1. search space & alphabets
self.search_space_name = kwargs.pop('search_space', default_kwargs['search_space'][0])
search_goal = kwargs.pop('search_goal', default_kwargs['search_goal'][0])
self.if_cascade = search_goal == 'cascade'
self.ensemble_ss_names = kwargs.pop('ensemble_ss_names', default_kwargs['ensemble_ss_names'][0])
alphabet_names = kwargs.pop('alphabet', default_kwargs['alphabet'][0])
alphabet_paths = [alphabet_dict[alphabet_name] for alphabet_name in alphabet_names]
if self.search_space_name == 'ensemble':
self.search_space = EnsembleSearchSpace(self.ensemble_ss_names,
[{'alphabet': alphabet_name, 'ensemble_size': len(alphabet_names)}
for alphabet_name in alphabet_names])
self.alphabets = []
self.alphabets_lb = []
for alphabet_path in alphabet_paths:
with open(alphabet_path, 'r') as f:
self.alphabets.append(list(map(int, f.readline().split(' '))))
with open(alphabet_path.replace('.txt', '_lb.txt'), 'r') as f:
self.alphabets_lb.append(list(map(int, f.readline().split(' ')))) #lower bound
# combined alphabets
self.alphabet = list(itertools.chain(*self.alphabets))
self.alphabet_lb = list(itertools.chain(*self.alphabets_lb))
elif self.search_space_name == 'reproduce_nat':
assert len(alphabet_names) == 2
assert alphabet_names[0] == alphabet_names[1]
alphabet_path = alphabet_paths[0]
alphabet_name = alphabet_names[0]
self.search_space = OFASearchSpace(alphabet=alphabet_name)
with open(alphabet_path, 'r') as f:
self.alphabet = list(map(int, f.readline().split(' ')))
with open(alphabet_path.replace('.txt', '_lb.txt'), 'r') as f:
self.alphabet_lb = list(map(int, f.readline().split(' '))) # lower bound
# 2. save & log
self.path_logs = kwargs.pop('path_logs', default_kwargs['path_logs'][0])
self.resume = kwargs.pop('resume', default_kwargs['resume'][0])
if self.resume is not None:
self.resume = os.path.join(self.path_logs, self.resume)
save_name = kwargs.pop('experiment_name', default_kwargs['experiment_name'][0])
self.path_logs = os.path.join(self.path_logs, save_name)
Path(self.path_logs).mkdir(exist_ok=True)
self.log_file_path = os.path.join(self.path_logs, '_log.txt')
setup_logging(self.log_file_path)
print(f'{self.path_logs=}')
# 3. copy pre-trained supernets
supernet_paths = kwargs.pop('supernet_path', default_kwargs['supernet_path'][0])
print(f'{supernet_paths=}')
supernet_paths_true = []
for supernet_path in supernet_paths:
# try:
shutil.copy(supernet_path, self.path_logs)
# except:
# pass
supernet_paths_true.append(os.path.join(self.path_logs, os.path.basename(supernet_path)))
self.supernet_paths = supernet_paths_true
# 4. data
trn_batch_size = get_from_kwargs_or_default_kwargs('trn_batch_size')
vld_batch_size = get_from_kwargs_or_default_kwargs('vld_batch_size')
n_workers = get_from_kwargs_or_default_kwargs('n_workers')
vld_size = get_from_kwargs_or_default_kwargs('vld_size')
total_size = get_from_kwargs_or_default_kwargs('total_size')
data_path = get_from_kwargs_or_default_kwargs('data')
init_lr = get_from_kwargs_or_default_kwargs('init_lr')
lr_schedule_type = kwargs.pop('lr_schedule_type', default_kwargs['lr_schedule_type'][0])
cutout_size = kwargs.pop('cutout_size', default_kwargs['cutout_size'][0])
weight_decay = kwargs.pop('weight_decay', default_kwargs['weight_decay'][0])
if_center_crop = kwargs.pop('if_center_crop', default_kwargs['if_center_crop'][0])
auto_augment = kwargs.pop('auto_augment', default_kwargs['auto_augment'][0])
resize_scale = kwargs.pop('resize_scale', default_kwargs['resize_scale'][0])
if_cutmix = kwargs.pop('if_cutmix', default_kwargs['if_cutmix'][0])
self.iterations = kwargs.pop('iterations', default_kwargs['iterations'][0])
self.dataset = kwargs.pop('dataset', default_kwargs['dataset'][0])
self.n_epochs = kwargs.pop('n_epochs', default_kwargs['n_epochs'][0])
# in order not to pickle "self", create variables without it:
dataset, n_epochs, iterations, ensemble_ss_names = self.dataset, self.n_epochs, self.iterations, self.ensemble_ss_names
self.run_config_lambda = lambda: get_run_config(
dataset=dataset, data_path=data_path, image_size=256,
n_epochs=n_epochs, train_batch_size=trn_batch_size, test_batch_size=vld_batch_size,
n_worker=n_workers, valid_size=vld_size, total_size=total_size, dataset_name=dataset,
total_epochs=(iterations + 1) * n_epochs, lr_schedule_type=lr_schedule_type,
weight_decay=weight_decay, init_lr=init_lr, cutout_size=cutout_size, if_center_crop=if_center_crop,
auto_augment=auto_augment, resize_scale=resize_scale, if_cutmix=if_cutmix,
preproc_alphanet='alphanet' in ensemble_ss_names # needed only for imagenet
)
# 5. search algorithm
run_config = self.run_config_lambda() # need to create run_config here just to get the number of classes
self.n_classes = run_config.data_provider.n_classes
gomea_exe_path = get_from_kwargs_or_default_kwargs('gomea_exe')
search_algo = kwargs.pop('search_algo', default_kwargs['search_algo'][0])
assert search_algo in ['nsga3', 'mo-gomea', 'random']
search_algo_class = {'nsga3': Nsga3Wrapper, 'mo-gomea': MoGomeaWrapper,
'random': RandomSearchWrapper}[search_algo]
init_with_nd_front_size = kwargs.pop('init_with_nd_front_size', default_kwargs['init_with_nd_front_size'][0])
n_surrogate_evals = kwargs.pop('n_surrogate_evals', default_kwargs['n_surrogate_evals'][0])
self.sec_obj = kwargs.pop('sec_obj', default_kwargs['sec_obj'][0])
self.if_add_archive_to_candidates = get_from_kwargs_or_default_kwargs('add_archive_to_candidates')
self.search_wrapper = search_algo_class(self.search_space, self.sec_obj, self.path_logs,
self.n_classes, self.supernet_paths,
n_surrogate_evals, self.if_add_archive_to_candidates,
alphabet=self.alphabet, alphabet_path=alphabet_paths, alphabet_name=alphabet_names,
init_with_nd_front_size=init_with_nd_front_size, gomea_exe_path=gomea_exe_path,
n_image_channels=3,
dataset=self.dataset, search_space_name=self.search_space_name,
alphabet_lb=self.alphabet_lb, ensemble_ss_names=self.ensemble_ss_names)
subset_selector_name = kwargs.pop('subset_selector', default_kwargs['subset_selector'][0])
archive_size = kwargs.pop('n_iter', default_kwargs['n_iter'][0])
self.subset_selector = subset_selectors.create_subset_selector(subset_selector_name, archive_size)
# 6. create lambdas for creating supernets (engines)
# Why lambdas? Because they can be used multiple times and in subprocesses to create engines
# with the same setup (but loaded weights will be different because I'll be overwriting save files)
self.create_engine_lambdas = []
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
use_gradient_checkpointing = get_from_kwargs_or_default_kwargs('use_gradient_checkpointing')
for ss_name in self.ensemble_ss_names:
class_to_use = ss_name_to_class[ss_name]
self.create_engine_lambdas.append(NAT.make_lambda_for_engine_creation(class_to_use, self.n_classes,
use_gradient_checkpointing,
self.dataset, ss_name))
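        # Each lambda, given a checkpoint path and a run_config, rebuilds the supernet and its
        # optimizer from the weights on disk, e.g. (illustrative call using names from this file):
        #     engine, optimizer = self.create_engine_lambdas[0](self.supernet_paths[0],
        #                                                       run_config, device=device)
        # which is what lets worker subprocesses recreate identical engines after each update.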
# 7. loss functions
label_smoothing = kwargs.pop('label_smoothing', default_kwargs['label_smoothing'][0])
if label_smoothing == 0.0:
if if_cutmix:
self.train_criterion = CutMixCrossEntropyLoss()
else:
self.train_criterion = torch.nn.CrossEntropyLoss()
self.val_criterion = torch.nn.CrossEntropyLoss()
else:
assert not if_cutmix
print(f'Using label smoothing with coefficient == {label_smoothing}')
self.train_criterion = LabelSmoothing(label_smoothing)
self.val_criterion = LabelSmoothing(label_smoothing)
# 8. used later
self.initial_sample_size = kwargs.pop('n_doe', default_kwargs['n_doe'][0])
self.predictor = kwargs.pop('predictor', default_kwargs['predictor'][0])
self.n_warmup_epochs = kwargs.pop('n_warmup_epochs', default_kwargs['n_warmup_epochs'][0])
self.if_amp = get_from_kwargs_or_default_kwargs('if_amp')
self.rbf_ensemble_size = kwargs.pop('rbf_ensemble_size', default_kwargs['rbf_ensemble_size'][0])
self.if_check_duplicates = not kwargs.pop('dont_check_duplicates', default_kwargs['dont_check_duplicates'][0])
self.if_sample_configs_to_train = get_from_kwargs_or_default_kwargs('sample_configs_to_train')
self.store_checkpoint_freq = kwargs.pop('store_checkpoint_freq', default_kwargs['store_checkpoint_freq'][0])
self.get_scalar_from_accuracy = lambda acc: acc[0].item()
self.lock = FileLock(os.path.join(str(Path(self.path_logs).parents[1]),
f'gpu_{os.environ["CUDA_VISIBLE_DEVICES"].replace(",", "_")}.lock'))
# 9. save config
with open(os.path.join(self.path_logs, 'config_msunas.yml'), 'w') as f:
yaml.dump(kwargs_copy, f)
def search(self):
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
archive, first_iteration = self.create_or_restore_archive(ref_pt)
for it in range(first_iteration, self.iterations + 1):
archive, *_ = self.search_step(archive, it, ref_pt)
def search_step(self, archive, it, ref_pt):
acc_predictor, pred_for_archive = self.fit_surrogate(archive, self.alphabet, self.alphabet_lb)
candidates, pred_for_candidates = self.surrogate_search(archive, acc_predictor, it=it)
objs_evaluated = self.train_and_evaluate(candidates, it)
candidates_top1_err, candidates_complexity = objs_evaluated[0], objs_evaluated[1]
# correlation for accuracy
rmse, rho, tau = get_correlation(np.hstack((pred_for_archive[:, 0], pred_for_candidates[:, 0])),
np.array([x[1] for x in archive] + candidates_top1_err))
# correlation for flops
if self.if_cascade:
_, rho_flops, _ = get_correlation(np.hstack((pred_for_archive[:, 1], pred_for_candidates[:, 1])),
np.array([x[2] for x in archive] + candidates_complexity))
print(f'{rho_flops=}')
candidates_with_objs = []
for member in zip(candidates, *objs_evaluated):
candidates_with_objs.append(member)
if self.if_add_archive_to_candidates:
archive = candidates_with_objs # because archive was added to candidates in self.surrogate_search
else:
archive += candidates_with_objs # because candidates don't include archive
hv = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*archive))[1:3]))
hv_candidates = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*candidates_with_objs))[1:3]))
print(f'\nIter {it}: hv = {hv:.2f}')
print(f"fitting {self.predictor}: RMSE = {rmse:.4f}, Spearman's Rho = {rho:.4f}, Kendall’s Tau = {tau:.4f}")
with open(os.path.join(self.path_logs, 'iter_{}.stats'.format(it)), 'w') as handle:
json.dump({'archive': archive, 'candidates': candidates_with_objs, 'hv': hv, 'hv_candidates': hv_candidates,
'surrogate': {'model': self.predictor, 'name': acc_predictor.name, 'winner': acc_predictor.name,
'rmse': rmse, 'rho': rho, 'tau': tau}}, handle)
self.plot_archive(archive, candidates_top1_err, candidates, candidates_complexity, it, pred_for_candidates)
return archive, {'acc_val_max': get_metric_complement(np.min([x[1] for x in archive]))}
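    # Note on the archive layout used throughout this class: each member is a tuple
    # (config, top1_err, secondary_obj, extra), where `extra` is only present for the
    # per-ensemble-member surrogate and then holds (top1_err_per_member, flops_per_member).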
def create_or_restore_archive(self, ref_pt):
if self.resume:
# loads the full archive, not just the candidates of the latest iteration
data = json.load(open(self.resume))
            last_iter_str = re.search(r'(\d+)(?!.*\d)', self.resume)[0]  # last number in the file name
            archive, first_iteration = data['archive'], int(last_iter_str)
if first_iteration == 0:
# MO-GOMEA needs the archive of previous iteration => copy it into the folder of the current run
try:
shutil.copy(self.resume, self.path_logs)
except shutil.SameFileError:
pass
first_iteration += 1
else:
archive = []
arch_doe = self.search_space.initialize(self.initial_sample_size)
if self.n_warmup_epochs > 0:
print(f'Warmup: train for {self.n_warmup_epochs} epochs')
self.lock.acquire()
st = time.time()
self._train(arch_doe, -1, n_epochs=self.n_warmup_epochs, if_warmup=True)
ed = time.time()
print(f'Train time = {ed - st}')
self.lock.release()
objs_evaluated = self.train_and_evaluate(arch_doe, 0)
for member in zip(arch_doe, *objs_evaluated):
archive.append(member)
hv = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*archive))[1:3]))
with open(os.path.join(self.path_logs, 'iter_0.stats'), 'w') as handle:
json.dump({'archive': archive, 'candidates': [], 'hv': hv, 'hv_candidates': hv,
'surrogate': {}}, handle)
first_iteration = 1
return archive, first_iteration
def fit_surrogate(self, archive, alphabet, alphabet_lb):
if 'rbf_ensemble_per_ensemble_member' not in self.predictor:
inputs = np.array([self.search_space.encode(x[0]) for x in archive])
targets = np.array([x[1] for x in archive])
print(len(inputs), len(inputs[0]))
assert len(inputs) > len(inputs[0]), '# of training samples have to be > # of dimensions'
inputs_additional = {}
else:
inputs = list(zip(*[self.search_space.encode(x[0], if_return_separate=True) for x in archive]))
inputs = [np.array(i) for i in inputs]
targets = {}
metric_per_member = list(zip(*[x[-1][0] for x in archive]))
targets['metrics_sep'] = [np.array(x) for x in metric_per_member]
targets['flops_cascade'] = np.array([x[2] for x in archive])
inputs_additional = {}
flops_per_member = list(zip(*[x[-1][1] for x in archive]))
flops_per_member = [np.array(x) for x in flops_per_member]
            flops_per_member = np.array(flops_per_member, dtype=int).T  # np.int was removed from recent NumPy versions
inputs_for_flops = [i[:, -2:] for i in inputs]
inputs_for_flops = np.concatenate(inputs_for_flops, axis=1)
inputs_for_flops = np.hstack((inputs_for_flops, flops_per_member)) # n_samples, (ensemble_size*3) // because positions, thresholds, flops for each member
inputs_additional['inputs_for_flops'] = inputs_for_flops
inputs_for_flops_alphabet = np.concatenate([a[-2:] for a in self.alphabets] + [[2000] * len(self.alphabets)]) # for flops: they shouldn't be bigger than 2000
inputs_for_flops_alphabet_lb = np.concatenate([a[-2:] for a in self.alphabets_lb] + [[0] * len(self.alphabets)])
inputs_additional['inputs_for_flops_alphabet'] = inputs_for_flops_alphabet
inputs_additional['inputs_for_flops_alphabet_lb'] = inputs_for_flops_alphabet_lb
print(len(inputs), len(inputs[0]), len(inputs[0][0]))
assert len(inputs[0]) > max([len(x) for x in inputs[0]]), '# of training samples have to be > # of dimensions'
if 'combo' in self.predictor:
targets['metrics_ens'] = np.array([x[1] for x in archive])
if self.search_space_name == 'reproduce_nat':
# NAT uses only 100 out of 300 archs to fit the predictor
# we can use the same subset selector, but need to change number of archs to select, and then change it back
normal_n_select = self.subset_selector.n_select
self.subset_selector.n_select = 100
errs = 100 - targets # reference selection assumes minimization
flops = np.array([x[2] for x in archive])
objs = np.vstack((errs, flops)).T
# ReferenceBasedSelector doesn't actually use archive
indices = self.subset_selector.select([], objs)
self.subset_selector.n_select = normal_n_select
actual_inputs_for_fit = inputs[indices]
targets = targets[indices]
print(f'{actual_inputs_for_fit.shape=}, {targets.shape=}')
else:
actual_inputs_for_fit = inputs
acc_predictor = get_acc_predictor(self.predictor, actual_inputs_for_fit, targets, np.array(alphabet),
np.array(alphabet_lb), inputs_additional=inputs_additional,
ensemble_size=self.rbf_ensemble_size)
if 'rbf_ensemble_per_ensemble_member' in self.predictor:
inputs = np.concatenate(inputs, axis=1) # for creating predictor need them separately, but for prediction need a single vector
inputs = {'for_acc': inputs, 'for_flops': inputs_for_flops}
# to calculate predictor correlation:
predictions = acc_predictor.predict(inputs)
return acc_predictor, predictions
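    # Shape sketch for the per-ensemble-member surrogate above (sizes are illustrative): with an
    # ensemble of 3 supernets, `inputs_for_flops` has one row per archive member and 3 * 3 columns
    # -- the (position, threshold) tail of each member's encoding followed by the 3 measured
    # per-member FLOPs -- while `inputs['for_acc']` is simply the concatenated genome.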
def surrogate_search(self, archive, predictor, it=0):
seed_cur = self.random_seed + it
set_seed(seed_cur)
st = time.time()
genomes, objs = self.search_wrapper.search(archive, predictor, it, seed=seed_cur)
ed = time.time()
print(f'Search time = {ed - st}')
if self.if_check_duplicates:
archive_genomes = [x[0] for x in archive]
new_genomes_decoded = [self.search_space.decode(x) for x in genomes]
not_duplicate = np.logical_not([x in archive_genomes for x in new_genomes_decoded])
else:
not_duplicate = np.full(genomes.shape[0], True, dtype=bool)
st = time.time()
indices = self.subset_selector.select(archive, objs[not_duplicate])
genomes_selected = genomes[not_duplicate][indices]
objs_selected = objs[not_duplicate][indices]
ed = time.time()
print(f'Select time = {ed - st}')
genomes_selected, unique_idx = np.unique(genomes_selected, axis=0, return_index=True)
objs_selected = objs_selected[unique_idx]
candidates = [self.search_space.decode(x) for x in genomes_selected]
return candidates, objs_selected
def train_and_evaluate(self, archs, it, n_epochs=None, if_warmup=False):
self.lock.acquire()
st = time.time()
self._train(archs, it, n_epochs=n_epochs, if_warmup=if_warmup)
ed = time.time()
print(f'Train time = {ed - st}')
self.lock.release()
st = time.time()
eval_res = self._evaluate_model_list(archs)
ed = time.time()
print(f'Eval time = {ed - st}')
gc.collect()
torch.cuda.empty_cache()
# self.lock.release()
return eval_res
@staticmethod
def _init_subprocess(log_file_path, fraction):
setup_logging(log_file_path)
torch.cuda.set_per_process_memory_fraction(fraction, 0)
def _train(self, archs, it, number_to_add_to_i=0, n_epochs=None, if_warmup=False):
thread_pool = ProcessPoolExecutor(max_workers=1,
initializer=NAT._init_subprocess, initargs=(self.log_file_path, 0.44,))
# initializer=setup_logging, initargs=(self.log_file_path,))
n_engines_to_train = len(self.create_engine_lambdas)
if self.search_space_name == 'ensemble':
percent_train_per_engine = [1 / n_engines_to_train] * len(self.ensemble_ss_names)
lambda_select_archs_per_engine = [lambda _: True] * len(self.ensemble_ss_names)
elif self.search_space_name == 'reproduce_nat':
n_archs_w1_0 = np.sum([config['w'] == 1.0 for config in archs])
percent_w1_0 = n_archs_w1_0 / len(archs)
print(f'{percent_w1_0=}')
percent_train_per_engine = [percent_w1_0, 1 - percent_w1_0]
lambda_select_archs_per_engine = [lambda arch: arch['w'] == 1.0, lambda arch: arch['w'] == 1.2]
for i, (ss_name, create_engine_lambda) in enumerate(zip(self.ensemble_ss_names, self.create_engine_lambdas)):
dump_path_train1 = os.path.join(self.path_logs, 'dump_train1.pkl')
if self.search_space_name == 'ensemble':
archs_cur = [arch[i] for arch in archs] # archs is a list of lists, each of which contains configs for an ensemble
search_space = self.search_space.search_spaces[i]
elif self.search_space_name == 'reproduce_nat':
archs_cur = archs
search_space = self.search_space
actual_logs_path = self.path_logs
with open(dump_path_train1, 'wb') as f:
dill.dump((archs_cur, it, number_to_add_to_i, n_epochs, if_warmup, create_engine_lambda,
self.random_seed + i, self.run_config_lambda, self.if_sample_configs_to_train,
search_space, self.dataset, torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
self.train_criterion, self.get_scalar_from_accuracy, actual_logs_path,
self.supernet_paths[i], lambda_select_archs_per_engine[i], percent_train_per_engine[i],
self.store_checkpoint_freq, self.sec_obj, self.if_amp), f)
future = thread_pool.submit(NAT._train_one_supernetwork_stateless, dump_path_train1)
future.result()
del thread_pool
gc.collect()
torch.cuda.empty_cache()
@staticmethod
def _train_one_supernetwork_stateless(args_dump_path):
with open(args_dump_path, 'rb') as f:
archs, it, number_to_add_to_i, n_epochs, if_warmup, create_engine_lambda, random_seed, run_config_lambda, \
if_sample_configs_to_train, search_space, dataset_name, device, train_criterion, \
get_scalar_from_accuracy, path_logs, supernet_path, lambda_filter_archs, percent_steps_to_take, \
store_checkpoint_freq, sec_obj, if_amp \
= dill.load(f)
set_seed(random_seed + it) # need to keep changing the seed, otherwise all the epochs use the same random values
run_config = run_config_lambda()
engine, optimizer = create_engine_lambda(supernet_path, run_config, device=device)
n_batches = len(run_config.train_loader)
if n_epochs is None:
n_epochs = run_config.n_epochs
if if_sample_configs_to_train:
configs_encoded = np.array([search_space.encode(c) for c in archs])
unique_with_counts = [np.unique(i, return_counts=True) for i in configs_encoded.T]
unique_with_probs = [(u, c / configs_encoded.shape[0]) for (u, c) in unique_with_counts]
sample = np.array([np.random.choice(u, n_epochs * n_batches, p=p)
for (u, p) in unique_with_probs])
sample_decoded = [search_space.decode(c) for c in sample.T]
else:
archs = [arch for arch in archs if lambda_filter_archs(arch)]
all_resolutions = [arch['r'] for arch in archs]
run_config.data_provider.collator_train.set_resolutions(all_resolutions)
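        # Sketch of the per-gene sampling branch above (values are made up): if, across the archive,
        # a given gene took value 3 in 60% of configs and value 5 in 40%, then for every training
        # step that gene is drawn independently via np.random.choice([3, 5], p=[0.6, 0.4]); decoding
        # the sampled gene vectors yields the configs that actually get trained.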
n_steps_to_take = int(n_epochs * n_batches * percent_steps_to_take)
n_epochs_to_take = n_steps_to_take // n_batches
if if_amp:
scaler = GradScaler()
step = 0
epoch = 0 # for saving not to fail when n_epochs == 0
for epoch in range(0, n_epochs):
if step == n_steps_to_take: #don't waste time initializing dataloader threads for the epochs that won't run
break
engine.train()
losses = AverageMeter()
metric_dict = defaultdict(lambda: AverageMeter())
data_time = AverageMeter()
with tqdm(total=n_batches,
desc='{} Train #{}'.format(run_config.dataset, epoch + number_to_add_to_i), ncols=175) as t:
end = time.time()
for i, (images, labels, config_idx) in enumerate(run_config.train_loader):
time_diff = time.time() - end
data_time.update(time_diff)
if step == n_steps_to_take:
break
step += 1
if if_sample_configs_to_train:
config = sample_decoded[epoch * n_batches + i] # all the variables other than resolution have already been sampled in advance
else:
config = archs[config_idx]
if search_space.name in ['ofa', 'proxyless']:
config = validate_config(config)
engine.set_active_subnet(ks=config['ks'], e=config['e'], d=config['d'], w=config['w'])
if if_warmup:
# new_lr = run_config.init_lr
# previously warmup had constant lr, switch to linear warmup
new_lr = (step / n_steps_to_take) * run_config.init_lr
else:
new_lr = run_config.adjust_learning_rate(optimizer, epoch, i, n_batches,
it * n_epochs + epoch, n_epochs_to_take, n_epochs)
images, labels = images.to(device), labels.to(device)
if not if_amp:
output = engine(images)
loss = train_criterion(output, labels)
else:
with torch.cuda.amp.autocast():
output = engine(images)
loss = train_criterion(output, labels)
optimizer.zero_grad()
if not if_amp:
loss.backward()
optimizer.step()
else:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
losses.update(loss.item(), images.size(0))
labels_for_acc = labels
if len(labels.shape) > 1:
labels_for_acc = torch.argmax(labels, dim=-1)
acc1 = accuracy(output, labels_for_acc, topk=(1,))
acc1 = get_scalar_from_accuracy(acc1)
metric_dict['top1'].update(acc1, output.size(0))
t.set_postfix({'loss': losses.avg,
**{key: metric_dict[key].avg for key in metric_dict},
'img_size': images.size(2),
'lr': new_lr,
'data_time': data_time.avg})
t.update(1)
end = time.time()
width_mult = engine.width_mult[0]
# save the new supernet weights
save_path_iter = os.path.join(path_logs, f'iter_{it}')
Path(save_path_iter).mkdir(exist_ok=True)
def save_engine_weights(save_path):
dict_to_save = {'epoch': epoch,
'model_state_dict': engine.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'width_mult': width_mult}
            torch.save(dict_to_save, save_path)  # torch.save already persists the checkpoint
if (it + 1) % store_checkpoint_freq == 0:
save_engine_weights(os.path.join(save_path_iter, os.path.basename(supernet_path)))
# but additionally always save in the main log folder: needed for the whole thing to keep on working
# ("train" updates & overwrites these weights, "eval" uses the latest version of the weights)
save_engine_weights(supernet_path)
def _evaluate_model_list(self, archs, number_to_add_to_i=0):
engines = []
for i, (create_engine_lambda, supernet_path) in enumerate(zip(self.create_engine_lambdas, self.supernet_paths)):
run_config = self.run_config_lambda() # only used within create_engine_lambda
engine, opt = create_engine_lambda(supernet_path, run_config, to_cuda=False)
engines.append(engine)
def capture_variable_in_lambda(t):
return lambda _: t
get_engines = [capture_variable_in_lambda(engine) for engine in engines]
thread_pool = ProcessPoolExecutor(max_workers=1,
# initializer=setup_logging, initargs=(self.log_file_path,))
initializer=NAT._init_subprocess, initargs=(self.log_file_path,0.44))
dump1_path = os.path.join(self.path_logs, 'dump1.pkl')
with open(dump1_path, 'wb') as f:
dill.dump({'archs': archs, 'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
'val_criterion': self.val_criterion, 'get_scalar_from_accuracy': self.get_scalar_from_accuracy,
'sec_obj': self.sec_obj, 'search_space_ensemble': self.search_space,
'get_engines': get_engines,
'run_config_lambda': self.run_config_lambda, 'number_to_add_to_i': number_to_add_to_i,
'if_ensemble_perf_per_member': 'rbf_ensemble_per_ensemble_member' in self.predictor,
'if_cascade': self.if_cascade}, f)
future = thread_pool.submit(NAT._evaluate_model_list_stateless, dump1_path)
res = future.result()
del thread_pool
try:
os.remove(dump1_path)
except:
pass
return tuple(res)
@staticmethod
def _evaluate_model_list_stateless(args_dump_path): # must be called by _evaluate_model_list
with open(args_dump_path, 'rb') as f:
kwargs_loaded = dill.load(f)
top1_errs = []
complexities = []
if_ensemble_perf_per_member = kwargs_loaded['if_ensemble_perf_per_member']
if if_ensemble_perf_per_member:
perf_and_flops_per_subnet_all = []
run_config = kwargs_loaded['run_config_lambda']()
kwargs_loaded['run_config'] = run_config
archs = kwargs_loaded['archs']
for i_config in range(len(archs)):
kwargs_loaded['config_ensemble'] = archs[i_config]
kwargs_loaded['i_config'] = i_config
top1_err, complexity, perf_and_flops_per_subnet = NAT._evaluate_model(**kwargs_loaded)
top1_errs.append(top1_err)
complexities.append(complexity)
if if_ensemble_perf_per_member:
perf_and_flops_per_subnet_all.append(perf_and_flops_per_subnet)
to_return = top1_errs, complexities
if if_ensemble_perf_per_member:
to_return += (perf_and_flops_per_subnet_all,)
return to_return
@staticmethod
def _evaluate_model(device, val_criterion,
get_scalar_from_accuracy, sec_obj, search_space_ensemble, get_engines,
run_config, config_ensemble, i_config, number_to_add_to_i, if_ensemble_perf_per_member,
if_cascade, **kwargs): #don't need kwargs, have them to ignore irrelevant parameters passed here
print('started _evaluate_model')
subnets = []
resolution_max = -1
resolutions_list = []
thresholds = None
if if_cascade:
positions_list = []
thresholds = []
if type(search_space_ensemble) is OFASearchSpace: # reproduce_nat
search_spaces = [search_space_ensemble]
config_ensemble = [config_ensemble]
            # pick the engine that matches the width multiplier (an earlier bug always used the zero-th engine)
if config_ensemble[0]['w'] == 1.0:
get_engines = [get_engines[0]]
else:
get_engines = [get_engines[1]]
else:
search_spaces = search_space_ensemble.search_spaces
vld_batch_size = run_config.valid_loader.batch_size
for i, search_space in enumerate(search_spaces):
if search_space.name in ['ofa', 'proxyless']:
config_ensemble[i].update(validate_config(config_ensemble[i])) # tuple doesn't support item assignment
resolution, subnet = NAT._extract_subnet_from_supernet(config_ensemble[i], get_engines[i], run_config, vld_batch_size, device)
subnets.append(subnet)
resolution_max = max(resolution_max, resolution)
resolutions_list.append(resolution)
if if_cascade:
positions_list.append(config_ensemble[i]['position'])
thresholds.append(config_ensemble[i]['threshold'])
if if_cascade:
idx = np.argsort(positions_list)[::-1]
thresholds = np.array(thresholds)[idx].tolist()
resolutions_list = np.array(resolutions_list)[idx].tolist()
subnets = np.array(subnets)[idx].tolist()
reverse_idx = np.argsort(idx) #https://stackoverflow.com/questions/2483696/undo-or-reverse-argsort-python
resolution = resolution_max
run_config.valid_loader.collate_fn.set_resolutions([resolution]) # at this point all resolutions should be the same
metric_dict_val = defaultdict(lambda: AverageMeter())
losses_val = AverageMeter()
n_input_channels = -1
if if_cascade:
n_not_predicted_per_stage = [0 for _ in range(len(subnets) - 1)]
with torch.no_grad(), torch.cuda.amp.autocast():
with tqdm(total=len(run_config.valid_loader),
desc='{} Val #{}'.format(run_config.dataset, i_config + number_to_add_to_i),
ncols=200) as t:
# print(i_cuda, 'before dataloader_val loop')
for i, (images, labels, *other_stuff) in enumerate(run_config.valid_loader):
images, labels = images.to(device), labels.to(device)
images_orig = None # don't make a backup unless I need to
output = None
if if_cascade:
idx_more_predictions_needed = torch.ones(images.shape[0], dtype=torch.bool)
for i_subnet, subnet in enumerate(subnets):
if i_subnet > 0:
cur_threshold = thresholds[i_subnet - 1]
idx_more_predictions_needed[torch.max(output, dim=1).values >= cur_threshold] = False
output_tmp = output[idx_more_predictions_needed]
if len(output_tmp) == 0:
n_not_predicted = 0
else:
not_predicted_idx = torch.max(output_tmp, dim=1).values < cur_threshold
n_not_predicted = torch.sum(not_predicted_idx).item()
n_not_predicted_per_stage[i_subnet - 1] += n_not_predicted
'''
                            we want to know the accuracies of all the subnets even when their
                            predictions end up unused, hence no breaking out of the loop
'''
# if n_not_predicted == 0:
# break
if resolutions_list[i_subnet] != resolutions_list[i_subnet - 1]:
if images_orig is None:
images_orig = torch.clone(images)
r = resolutions_list[i_subnet]
images = torchvision.transforms.functional.resize(images_orig, (r, r))
if i_subnet == 0:
out_logits = subnet(images)
output_cur_softmaxed = torch.nn.functional.softmax(out_logits, dim=1)
else:
out_logits = subnet(images)
if len(out_logits.shape) < 2: # a single image is left in the batch, need to fix dim # wait, because I want per-subnet accuracies I pass the whole batch through the net, so this isn't necessary?
out_logits = out_logits[None, ...]
output_cur_softmaxed = torch.nn.functional.softmax(out_logits, dim=1)
if i_subnet == 0:
output = output_cur_softmaxed
else:
if n_not_predicted > 0: # if 0, actual predictions are not modified
n_nets_used_in_cascade = i_subnet + 1
coeff1 = ((n_nets_used_in_cascade - 1) / n_nets_used_in_cascade)
coeff2 = (1 / n_nets_used_in_cascade)
output_tmp[not_predicted_idx] = coeff1 * output_tmp[not_predicted_idx] \
+ coeff2 * output_cur_softmaxed[idx_more_predictions_needed][not_predicted_idx]
# need "output_tmp" because in pytorch "a[x][y] = z" doesn't modify "a".
output[idx_more_predictions_needed] = output_tmp
if if_ensemble_perf_per_member:
acc1 = accuracy(output_cur_softmaxed.detach(), labels, topk=(1,))
acc1 = get_scalar_from_accuracy(acc1)
# the line below caused a bug because I sorted the subnets by their desired position
# the fix is done at the very end because I want the numbering to be consistent,
# i.e. within the loop the subnets are sorted by their desired position.
metric_dict_val[f'top1_s{i_subnet}'].update(acc1, output.size(0))
loss = val_criterion(output, labels)
acc1 = accuracy(output, labels, topk=(1,))
acc1 = get_scalar_from_accuracy(acc1)
metric_dict_val['top1'].update(acc1, output.size(0))
losses_val.update(loss.item(), images.size(0))
n_input_channels = images.size(1)
tqdm_postfix = {'l': losses_val.avg,
**{key: metric_dict_val[key].avg for key in metric_dict_val},
'i': images.size(2)}
if thresholds is not None:
tqdm_postfix['not_pr'] = n_not_predicted_per_stage
tqdm_postfix['thr'] = thresholds
t.set_postfix(tqdm_postfix)
t.update(1)
metric = metric_dict_val['top1'].avg
top1_err = utils.get_metric_complement(metric)
resolution_for_flops = resolutions_list
info = get_net_info(subnets, (n_input_channels, resolution_for_flops, resolution_for_flops),
measure_latency=None, print_info=False, clean=True, lut=None, if_dont_sum=if_cascade)
if not if_cascade:
complexity = info[sec_obj]
else:
flops_per_stage = info[sec_obj]
n_images_total = len(run_config.valid_loader.dataset)
true_flops = flops_per_stage[0] + sum(
[n_not_predicted / n_images_total * flops for (n_not_predicted, flops) in
zip(n_not_predicted_per_stage, flops_per_stage[1:])])
complexity = true_flops
del subnet
to_return = top1_err, complexity
if if_ensemble_perf_per_member:
top1_err_per_member = []
for i_subnet in range(len(subnets)):
metric_cur = metric_dict_val[f'top1_s{i_subnet}'].avg
top1_err_cur = utils.get_metric_complement(metric_cur)
top1_err_per_member.append(top1_err_cur)
# fixing the bug that arose because subnets were sorted by resolution but the code that gets
# the output of this assumes sorting by supernet
top1_err_per_member = np.array(top1_err_per_member)[reverse_idx].tolist()
flops_per_member = np.array(flops_per_stage)[reverse_idx].tolist()
to_return = (*to_return, (tuple(top1_err_per_member), tuple(flops_per_member)))
else:
to_return = (*to_return, None)
return to_return
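    # The coeff1/coeff2 update in the cascade loop above is an incremental mean of the softmax
    # outputs over the subnets used so far: after subnet k (1-based) the combined prediction is
    # ((k - 1) / k) * running + (1 / k) * new, e.g. p1 -> (p1 + p2) / 2 -> (p1 + p2 + p3) / 3,
    # applied only to the images whose current confidence is still below the stage threshold.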
@staticmethod
def _extract_subnet_from_supernet(config_padded, get_engine, run_config, vld_batch_size, device):
engine = get_engine(config_padded['w'])
engine.set_active_subnet(ks=config_padded['ks'], e=config_padded['e'], d=config_padded['d'],
w=config_padded['w'])
resolution = config_padded['r']
run_config.data_provider.collator_subtrain.set_resolutions([resolution])# for sub_train_loader
run_config.data_provider.assign_active_img_size(resolution) # if no training is done, active image size is not set
st = time.time()
data_loader_set_bn = run_config.random_sub_train_loader(2000, vld_batch_size, resolution)
end = time.time()
print(f'sub_train_loader time = {end-st}')
subnet = engine.get_active_subnet(True)
subnet.eval().to(device)
# set BatchNorm for proper values for this subnet
st = time.time()
set_running_statistics(subnet, data_loader_set_bn)
end = time.time()
print(f'Setting BN time = {end-st}')
return resolution, subnet
@staticmethod
def make_lambda_for_engine_creation(class_to_use, n_classes, use_gradient_checkpointing,
dataset_name, search_space_name):
def inner(supernet_path, run_config, to_cuda=True, device=None, if_create_optimizer=True):
loaded_checkpoint = torch.load(supernet_path, map_location='cpu')
n_in_channels = 3
if search_space_name == 'ofa':
if 'width_mult' in loaded_checkpoint:
width_mult = loaded_checkpoint['width_mult']
else:
width_mult = 1.0 if 'w1.0' in supernet_path else 1.2 if 'w1.2' in supernet_path else None
assert width_mult is not None
kernel_size = [3, 5, 7]
exp_ratio = [3, 4, 6]
depth = [2, 3, 4]
engine = class_to_use(n_classes=n_classes, dropout_rate=0, width_mult=width_mult, ks_list=kernel_size,
expand_ratio_list=exp_ratio, depth_list=depth, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
elif search_space_name == 'alphanet':
engine = class_to_use(n_classes=n_classes, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
elif search_space_name == 'proxyless':
width_mult = 1.3
kernel_size = [3, 5, 7]
exp_ratio = [3, 4, 6]
depth = [2, 3, 4]
engine = class_to_use(n_classes=n_classes, dropout_rate=0, width_mult=width_mult, ks_list=kernel_size,
expand_ratio_list=exp_ratio, depth_list=depth, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
else:
raise NotImplementedError
if 'state_dict' in loaded_checkpoint: # for the pretrained model
init = loaded_checkpoint['state_dict']
elif 'model_state_dict' in loaded_checkpoint:
init = loaded_checkpoint['model_state_dict']
else:
raise ValueError
if search_space_name == 'alphanet': #each key in the pretrained model starts with "module."
init = {k.replace('module.', ''):v for k, v in init.items()}
classifier_linear_name = 'classifier.linear'
if classifier_linear_name + '.weight' not in init:
classifier_linear_name += '.linear'
loaded_classifier_weight_shape = init[classifier_linear_name + '.weight'].shape
if (loaded_classifier_weight_shape[0] != n_classes):
init[classifier_linear_name + '.weight'] = torch.rand((n_classes, loaded_classifier_weight_shape[1]))
init[classifier_linear_name + '.bias'] = torch.rand((n_classes))
engine.load_state_dict(init)
if to_cuda:
assert device is not None
print(f'{device=}')
engine.to(device)
if if_create_optimizer:
try:
net_params = engine.weight_parameters()
except:
net_params = [param for param in engine.parameters() if param.requires_grad]
optimizer = run_config.build_optimizer(net_params)
if 'optimizer_state_dict' in loaded_checkpoint:
optimizer.load_state_dict(loaded_checkpoint['optimizer_state_dict'])
print(optimizer)
else:
optimizer = None
return engine, optimizer
return inner
def plot_archive(self, archive, c_top1_err, candidates, complexity, it, pred_for_candidates):
plot = Scatter(legend=(True, {'loc': 'lower right'}), figsize=(12, 9))
F = np.full((len(archive), 2), np.nan)
F[:, 0] = np.array([x[2] for x in archive]) # second obj. (complexity)
F[:, 1] = get_metric_complement(np.array([x[1] for x in archive])) # top-1 accuracy
plot.add(F, s=15, facecolors='none', edgecolors='b', label='archive')
F = np.full((len(candidates), 2), np.nan)
proper_second_obj = np.array(complexity)
F[:, 0] = proper_second_obj
F[:, 1] = get_metric_complement(np.array(c_top1_err))
plot.add(F, s=30, color='r', label='candidates evaluated')
F = np.full((len(candidates), 2), np.nan)
if not self.if_cascade:
F[:, 0] = proper_second_obj
else:
F[:, 0] = pred_for_candidates[:, 1]
F[:, 1] = get_metric_complement(pred_for_candidates[:, 0])
plot.add(F, s=20, facecolors='none', edgecolors='g', label='candidates predicted')
plot.plot_if_not_done_yet()
plt.xlim(left=30)
if self.dataset == 'cifar10':
if np.median(F[:, 1]) > 85:
plt.xlim(left=0, right=3000)
plt.ylim(85, 100)
elif self.dataset == 'cifar100':
if np.median(F[:, 1]) > 70:
plt.xlim(left=0, right=3000)
plt.ylim(70, 90)
elif self.dataset == 'imagenet':
plt.xlim(left=0, right=2100)
plt.ylim(64, 78)
plot.save(os.path.join(self.path_logs, 'iter_{}.png'.format(it)))
def main(args):
engine = NAT(args)
engine.search()
try:
save_for_c_api_last_path = os.path.join(engine.path_logs, f'iter_{args["iterations"]}', 'save_for_c_api')
os.remove(save_for_c_api_last_path)
except:
pass
del engine
gc.collect()
torch.cuda.empty_cache()
default_kwargs = {
'experiment_name': ['debug_run', 'location of dir to save'],
'resume': [None, 'resume search from a checkpoint'],
'sec_obj': ['flops', 'second objective to optimize simultaneously'],
'iterations': [30, 'number of search iterations'],
'n_doe': [100, 'number of architectures to sample initially '
'(I kept the old name which is a bit weird; "doe"=="design of experiment")'],
'n_iter': [8, 'number of architectures to evaluate in each iteration'],
'predictor': ['rbf', 'which accuracy predictor model to fit'],
'data': ['/export/scratch3/aleksand/data/CIFAR/', 'location of the data corpus'],
'dataset': ['cifar10', 'name of the dataset [imagenet, cifar10, cifar100, ...]'],
'n_workers': [8, 'number of workers for dataloaders'],
'vld_size': [10000, 'validation size'],
'total_size': [None, 'train+validation size'],
'trn_batch_size': [96, 'train batch size'],
'vld_batch_size': [96, 'validation batch size '],
    'n_epochs': [5, 'number of epochs to train in each iteration'],
'supernet_path': [['/export/scratch3/aleksand/nsganetv2/data/ofa_mbv3_d234_e346_k357_w1.0'], 'list of paths to supernets'],
'search_algo': ['nsga3', 'which search algo to use [NSGA-III, MO-GOMEA, random]'],
'subset_selector': ['reference', 'which subset selector algo to use'],
'init_with_nd_front_size': [0, 'initialize the search algorithm with subset of non-dominated front of this size'],
'dont_check_duplicates': [False, 'if disable check for duplicates in search results'],
'add_archive_to_candidates': [False, 'if a searcher should append archive to the candidates'],
'sample_configs_to_train': [False, 'if instead of training selected candidates, a probability distribution '
'should be constructed from archive, and sampled from (like in NAT)'],
'random_seed': [42, 'random seed'],
'n_warmup_epochs': [0, 'number of epochs for warmup'],
'path_logs': ['/export/scratch3/aleksand/nsganetv2/logs/', 'Path to the logs folder'],
'n_surrogate_evals': [800, 'Number of evaluations of the surrogate per meta-iteration'],
'config_msunas_path': [None, 'Path to the yml file with all the parameters'],
'gomea_exe': [None, 'Path to the mo-gomea executable file'],
'alphabet': [['2'], 'Paths to text files (one per supernetwork) with alphabet size per variable'],
'search_space': [['ensemble'], 'Supernetwork search space to use'],
'store_checkpoint_freq': [1, 'Checkpoints will be stored for every x-th iteration'],
'init_lr': [None, 'initial learning rate'],
'ensemble_ss_names': [[], 'names of search spaces used in the ensemble'],
'rbf_ensemble_size': [500, 'number of the predictors in the rbf_ensemble surrogate'],
'cutout_size': [32, 'Cutout size. 0 == disabled'],
'label_smoothing': [0.0, 'label smoothing coeff when doing classification'],
'if_amp': [False, 'if train in mixed precision'],
'use_gradient_checkpointing': [False, 'if use gradient checkpointing'],
'lr_schedule_type': ['cosine', 'learning rate schedule; "cosine" is cyclic'],
'if_cutmix': [False, 'if to use cutmix'],
'weight_decay': [4e-5, ''],
'if_center_crop': [True, 'if do center crop, or just resize to target size'],
'auto_augment': ['rand-m9-mstd0.5', 'randaugment policy to use, or None to not use randaugment'],
'resize_scale': [0.08, 'minimum resize scale in RandomResizedCrop, or None to not use RandomResizedCrop'],
'search_goal': ['ensemble', 'Either "reproduce_nat" for reproducing NAT, or "ensemble" for everything else'],
}
| 54,638 | 52.672888 | 223 |
py
|
ENCAS
|
ENCAS-main/mo_gomea.py
|
import os
import pandas as pd
import numpy as np
from utils import capture_subprocess_output
from pathlib import Path
class MoGomeaCInterface():
name = 'mo_gomea'
def __init__(self, api_name, path, path_data_for_c_api, n_objectives=2, n_genes=10, alphabet='2',
alphabet_lower_bound_path='0', init_path=None,
gomea_executable_path='/export/scratch3/aleksand/MO_GOMEA/cmake-build-debug-remote/MO_GOMEA'):
super().__init__()
self.api_name = api_name
self.path = path
self.path_data_for_c_api = path_data_for_c_api
Path(self.path).mkdir(exist_ok=True) # need to create it before calling the C executable
# self.logger = CsvLogger(self.path, self.name + '.csv')
self.n_objectives = n_objectives
        self.n_elitists = 10000  # 40
self.n_genes = n_genes
self.alphabet = alphabet
self.alphabet_lower_bound_path = alphabet_lower_bound_path
self.init_path = init_path
self.gomea_executable_path = gomea_executable_path
def search(self, n_evaluations, seed):
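        # Launch the MO-GOMEA executable and parse the final elitist archive: the last column
        # of the .dat file holds comma-separated genomes, the first columns hold objective values.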
n_inbetween_log_files = 10
log_interval = n_evaluations // n_inbetween_log_files
subprocess_params = [str(self.gomea_executable_path), '-p', '5', str(self.n_objectives),
str(self.n_genes), str(self.n_elitists), str(n_evaluations),
str(log_interval), str(self.path), str(self.api_name), str(seed),
str(self.path_data_for_c_api), self.alphabet, self.alphabet_lower_bound_path]
if self.init_path is not None:
subprocess_params.append(self.init_path)
print(' '.join(subprocess_params))
output = capture_subprocess_output(subprocess_params)
df = pd.read_csv(os.path.join(self.path, 'elitist_archive_generation_final.dat'), sep=' ', header=None)
        genomes = np.array([x.split(',')[:-1] for x in df.iloc[:, -1]], dtype=int)
obj0 = np.array(df.iloc[:, 0])
obj1 = np.array(df.iloc[:, 1])
if self.n_objectives > 2:
obj2 = np.array(df.iloc[:, 2])
objs_final = np.vstack([obj0, obj1, obj2]).T
return genomes, objs_final # shape is (n_individuals, n_objs)
return genomes, np.vstack([obj0, obj1]).T
| 2,288 | 47.702128 | 111 |
py
|
ENCAS
|
ENCAS-main/utils.py
|
import atexit
import gzip
import logging
import math
import os
import random
import sys
import yaml
from ofa.utils import count_parameters, measure_net_latency
from pathlib import Path
from ptflops import get_model_complexity_info
from pymoo.factory import get_performance_indicator
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from typing import List
import numpy as np
from collections import defaultdict
from PIL import Image, ImageDraw
import torch
import torch.nn.functional
from matplotlib import pyplot as plt
import io
import selectors
import subprocess
from networks.ofa_mbv3_my import OFAMobileNetV3My
# NAT_PATH = '/export/scratch3/aleksand/nsganetv2'
NAT_PATH = '/projects/0/einf2071/nsganetv2'
NAT_LOGS_PATH = os.path.join(NAT_PATH, 'logs')
NAT_DATA_PATH = os.path.join(NAT_PATH, 'data')
_alphabets = ['full_nat', 'full_nat_w12', 'full_nat_w10', 'full_alphanet', 'full_nat_proxyless',
'full_alphanet_cascade2', 'full_nat_w12_cascade2',
'full_nat_w12_cascade5', 'full_nat_w10_cascade5', 'full_alphanet_cascade5', 'full_nat_proxyless_cascade5']
alphabet_dict = {a: os.path.join(NAT_PATH, 'alphabets', f'{a}.txt') for a in _alphabets}
ss_name_to_supernet_path = {'ofa12': 'supernet_w1.2', 'ofa10': 'supernet_w1.0',
'alphanet': 'alphanet_pretrained.pth.tar',
'alphanet1': 'alphanet_pretrained.pth.tar',
'alphanet2': 'alphanet_pretrained.pth.tar',
'alphanet3': 'alphanet_pretrained.pth.tar',
'alphanet4': 'alphanet_pretrained.pth.tar',
'attn': 'attentive_nas_pretrained.pth.tar',
'proxyless': 'ofa_proxyless_d234_e346_k357_w1.3'}
threshold_gene_to_value = {i: 0.1*(i + 1) for i in range(10)}
threshold_gene_to_value_moregranular = {i: 0.02 * i for i in range(51)}
def get_correlation(prediction, target):
import scipy.stats as stats
rmse = np.sqrt(((prediction - target) ** 2).mean())
rho, _ = stats.spearmanr(prediction, target)
tau, _ = stats.kendalltau(prediction, target)
return rmse, rho, tau
def look_up_latency(net, lut, resolution=224):
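    # Estimate latency of a MobileNetV3-style subnet by summing per-layer predictions from a
    # latency lookup table; `_half` tracks how the spatial resolution shrinks after stride-2 layers.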
def _half(x, times=1):
for _ in range(times):
x = np.ceil(x / 2)
return int(x)
predicted_latency = 0
# first_conv
predicted_latency += lut.predict(
'first_conv', [resolution, resolution, 3],
[resolution // 2, resolution // 2, net.first_conv.out_channels])
# final_expand_layer (only for MobileNet V3 models)
input_resolution = _half(resolution, times=5)
predicted_latency += lut.predict(
'final_expand_layer',
[input_resolution, input_resolution, net.final_expand_layer.in_channels],
[input_resolution, input_resolution, net.final_expand_layer.out_channels]
)
# feature_mix_layer
predicted_latency += lut.predict(
'feature_mix_layer',
[1, 1, net.feature_mix_layer.in_channels],
[1, 1, net.feature_mix_layer.out_channels]
)
# classifier
predicted_latency += lut.predict(
'classifier',
[net.classifier.in_features],
[net.classifier.out_features]
)
# blocks
fsize = _half(resolution)
for block in net.blocks:
idskip = 0 if block.config['shortcut'] is None else 1
se = 1 if block.config['mobile_inverted_conv']['use_se'] else 0
stride = block.config['mobile_inverted_conv']['stride']
out_fz = _half(fsize) if stride > 1 else fsize
block_latency = lut.predict(
'MBConv',
[fsize, fsize, block.config['mobile_inverted_conv']['in_channels']],
[out_fz, out_fz, block.config['mobile_inverted_conv']['out_channels']],
expand=block.config['mobile_inverted_conv']['expand_ratio'],
kernel=block.config['mobile_inverted_conv']['kernel_size'],
stride=stride, idskip=idskip, se=se
)
predicted_latency += block_latency
fsize = out_fz
return predicted_latency
def get_metric_complement(metric, if_segmentation=False):
max_value = 100
if if_segmentation:
max_value = 1
return max_value - metric
def fix_folder_names_imagenetv2():
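    # Zero-pads the numeric class-folder names of ImageNetV2 to 4 digits so that alphabetical
    # and numerical orderings coincide.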
import os, glob
for path in glob.glob('/export/scratch3/aleksand/data/imagenet/imagenetv2_all'):
if os.path.isdir(path):
for subpath in glob.glob(f'{path}/*'):
dirname = subpath.split('/')[-1]
os.rename(subpath, '/'.join(subpath.split('/')[:-1]) + '/' + dirname.zfill(4))
def compute_hypervolume(ref_pt, F, normalized=True, if_increase_ref_pt=True, if_input_already_pareto=False):
# calculate hypervolume on the non-dominated set of F
if not if_input_already_pareto:
front = NonDominatedSorting().do(F, only_non_dominated_front=True)
nd_F = F[front, :]
else:
nd_F = F
if if_increase_ref_pt:
ref_pt = 1.01 * ref_pt
hv = get_performance_indicator('hv', ref_point=ref_pt).calc(nd_F)
if normalized:
hv = hv / np.prod(ref_pt)
return hv
class LoggerWriter:
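    # File-like object that redirects writes (used for sys.stdout / sys.stderr) to a logging
    # function, buffering partial messages until a newline or a tqdm progress line arrives.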
def __init__(self, log_fun):
self.log_fun = log_fun
self.buf = []
self.is_tqdm_msg_fun = lambda msg: '%|' in msg
def write(self, msg):
is_tqdm = self.is_tqdm_msg_fun(msg)
has_newline = msg.endswith('\n')
if has_newline or is_tqdm:
            self.buf.append(msg)
self.log_fun(''.join(self.buf))
self.buf = []
else:
self.buf.append(msg)
def flush(self):
pass
def close(self):
self.log_fun.close()
def setup_logging(log_path):
from importlib import reload
reload(logging)
logging.StreamHandler.terminator = '' # don't add new line, I'll do it myself; this line affects both handlers
stream_handler = logging.StreamHandler(sys.__stdout__)
file_handler = logging.FileHandler(log_path, mode='a')
# don't want a bazillion tqdm lines in the log:
# file_handler.filter = lambda record: '%|' not in record.msg or '100%|' in record.msg
file_handler.filter = lambda record: '[A' not in record.msg and ('%|' not in record.msg or '100%|' in record.msg)
handlers = [
file_handler,
stream_handler]
logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(message)s',
format='%(message)s',
handlers=handlers,
datefmt='%H:%M')
sys.stdout = LoggerWriter(logging.info)
sys.stderr = LoggerWriter(logging.error)
# https://dev.to/taqkarim/extending-simplenamespace-for-nested-dictionaries-58e8
from types import SimpleNamespace
class RecursiveNamespace(SimpleNamespace):
@staticmethod
def map_entry(entry):
if isinstance(entry, dict):
return RecursiveNamespace(**entry)
return entry
def __init__(self, **kwargs):
super().__init__(**kwargs)
for key, val in kwargs.items():
if type(val) == dict:
setattr(self, key, RecursiveNamespace(**val))
elif type(val) == list:
setattr(self, key, list(map(self.map_entry, val)))
alphanet_config_str = '''
use_v3_head: True
resolutions: [192, 224, 256, 288]
first_conv:
c: [16, 24]
act_func: 'swish'
s: 2
mb1:
c: [16, 24]
d: [1, 2]
k: [3, 5]
t: [1]
s: 1
act_func: 'swish'
se: False
mb2:
c: [24, 32]
d: [3, 4, 5]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: False
mb3:
c: [32, 40]
d: [3, 4, 5, 6]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: True
mb4:
c: [64, 72]
d: [3, 4, 5, 6]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: False
mb5:
c: [112, 120, 128]
d: [3, 4, 5, 6, 7, 8]
k: [3, 5]
t: [4, 5, 6]
s: 1
act_func: 'swish'
se: True
mb6:
c: [192, 200, 208, 216]
d: [3, 4, 5, 6, 7, 8]
k: [3, 5]
t: [6]
s: 2
act_func: 'swish'
se: True
mb7:
c: [216, 224]
d: [1, 2]
k: [3, 5]
t: [6]
s: 1
act_func: 'swish'
se: True
last_conv:
c: [1792, 1984]
act_func: 'swish'
'''
def images_list_to_grid_image(ims, if_rgba=False, if_draw_middle_line=False, if_draw_grid=False,
n_rows=None, n_cols=None):
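    # Paste equally-sized PIL images into a rows x columns grid; optionally draw a horizontal
    # line through the middle of the grid and/or separating lines between cells.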
n_ims = len(ims)
width, height = ims[0].size
rows_num = math.floor(math.sqrt(n_ims)) if n_rows is None else n_rows
cols_num = int(math.ceil(n_ims / rows_num)) if n_cols is None else n_cols
new_im = Image.new('RGB' if not if_rgba else 'RGBA', (cols_num * width, rows_num * height))
for j in range(n_ims):
row = j // cols_num
column = j - row * cols_num
new_im.paste(ims[j], (column * width, row * height))
if if_draw_middle_line or if_draw_grid:
draw = ImageDraw.Draw(new_im)
if if_draw_middle_line:
draw.line((0, height // 2 * rows_num - 1, width * cols_num, height // 2 * rows_num - 1),
fill=(200, 100, 100, 255), width=1)
if if_draw_grid:
if rows_num > 1:
for i in range(1, rows_num):
draw.line((0, height * i - 1, width * cols_num, height * i - 1), fill=(0, 0, 0, 255), width=5)
if cols_num > 1:
for i in range(1, cols_num):
draw.line((width * i - 1, 0, width * i - 1, height * rows_num), fill=(0, 0, 0, 255), width=5)
return new_im
class CsvLogger():
def __init__(self, path, name):
Path(path).mkdir(exist_ok=True)
self.full_path = os.path.join(path, name)
self.columns = ['Evaluation', 'Time', 'Solution', 'Fitness']
self.data = []
self.f = open(self.full_path, 'w', buffering=100)
self.f.write(' '.join(self.columns) + '\n')
atexit.register(self.close_f)
def log(self, values: List):
values_str = ' '.join(str(v) for v in values) + '\n'
# print(values_str)
self.f.write(values_str)
def close_f(self):
self.f.close()
def capture_subprocess_output(subprocess_args):
# taken from https://gist.github.com/nawatts/e2cdca610463200c12eac2a14efc0bfb
# Start subprocess
# bufsize = 1 means output is line buffered
# universal_newlines = True is required for line buffering
process = subprocess.Popen(subprocess_args,
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
# env=dict(os.environ, OMP_NUM_THREADS='9')
)
# Create callback function for process output
buf = io.StringIO()
def handle_output(stream, mask):
# Because the process' output is line buffered, there's only ever one
# line to read when this function is called
line = stream.readline()
buf.write(line)
sys.stdout.write(line)
# Register callback for an "available for read" event from subprocess' stdout stream
selector = selectors.DefaultSelector()
selector.register(process.stdout, selectors.EVENT_READ, handle_output)
# Loop until subprocess is terminated
while process.poll() is None:
# Wait for events and handle them with their registered callbacks
events = selector.select()
for key, mask in events:
callback = key.data
callback(key.fileobj, mask)
# Get process return code
return_code = process.wait()
selector.close()
success = (return_code == 0)
# Store buffered output
output = buf.getvalue()
buf.close()
return output
def set_seed(seed):
print(f'Setting random seed to {seed}')
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def execute_func_for_all_runs_and_combine(experiment_name, func, func_combine=None, **kwargs):
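    # Walk logs/<experiment_name>/<algo_name>/<run_idx>/, apply `func` to every run folder,
    # group the results per algorithm and run, and optionally aggregate them with `func_combine`.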
experiment_path = os.path.join(NAT_LOGS_PATH, experiment_name)
algo_names = []
algo_name_to_seed_to_result = defaultdict(dict)
target_algos = kwargs.get('target_algos', None) # useful for debugging
target_runs = kwargs.get('target_runs', None) # useful for debugging
# print(f'{target_algos=}, {target_runs=}')
for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
if not f.is_dir():
continue
name_cur = f.name
if target_algos is not None and name_cur not in target_algos:
continue
algo_names.append(name_cur)
for run_folder in os.scandir(f.path):
if not run_folder.is_dir():
continue
run_idx = int(run_folder.name)
if target_runs is not None and run_idx not in target_runs:
continue
run_path = os.path.join(experiment_path, name_cur, str(run_idx))
out = func(run_path, run_idx=run_idx, **kwargs)
algo_name_to_seed_to_result[name_cur][run_idx] = out
if func_combine:
return func_combine(experiment_path, algo_name_to_seed_to_result, experiment_name=experiment_name, **kwargs)
return algo_name_to_seed_to_result
def save_gz(path, data):
f = gzip.GzipFile(path, "w")
np.save(file=f, arr=data)
f.close()
print(f'{path} saved')
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
def show_im_from_torch_tensor(t):
im = t.permute(1, 2, 0).numpy()
plt.imshow(im * np.array([0.24703233, 0.24348505, 0.26158768]) + np.array([0.49139968, 0.48215827, 0.44653124]))
plt.show()
def onehot(size, target):
vec = torch.zeros(size, dtype=torch.float32)
vec[target] = 1.
return vec
def rand_bbox(W, H, lam):
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def transform_supernet_name_swa(supernet_name_in, swa):
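    # Map a supernet checkpoint name to the name of its SWA-averaged counterpart for the given
    # `swa` window; unrecognized names map to 'noop'.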
if supernet_name_in == 'alphanet_pretrained.pth.tar':
return f'alphanet_pretrained_swa{swa}.pth.tar'
elif supernet_name_in == 'attentive_nas_pretrained.pth.tar':
return f'attentive_nas_pretrained_swa{swa}.pth.tar'
elif 'supernet_w1' in supernet_name_in:
return supernet_name_in + f'_swa{swa}'
elif 'ofa_proxyless' in supernet_name_in:
return supernet_name_in + f'_swa{swa}'
else:
return 'noop'
class LatencyEstimator(object):
"""
Modified from https://github.com/mit-han-lab/proxylessnas/blob/
f273683a77c4df082dd11cc963b07fc3613079a0/search/utils/latency_estimator.py#L29
"""
def __init__(self, fname):
# fname = download_url(url, overwrite=True)
with open(fname, 'r') as fp:
            self.lut = yaml.safe_load(fp)
@staticmethod
def repr_shape(shape):
if isinstance(shape, (list, tuple)):
return 'x'.join(str(_) for _ in shape)
elif isinstance(shape, str):
return shape
else:
            raise TypeError('unsupported shape: %s' % str(shape))
def predict(self, ltype: str, _input, output, expand=None,
kernel=None, stride=None, idskip=None, se=None):
"""
:param ltype:
Layer type must be one of the followings
1. `first_conv`: The initial stem 3x3 conv with stride 2
2. `final_expand_layer`: (Only for MobileNet-V3)
The upsample 1x1 conv that increases num_filters by 6 times + GAP.
3. 'feature_mix_layer':
The upsample 1x1 conv that increase num_filters to num_features + torch.squeeze
3. `classifier`: fully connected linear layer (num_features to num_classes)
4. `MBConv`: MobileInvertedResidual
:param _input: input shape (h, w, #channels)
:param output: output shape (h, w, #channels)
:param expand: expansion ratio
:param kernel: kernel size
:param stride:
:param idskip: indicate whether has the residual connection
:param se: indicate whether has squeeze-and-excitation
"""
infos = [ltype, 'input:%s' % self.repr_shape(_input),
'output:%s' % self.repr_shape(output), ]
if ltype in ('MBConv',):
assert None not in (expand, kernel, stride, idskip, se)
infos += ['expand:%d' % expand, 'kernel:%d' % kernel,
'stride:%d' % stride, 'idskip:%d' % idskip, 'se:%d' % se]
key = '-'.join(infos)
return self.lut[key]['mean']
def parse_string_list(string):
if isinstance(string, str):
# convert '[5 5 5 7 7 7 3 3 7 7 7 3 3]' to [5, 5, 5, 7, 7, 7, 3, 3, 7, 7, 7, 3, 3]
return list(map(int, string[1:-1].split()))
else:
return string
def pad_none(x, depth, max_depth):
new_x, counter = [], 0
for d in depth:
for _ in range(d):
new_x.append(x[counter])
counter += 1
if d < max_depth:
new_x += [None] * (max_depth - d)
return new_x
def validate_config(config, max_depth=4):
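    # Normalize an OFA-style config: parse stringified lists and pad the kernel-size and
    # expansion-ratio lists with None so that every stage has `max_depth` entries.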
kernel_size, exp_ratio, depth = config['ks'], config['e'], config['d']
if isinstance(kernel_size, str): kernel_size = parse_string_list(kernel_size)
if isinstance(exp_ratio, str): exp_ratio = parse_string_list(exp_ratio)
if isinstance(depth, str): depth = parse_string_list(depth)
assert (isinstance(kernel_size, list) or isinstance(kernel_size, int))
assert (isinstance(exp_ratio, list) or isinstance(exp_ratio, int))
assert isinstance(depth, list)
if len(kernel_size) < len(depth) * max_depth:
kernel_size = pad_none(kernel_size, depth, max_depth)
if len(exp_ratio) < len(depth) * max_depth:
exp_ratio = pad_none(exp_ratio, depth, max_depth)
# return {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'w': config['w']}
res = {'ks': kernel_size, 'e': exp_ratio, 'd': depth}
if 'r' in config:
res['r'] = config['r']
if 'w' in config:
res['w'] = config['w']
else:
res['w'] = 1.0
if 'position' in config:
res['position'] = config['position']
if 'threshold' in config:
res['threshold'] = config['threshold']
return res
if __name__ == '__main__':
fix_folder_names_imagenetv2()
sys.exit()
def get_net_info(net, data_shape, measure_latency=None, print_info=True, clean=False, lut=None,
if_dont_sum=False):
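    # Compute #params, FLOPs and (optionally) latency. If `net` is a list (an ensemble),
    # per-member statistics are summed, or kept as lists when `if_dont_sum` is set.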
def inner(net_cur, data_shape):
net_info = {}
if isinstance(net_cur, torch.nn.DataParallel):
net_cur = net_cur.module
net_info['params'] = count_parameters(net_cur)
net_info['flops'] = get_model_complexity_info(net_cur, (data_shape[0], data_shape[1], data_shape[2]),
print_per_layer_stat=False, as_strings=False, verbose=False)[0]
latency_types = [] if measure_latency is None else measure_latency.split('#')
for l_type in latency_types:
if l_type == 'flops':
continue # already calculated above
if lut is not None and l_type in lut:
latency_estimator = LatencyEstimator(lut[l_type])
latency = look_up_latency(net_cur, latency_estimator, data_shape[2])
measured_latency = None
else:
latency, measured_latency = measure_net_latency(
net_cur, l_type, fast=False, input_shape=data_shape, clean=clean)
net_info['%s latency' % l_type] = {'val': latency, 'hist': measured_latency}
if print_info:
print('Total training params: %.2fM' % (net_info['params'] / 1e6))
print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))
for l_type in latency_types:
print('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))
gpu_latency, cpu_latency = None, None
for k in net_info.keys():
if 'gpu' in k:
gpu_latency = np.round(net_info[k]['val'], 2)
if 'cpu' in k:
cpu_latency = np.round(net_info[k]['val'], 2)
return {'params': np.round(net_info['params'] / 1e6, 2),
'flops': np.round(net_info['flops'] / 1e6, 2),
'gpu': gpu_latency, 'cpu': cpu_latency}
if not isinstance(net, list): # if not an ensemble, just calculate it
return inner(net, data_shape)
# if an ensemble, need to sum properly
data_shapes = [(data_shape[0], s1, s2) for s1, s2 in zip(data_shape[1], data_shape[2])]
results = [inner(net_cur, d_s) for net_cur, d_s in zip(net, data_shapes)]
res_final = {} # sum everything, keep None as None
for k, v in results[0].items():
if not if_dont_sum:
res_final[k] = v
for res_i in results[1:]:
if v is None:
continue
res_final[k] += res_i[k]
else:
res_final[k] = [v]
for res_i in results[1:]:
if v is None:
continue
res_final[k] += [res_i[k]]
return res_final
class SupernetworkWrapper:
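    # Builds an evaluation engine for a single supernetwork and extracts weight-sharing subnets
    # for given architecture configurations via `sample`.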
def __init__(self,
n_classes=1000,
model_path='./data/ofa_mbv3_d234_e346_k357_w1.0',
engine_class_to_use=OFAMobileNetV3My, **kwargs):
from nat import NAT
self.dataset_name = kwargs['dataset']
self.search_space_name = kwargs['search_space_name']
engine_lambda = NAT.make_lambda_for_engine_creation(engine_class_to_use, n_classes, False,
self.dataset_name, self.search_space_name)
self.engine, _ = engine_lambda(model_path, None, to_cuda=False, if_create_optimizer=False)
def sample(self, config):
if self.search_space_name == 'ofa':
config = validate_config(config)
self.engine.set_active_subnet(ks=config['ks'], e=config['e'], d=config['d'], w=config['w'])
subnet = self.engine.get_active_subnet(preserve_weight=True)
return subnet, config
| 23,267 | 34.577982 | 120 |
py
|
ENCAS
|
ENCAS-main/nat_run_many.py
|
import argparse
import glob
import os
from concurrent.futures.process import ProcessPoolExecutor
from pathlib import Path
import datetime
import torch
from matplotlib import pyplot as plt
import utils
from nat import default_kwargs, main
import yaml
from shutil import copy
import traceback
from concurrent.futures import ThreadPoolExecutor
import cv2
from plot_results.plotting_functions import compare_val_and_test
from after_search.average_weights import swa_for_whole_experiment
from after_search.evaluate_stored_outputs import evaluate_stored_whole_experiment
from after_search.store_outputs import store_cumulative_pareto_front_outputs
from after_search.symlink_imagenet import create_symlinks
cv2.setNumThreads(0)
def create_all_run_kwargs(config_path):
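    # Expand an experiment yaml into one kwargs dict per (algorithm variant, run): every run
    # gets its own subfolder and its own random seed.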
config_loaded = yaml.safe_load(open(config_path))
experiment_name = config_loaded['experiment_name']
print(experiment_name)
experiment_kwargs = {k: v[0] for k, v in default_kwargs.items()} # don't need help string
experiment_kwargs = dict(experiment_kwargs, **config_loaded)
path_logs = experiment_kwargs['path_logs'] # '/export/scratch3/aleksand/nsganetv2/'
Path(path_logs).mkdir(exist_ok=True)
path = os.path.join(path_logs, experiment_name)
experiment_kwargs['experiment_name'] = experiment_name
Path(path).mkdir(exist_ok=True)
copy(config_path, path)
algo_mods_all = config_loaded['algo_mods_all']
transform_str = lambda x: x if type(x) != str else x.replace("/", "_").replace(".", "_")
algo_mods_names = [
'!'.join(f'{k}:{transform_str(v)}' for k, v in algo_mods.items())
for algo_mods in algo_mods_all
]
algo_kwargs_all = [dict(experiment_kwargs, **algo_mods) for algo_mods in algo_mods_all]
seed_offset = experiment_kwargs.get('seed_offset', 0) # wanna run the 10 runs on different machines
cur_seed = experiment_kwargs['random_seed'] + seed_offset
algo_run_kwargs_all = []
for i_algo, algo_kwargs in enumerate(algo_kwargs_all):
path_algo = os.path.join(path, algo_mods_names[i_algo])
Path(path_algo).mkdir(exist_ok=True)
n_runs = algo_kwargs['n_runs']
for run in range(n_runs):
algo_run_kwargs = algo_kwargs.copy() # because NAT pops the values, which breaks all the runs after the first
path_algo_run = os.path.join(path_algo, f'{run + seed_offset}')
algo_run_kwargs['experiment_name'] = path_algo_run
algo_run_kwargs['random_seed'] = cur_seed
algo_run_kwargs_all.append(algo_run_kwargs)
cur_seed += 1
return experiment_kwargs, algo_run_kwargs_all
def create_config_for_continuation(run_path, target_max_iter):
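    # Build a config_msunas_cont.yml that resumes a run from its last stored iteration;
    # returns None when the run has already reached `target_max_iter`.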
config_path = os.path.join(run_path, 'config_msunas.yml')
config = yaml.safe_load(open(config_path, 'r'))
exp_name = config['experiment_name']
n_iters = len(glob.glob(os.path.join(run_path, "iter_*.stats")))
if n_iters > 0:
last_iter = n_iters - 1
if last_iter == target_max_iter:
return None
config['resume'] = os.path.join(exp_name, f'iter_{last_iter}.stats')
supernet_paths = config['supernet_path']
supernet_paths_new = []
for p in supernet_paths:
name = os.path.basename(p)
supernet_paths_new.append(os.path.join(exp_name, f'iter_{last_iter}', name))
config['supernet_path'] = supernet_paths_new
config_path_new = os.path.join(run_path, 'config_msunas_cont.yml')
yaml.dump(config, open(config_path_new, 'w'))
return config_path_new
def create_all_run_continue_kwargs(config_path):
config_loaded = yaml.safe_load(open(config_path))
experiment_name = config_loaded['experiment_name']
print(experiment_name)
experiment_kwargs = {k: v[0] for k, v in default_kwargs.items()} # don't need help string
experiment_kwargs = dict(experiment_kwargs, **config_loaded)
path_logs = experiment_kwargs['path_logs'] # '/export/scratch3/aleksand/nsganetv2/'
experiment_path = os.path.join(path_logs, experiment_name)
experiment_kwargs['experiment_name'] = experiment_name
algo_run_kwargs_all = []
for f in sorted(os.scandir(experiment_path), key=lambda e: e.name):
if not f.is_dir():
continue
name_cur = f.name
for run_folder in sorted(os.scandir(f.path), key=lambda e: e.name):
if not run_folder.is_dir():
continue
run_idx = run_folder.name
run_path = os.path.join(experiment_path, name_cur, run_folder.name)
config_path_cur = create_config_for_continuation(run_path, experiment_kwargs['iterations'])
if config_path_cur is not None:
algo_run_kwargs_all.append(yaml.safe_load(open(config_path_cur, 'r')))
return experiment_kwargs, algo_run_kwargs_all
def execute_run(algo_run_kwargs):
try:
main(algo_run_kwargs)
except Exception as e:
print(traceback.format_exc())
print(e)
def init_worker(zeroeth_gpu):
os.environ["CUDA_VISIBLE_DEVICES"] = f'{zeroeth_gpu}'
print('cuda = ', os.environ["CUDA_VISIBLE_DEVICES"])
def run_kwargs_many(experiment_kwargs, algo_run_kwargs_all):
zeroeth_gpu = experiment_kwargs['zeroeth_gpu']
executor_class = ProcessPoolExecutor
if experiment_kwargs['if_debug_run']:
executor_class = ThreadPoolExecutor # it's easier to debug with threads
with executor_class(max_workers=experiment_kwargs['n_gpus'], initializer=init_worker,
initargs=(zeroeth_gpu,)) as executor:
print(algo_run_kwargs_all)
futures = [executor.submit(execute_run, kwargs) for kwargs in algo_run_kwargs_all]
for f in futures:
f.result() # wait on everything
print(datetime.datetime.now())
def store(store_kwargs):
print(f'{store_kwargs=}')
store_cumulative_pareto_front_outputs(store_kwargs['exp_name'], store_kwargs['dataset_type'],
max_iter=store_kwargs['max_iter'], swa=store_kwargs['swa'],
target_runs=store_kwargs['target_runs'])
if __name__ == '__main__':
torch.multiprocessing.set_start_method('spawn')
p = argparse.ArgumentParser()
    p.add_argument('--config', default='configs_nat/cifar100_q0_ofa10_sep_DEBUG.yml', type=str)
    p.add_argument('--continue', default=False, action='store_true')
cfgs = vars(p.parse_args())
config_path = cfgs['config']
# 1. run NAT
if not cfgs['continue']:
experiment_kwargs, algo_run_kwargs_all = create_all_run_kwargs(config_path)
else:
experiment_kwargs, algo_run_kwargs_all = create_all_run_continue_kwargs(config_path)
run_kwargs_many(experiment_kwargs, algo_run_kwargs_all)
# 2 (optional). do SWA, store subnetwork outputs, compare validation & test
plt.rcParams.update({'font.size': 14})
plt.rcParams['axes.grid'] = True
exp_name = experiment_kwargs['experiment_name']
max_iter = experiment_kwargs['iterations']
if_store = experiment_kwargs['if_store']
dataset = experiment_kwargs['dataset']
os.environ["CUDA_VISIBLE_DEVICES"] = f'{experiment_kwargs["zeroeth_gpu"]}'
swa = experiment_kwargs.get('post_swa', None)
if len(algo_run_kwargs_all) > 0: # == 0 can occur with 'continue'
target_runs = [int(x['experiment_name'][-1]) for x in algo_run_kwargs_all]
else:
target_runs = list(range(experiment_kwargs['seed_offset'],
experiment_kwargs['seed_offset'] + experiment_kwargs['n_runs']))
if dataset == 'imagenet':
# for imagenet weights are not trained => stored only once, but my code needs a supernet-per-metaiteration
# => symlink
utils.execute_func_for_all_runs_and_combine(exp_name, create_symlinks, target_runs=target_runs)
if swa is not None:
for supernet in experiment_kwargs['supernet_path']:
swa_for_whole_experiment(exp_name, range(max_iter + 1 - swa, max_iter + 1),
os.path.basename(supernet), target_runs=target_runs)
if not if_store:
compare_val_and_test(exp_name, f'test_swa{swa}', swa=swa, max_iter=max_iter, target_runs=target_runs)
if if_store:
zeroeth_gpu = experiment_kwargs['zeroeth_gpu']
kwargs_for_store = [
dict(exp_name=exp_name, dataset_type='val', max_iter=max_iter, swa=swa, target_runs=target_runs),
dict(exp_name=exp_name, dataset_type='test', max_iter=max_iter, swa=swa, target_runs=target_runs)
]
n_workers = 1
with ProcessPoolExecutor(max_workers=n_workers, initializer=init_worker,
initargs=(zeroeth_gpu,)) as executor:
futures = [executor.submit(store, kwargs) for kwargs in kwargs_for_store]
for f in futures:
f.result() # wait on everything
test_name = 'test' if swa is None else f'test_swa{swa}'
dataset_to_label_path = {'cifar100': 'labels_cifar100_test.npy', 'cifar10': 'labels_cifar10_test.npy',
'imagenet': 'labels_imagenet_test.npy'}
evaluate_stored_whole_experiment(exp_name, test_name, dataset_to_label_path[dataset],
max_iter=max_iter, target_runs=target_runs)
compare_val_and_test(exp_name, test_name, max_iter=max_iter, target_runs=target_runs)
else:
if swa is None:
            compare_val_and_test(exp_name, 'test', max_iter=max_iter, target_runs=target_runs)
| 9,599 | 41.105263 | 122 |
py
|
ENCAS
|
ENCAS-main/dynamic_resolution_collator.py
|
import random
import copy
import ctypes
import torch
import multiprocessing as mp
import numpy as np
from torchvision import transforms
from utils import onehot, rand_bbox, show_im_from_torch_tensor
class DynamicResolutionCollator:
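    # Collate function that, for every batch, picks one of the currently allowed resolutions
    # (shared with dataloader workers through an mp.Array), resizes and transforms the images,
    # and optionally applies CutMix against the previous batch.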
def __init__(self, n_resolutions_max, if_return_target_idx=True, if_cutmix=False, cutmix_kwargs=None):
self.resolutions = mp.Array(ctypes.c_int, n_resolutions_max)
self.n_resolutions_to_use = n_resolutions_max
self.n_resolutions_max = n_resolutions_max
self.resize_dict = {}
self.if_return_target_idx = if_return_target_idx
self.if_cutmix = if_cutmix
self.prev_batch_for_cutmix = None
self.cutmix_kwargs = cutmix_kwargs
def set_info_for_transforms(self, resize_class_lambda, transforms_after_resize, transforms_pre_resize=[]):
# this MUST be called before the dataloaders are actually used!
# I would've put it in __init__, but I need to create collators before creating the dataprovider,
# and these values are created only during creation of the dataprovider
self.resize_class_lambda = resize_class_lambda
self.transforms_after_resize = transforms_after_resize
self.transforms_pre_resize = transforms_pre_resize
def set_resolutions(self, resolutions):
self.n_resolutions_to_use = len(resolutions)
if self.n_resolutions_to_use > self.n_resolutions_max:
raise ValueError('self.n_resolutions_to_use > self.n_resolutions_max')
for i in range(self.n_resolutions_to_use):
cur_res = resolutions[i]
self.resolutions[i] = cur_res
def __call__(self, batch):
# don't need sync 'cause don't need to change the array of resolutions
target_idx = np.random.choice(self.n_resolutions_to_use)
target_res = self.resolutions[target_idx]
if target_res not in self.resize_dict:
self.resize_dict[target_res] = self.resize_class_lambda(target_res)
cur_resize_op = self.resize_dict[target_res]
transforms_composed = transforms.Compose(self.transforms_pre_resize + [cur_resize_op] + self.transforms_after_resize)
imgs = [transforms_composed(img_n_label[0]) for img_n_label in batch]
label = [img_n_label[1] for img_n_label in batch]
if self.if_cutmix:
cur_batch_before_cutmix = list(zip(copy.deepcopy(imgs), copy.deepcopy(label)))
if self.prev_batch_for_cutmix is None: #this is the first batch
self.prev_batch_for_cutmix = cur_batch_before_cutmix
def cutmix(img, lbl):
args = self.cutmix_kwargs
lbl_onehot = onehot(args['n_classes'], lbl)
if np.random.rand(1) > args['prob']:
return img, lbl_onehot
rand_index = random.choice(range(len(self.prev_batch_for_cutmix)))
img2, lbl2 = self.prev_batch_for_cutmix[rand_index]
lbl2_onehot = onehot(args['n_classes'], lbl2)
lam = np.random.beta(args['beta'], args['beta'])
W, H = img.shape[-2:]
W2, H2 = img2.shape[-2:]
# my batches have different spatial sizes - that's the whole point of this collator!
W, H = min(W, W2), min(H, H2)
bbx1, bby1, bbx2, bby2 = rand_bbox(W, H, lam)
img[:, bbx1:bbx2, bby1:bby2] = img2[:, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (W * H))
lbl_onehot = lbl_onehot * lam + lbl2_onehot * (1. - lam)
return img, lbl_onehot
img_n_label_cutmix = [cutmix(im, lbl) for im, lbl in zip(imgs, label)]
imgs = [img_n_label[0] for img_n_label in img_n_label_cutmix]
label = [img_n_label[1] for img_n_label in img_n_label_cutmix]
self.prev_batch_for_cutmix = cur_batch_before_cutmix
imgs = torch.stack(imgs)
if type(label[0]) is int:
label = torch.LongTensor(label)
else:
label = torch.stack(label)
to_return = (imgs, label)
if self.if_return_target_idx:
to_return += (target_idx,)
return to_return
| 4,319 | 43.081633 | 125 |
py
|
ENCAS
|
ENCAS-main/fitness_functions.py
|
import numpy as np
import time
from utils import set_seed
from utils import CsvLogger
from nat_api import NatAPI
from encas.encas_api import EncasAPI
def alphabet_to_list(alphabet, n_variables):
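    # `alphabet` is either a numeric string (same alphabet size for every variable) or a path
    # to a file whose first line lists space-separated per-variable alphabet sizes.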
if alphabet.isnumeric():
return [int(alphabet) for _ in range(n_variables)]
file = open(alphabet, 'r')
alphabetSizes = file.readline().split(' ')
file.close()
return [int(alphabetSizes[i]) for i in range(n_variables)]
class Logger:
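    # Caches evaluated solutions and logs every new evaluation (index, elapsed time, genome,
    # fitness) to gomea.csv via CsvLogger.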
def __init__(self, folder):
self.folder = folder
self.solutions_cache = {}
# self.solutionsCounter = {}
self.start_time = time.time()
# self.file = open('%s/optimization.txt' % self.folder, 'w', buffering=1)
# self.file.write('#Evals time solution fitness\n')
# file.close()
self.csv_logger = CsvLogger(self.folder, 'gomea.csv')
self.eval_cnt = 0
def elapsed_time(self):
return time.time() - self.start_time
def return_solution(self, x):
return self.solutions_cache.get(x, None)
def solution_to_str(self, arr):
x = [str(i) for i in arr]
x = ''.join(x)
return x
def solution_to_str_commas(self, arr):
x = [str(i) for i in arr]
x = ','.join(x)
return x
def write(self, x, fitness):
if x not in self.solutions_cache:
self.solutions_cache[x] = fitness
elapsed_time = time.time() - self.start_time
cur_solution_idx = self.eval_cnt
self.eval_cnt += 1
fitness_str = str(fitness).replace(' ', '')
self.csv_logger.log([cur_solution_idx, elapsed_time, x, fitness_str])
class FitnessFunction():
def __init__(self, folder, filename, n_variables, alphabet, random_seed):
self.logger = Logger(folder)
self.numberOfVariables = int(n_variables)
self.alphabet = alphabet_to_list(alphabet, n_variables)
self.filename = filename
def fitness(self, x):
pass
class FitnessFunctionAPIWrapper(FitnessFunction):
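    # Wraps an evaluation API (created by subclasses), optionally caching already-evaluated
    # solutions and logging every evaluation through the Logger above.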
def __init__(self, folder, filename, n_variables, alphabet, random_seed, if_count_zeros=True):
super().__init__(folder, filename, n_variables, alphabet, random_seed)
self.api = None # descendants will need to initialize the API
self.if_count_zeros = if_count_zeros
set_seed(random_seed)
def fitness(self, solution):
        assert isinstance(solution, (list, tuple, np.ndarray))
solution = np.array(solution).astype(np.int32)
solution_str = self.logger.solution_to_str(solution)
if self.api.use_cache:
find = self.logger.return_solution(solution_str)
            if find is not None:
return find
score = self.api.fitness(solution)
if self.if_count_zeros or score != 0:
self.logger.write(solution_str, score)
return score
class FitnessFunctionAPIWrapperWithTransparentCaching(FitnessFunction):
'''
difference to FitnessFunctionAPIWrapper: the first value in the returned tuple is True if cache was used
(i.e. no new evaluation). It is cast to long because that's easier to handle on the C side
'''
def __init__(self, folder, filename, n_variables, alphabet, random_seed, if_count_zeros=True):
super().__init__(folder, filename, n_variables, alphabet, random_seed)
self.api = None # descendants will need to initialize the API
self.if_count_zeros = if_count_zeros
set_seed(random_seed)
def fitness(self, solution):
        assert isinstance(solution, (list, tuple, np.ndarray))
solution = np.array(solution).astype(np.int32)
solution_str = self.logger.solution_to_str_commas(solution)
if self.api.use_cache:
find = self.logger.return_solution(solution_str)
            if find is not None:
return (int(True),) + find
score = self.api.fitness(solution)
if self.if_count_zeros or score != 0:
self.logger.write(solution_str, score)
return (int(False),) + score
class NatFitness(FitnessFunctionAPIWrapperWithTransparentCaching):
def __init__(self, folder, filename, n_variables, alphabet, random_seed):
super().__init__(folder, filename, n_variables, alphabet, random_seed)
self.api = NatAPI(filename)
class EncasFitness(FitnessFunctionAPIWrapperWithTransparentCaching):
def __init__(self, folder, filename, n_variables, alphabet, random_seed):
super().__init__(folder, filename, n_variables, alphabet, random_seed)
self.api = EncasAPI(filename)
| 4,678 | 34.180451 | 108 |
py
|
ENCAS
|
ENCAS-main/utils_pareto.py
|
import json
import os
import numpy as np
from utils import NAT_LOGS_PATH
def is_pareto_efficient(costs): # from https://stackoverflow.com/a/40239615/5126900
"""
Find the pareto-efficient points
:param costs: An (n_points, n_costs) array
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient] < c, axis=1) # Keep any point with a lower cost
is_efficient[i] = True # And keep self
return is_efficient
def get_best_pareto_from_iter(experiment_path, iter):
path = os.path.join(NAT_LOGS_PATH, experiment_path)
obj1_archive = []
true_errors_archive = []
configs_archive = []
with open(os.path.join(path, "iter_{}.stats".format(iter))) as f:
data = json.load(f)
for data_archive in data['archive']: # archive always includes candidates
try:
(config, perf, flops) = data_archive
            except ValueError:
config, perf, flops, diversity = data_archive
obj1_archive.append(flops)
true_errors_archive.append(perf)
configs_archive.append(config)
idx_archive_sort_flops = np.argsort(obj1_archive)
obj1_archive = np.array(obj1_archive)[idx_archive_sort_flops]
true_errors_archive = np.array(true_errors_archive)[idx_archive_sort_flops]
all_objs = list(zip(true_errors_archive, obj1_archive))
all_objs_cur = np.array(all_objs)
pareto_best_cur_idx = is_pareto_efficient(all_objs_cur)
return np.array(configs_archive)[idx_archive_sort_flops][pareto_best_cur_idx], \
true_errors_archive[pareto_best_cur_idx], obj1_archive[pareto_best_cur_idx]
def get_best_pareto_up_and_including_iter(experiment_path, iter):
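    # Like get_best_pareto_from_iter, but aggregates the archives of iterations 0..iter and
    # additionally returns, for every Pareto-optimal point, the iteration it came from.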
path = os.path.join(NAT_LOGS_PATH, experiment_path)
obj1_archive = []
true_errors_archive = []
configs_archive = []
iterations_archive = [] #need to store with which version of supernet's weights the performance was achieved
for i in range(iter + 1):
with open(os.path.join(path, "iter_{}.stats".format(i))) as f:
data = json.load(f)
for data_archive in data['archive']: # archive always includes candidates
try:
(config, perf, flops) = data_archive
                except ValueError:
config, perf, flops, diversity = data_archive
obj1_archive.append(flops)
true_errors_archive.append(perf)
configs_archive.append(config)
iterations_archive.append(i)
idx_archive_sort_flops = np.argsort(obj1_archive)
obj1_archive = np.array(obj1_archive)[idx_archive_sort_flops]
true_errors_archive = np.array(true_errors_archive)[idx_archive_sort_flops]
all_objs = list(zip(true_errors_archive, obj1_archive))
all_objs_cur = np.array(all_objs)
pareto_best_cur_idx = is_pareto_efficient(all_objs_cur)
return np.array(configs_archive)[idx_archive_sort_flops][pareto_best_cur_idx], \
true_errors_archive[pareto_best_cur_idx], \
obj1_archive[pareto_best_cur_idx], \
np.array(iterations_archive)[idx_archive_sort_flops][pareto_best_cur_idx]
def get_everything_up_and_including_iter(experiment_path, iter):
path = os.path.join(NAT_LOGS_PATH, experiment_path)
obj1_archive = []
true_errors_archive = []
configs_archive = []
iterations_archive = [] #need to store with which version of supernet's weights the performance was achieved
for i in range(iter + 1):
with open(os.path.join(path, "iter_{}.stats".format(i))) as f:
data = json.load(f)
for data_archive in data['archive']: # archive always includes candidates
try:
(config, perf, flops) = data_archive
                except ValueError:
config, perf, flops, diversity = data_archive
obj1_archive.append(flops)
true_errors_archive.append(perf)
configs_archive.append(config)
iterations_archive.append(i)
idx_archive_sort_flops = np.argsort(obj1_archive)
obj1_archive = np.array(obj1_archive)[idx_archive_sort_flops]
true_errors_archive = np.array(true_errors_archive)[idx_archive_sort_flops]
return np.array(configs_archive)[idx_archive_sort_flops], \
true_errors_archive, \
obj1_archive, \
np.array(iterations_archive)[idx_archive_sort_flops]
def get_everything_from_iter(experiment_path, iter):
# the only diffs from the fun above are (1) removal of pareto_best_cur_idx (2) returning of iters
path = os.path.join(NAT_LOGS_PATH, experiment_path)
obj1_archive = []
true_errors_archive = []
configs_archive = []
with open(os.path.join(path, "iter_{}.stats".format(iter))) as f:
data = json.load(f)
for data_archive in data['archive']: # archive always includes candidates
try:
(config, perf, flops) = data_archive
            except ValueError:
config, perf, flops, diversity = data_archive
obj1_archive.append(flops)
true_errors_archive.append(perf)
configs_archive.append(config)
idx_archive_sort_flops = np.argsort(obj1_archive)
obj1_archive = np.array(obj1_archive)[idx_archive_sort_flops]
true_errors_archive = np.array(true_errors_archive)[idx_archive_sort_flops]
return np.array(configs_archive)[idx_archive_sort_flops], \
true_errors_archive, obj1_archive, np.array([iter] * len(configs_archive))
| 5,740 | 40.302158 | 116 |
py
|
ENCAS
|
ENCAS-main/utils_train.py
|
import random
import numpy as np
import torch
from torch.nn.modules.module import Module
# implementation of CutMixCrossEntropyLoss taken from https://github.com/ildoonet/cutmix
class CutMixCrossEntropyLoss(Module):
def __init__(self, size_average=True):
super().__init__()
self.size_average = size_average
def forward(self, input, target):
if len(target.size()) == 1:
target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))
target = target.float().cuda()
return cross_entropy(input, target, self.size_average)
def cross_entropy(input, target, size_average=True):
""" Cross entropy that accepts soft targets
Args:
pred: predictions for neural network
targets: targets, can be soft
size_average: if false, sum is returned instead of mean
Examples::
input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
input = torch.autograd.Variable(out, requires_grad=True)
target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
target = torch.autograd.Variable(y1)
loss = cross_entropy(input, target)
loss.backward()
"""
logsoftmax = torch.nn.LogSoftmax(dim=1)
if size_average:
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
else:
return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))
def init_dataloader_worker_state(worker_id):
seed = (torch.initial_seed() + worker_id) % 2 ** 32
print(f'Init dataloader seed: {seed}')
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
return np.random.seed(seed)
def create_resize_class_lambda_train(resize_transform_class, image_size, **kwargs): #pickle can't handle lambdas
return resize_transform_class((image_size, image_size), **kwargs)
class Cutout(object):
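    # Zero out a random square patch of side `length` in a CHW image tensor
    # (a no-op when length == 0).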
def __init__(self, length):
self.length = length
def __call__(self, img):
if self.length == 0:
return img
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class LabelSmoothing(torch.nn.Module):
"""NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
def albumentation_to_torch_transform(albumentation_f, image):
return torch.tensor(albumentation_f(image=image.permute(1, 2, 0).numpy())['image']).permute(2, 0, 1)
| 3,420 | 32.213592 | 112 |
py
|
ENCAS
|
ENCAS-main/plot_results/plot_results_imagenet.py
|
from plotting_functions import *
if __name__ == '__main__':
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
# plt.rcParams.update({'font.size': 15})
plt.rcParams.update({'font.size': 18})
plt.rcParams['axes.grid'] = True
from cycler import cycler
plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 4
# compare_test_many_experiments([
# 'imagenet_r0_proxyless_sep',
# 'imagenet_r0_ofa10_sep',
# 'imagenet_r0_ofa12_sep',
# 'imagenet_r0_attn_sep',
# 'imagenet_r0_alpha_sep',
# # 'logs_classification/imagenet_NAT_front',
# ], ['test'] * 5, if_plot_many_in_one=True, max_iters=15, out_name='baselines_imagenet', pdf=True)
# Fig. 5
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#FBC15E', '#988ED5', '#348ABD'])
# compare_test_many_experiments(['imagenet_r0_alpha_sep',
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3',
# 'posthoc_imagenet_r0_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# # 'logs_classification/imagenet_efficientnet_front',
# # 'logs_classification/imagenet_efficientnet_cascade_front',
# ], ['test'] * 4, if_plot_many_in_one=True, max_iters=15,
# algo_names=['search_algo:nsga3!subset_selector:reference', 'greedy', 'mo-gomea', 'mo-gomea',
# # 'effnet', 'effnet-cascade'
# ],
# out_name='cascades_and_greedy_imagenet', pdf=True,
# legend_labels=['NAT (best)', 'GreedyCascade', 'ENCAS (1 supernet)', 'ENCAS (5 supernets)'])
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 6
# paths = ['posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'logs_classification/imagenet_efficientnet_front',
# 'logs_classification/imagenet_NsgaNet2_front',
# 'logs_classification/imagenet_alphanet_front',
# 'logs_classification/imagenet_efficientnet_cascade_front',
# 'logs_classification/imagenet_NEAS',
# 'logs_classification/imagenet_MobileNetV3',
# 'logs_classification/imagenet_BigNAS',
# 'logs_classification/imagenet_OFA',
# ]
# compare_test_many_experiments(paths, ['test'] * len(paths), if_plot_many_in_one=True, max_iters=15,
# algo_names=['mo-gomea'] + ['whatever'] * (len(paths) - 1),
# out_name='cmp_sota_imagenet', pdf=True,
# legend_labels=['ENCAS', 'EfficientNet', 'NSGANetV2', 'AlphaNet', 'EfficientNet Cascade',
# 'NEAS', 'MobileNetV3', 'BigNAS', 'OFA (#75)'])
# Fig. 8
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#FBC15E'])
# compare_test_many_experiments(['posthoc_imagenet_timm_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'logs_classification/timm_all',
# 'logs_classification/imagenet_efficientnet_front_full',
# 'logs_classification/imagenet_efficientnet_cascade_front_full',
#
# ],'test', algo_names=['mo-gomea', 'timm: trade-off front', 'EfficientNet', 'EfficientNet Cascade'],
# legend_labels=['ENCAS', 'timm: trade-off front', 'EfficientNet', 'EfficientNet Cascade'],
# max_iters=15, if_plot_many_in_one=True, out_name='timm', pdf=True,
# if_log_scale_x=True)
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 12
# compare_test_many_experiments([
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['random', 'mo-gomea'],# 'nsganetv2'],
# out_name='random_acc_imagenet', pdf=True,
# legend_labels=['Random', 'MO-GOMEA'])
# Fig. 11
# compare_test_many_experiments([
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_imagenet_r0_5nets_sep_n5_evals600000_ensemble_moregranular3',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'] * 2,# 'nsganetv2'],
# out_name='ensemble_acc_imagenet', pdf=True,
# legend_labels=['ENCAS', 'ENENS'], if_log_scale_x=True)
# wanna know the median run to get the named models from it
# compare_test_many_experiments(['posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002'],
# 'test', if_plot_many_in_one=True, max_iters=15,
# algo_names=['mo-gomea'], print_median_run_flops_and_accs=True)
| 6,002 | 64.967033 | 155 |
py
|
ENCAS
|
ENCAS-main/plot_results/plot_results_cifar100.py
|
from plotting_functions import *
if __name__ == '__main__':
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
# plt.rcParams.update({'font.size': 15})
plt.rcParams.update({'font.size': 18})
plt.rcParams['axes.grid'] = True
tmp_path = os.path.join(utils.NAT_PATH, '.tmp')
from cycler import cycler
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', 'c', '#777777', '#FBC15E', '#8EBA42', '#FFB5B8', 'tab:brown'])
plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 4
# plt.rcParams.update({'font.size': 16})
# compare_test_many_experiments([
# 'cifar100_r0_proxyless_sep',
# 'cifar100_r0_ofa10_sep',
# 'cifar100_r0_ofa12_sep',
# 'cifar100_r0_attn_sep',
# 'cifar100_r0_alpha_sep',
# 'cifar100_reproducenat',
# ], ['test_swa20']*5 + ['test'], if_plot_many_in_one=True, max_iters=30,
# out_name='baselines_cifar100', pdf=True)
# plt.rcParams.update({'font.size': 18})
# Fig. 5
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#FBC15E', '#988ED5', '#348ABD'])
# compare_test_many_experiments(['cifar100_r0_alpha_sep',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3',
# 'posthoc_cifar100_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# # 'logs_classification/cifar100_NsgaNet2_front',
# ], ['test_swa20'] + ['test'] * 3, if_plot_many_in_one=True, max_iters=30,
# algo_names=['search_algo:nsga3!subset_selector:reference', 'greedy', 'mo-gomea', 'mo-gomea'],# 'nsganetv2'],
# out_name='cascades_and_greedy_cifar100', pdf=True,
# legend_labels=['NAT (best)', 'GreedyCascade', 'ENCAS (1 supernet)',
# 'ENCAS (5 supernets)'])
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 6
# compare_test_many_experiments(['posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'logs_classification/cifar100_efficientnet_front',
# 'logs_classification/cifar100_NsgaNet2_front',
# 'logs_classification/cifar100_GDAS',
# 'logs_classification/cifar100_SETN',
# ], ['test'] * 5, if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'] + ['whatever'] * 4,
# out_name='cmp_sota_cifar100', pdf=True,
# legend_labels=['ENCAS', 'EfficientNet', 'NSGANetV2', 'GDAS', 'SETN'])
# Fig. 7
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33','#348ABD', '#8EBA42', '#FBC15E'])
# compare_test_many_experiments(['cifar100_r0_attn_sep',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'cifar100_r0_5nets',
# 'posthoc_cifar100_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
# ], ['test_swa20', 'test', 'test_swa20', 'test'], if_plot_many_in_one=True, max_iters=30,
# algo_names=['search_algo:nsga3!subset_selector:reference','mo-gomea',
# 'search_algo:mo-gomea!subset_selector:reference', 'mo-gomea'],
# out_name='sep_vs_join_cifar100', pdf=True,
# legend_labels=['NAT (best)','ENCAS', 'ENCAS-joint', 'ENCAS-joint+'],# target_runs=[0, 1],
# if_log_scale_x=True)
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 9: HVs: impact_n_supernets
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar100_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 3, supernet_numbers=[1, 2, 5], set_xticks=True, label='ENCAS')#, target_runs=[0, 1])
# plot_hypervolumes_impact_n_supernets([
# 'cifar100_r0_alphaofa',
# 'cifar100_r0_5nets'], 'test_swa20',
# 30, ['search_algo:mo-gomea!subset_selector:reference'] * 2, supernet_numbers=[2, 5],
# set_xticks=False, label='ENCAS-joint')
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar100_r0_swa20_2nets_join_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002'], 'test',
# 30, ['mo-gomea'] * 2, supernet_numbers=[2, 5],
# set_xticks=False, label='ENCAS-joint+')
# plt.legend()
# plt.xlabel('Number of supernetworks')
# plt.ylabel('Hypervolume')
# plt.savefig(os.path.join(tmp_path, 'impact_n_supernets_cifar100.pdf'), bbox_inches='tight', pad_inches=0.01)
# plt.close()
# Fig. 10: HVs: impact_n_clones
# plt.figure(figsize=(8, 4))
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar100_r0_swa20_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_3nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_4nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 4, supernet_numbers=[2, 3, 4, 5], set_xticks=True, label='Different supernets')
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar100_r0_swa20_clones_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_clones_3nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_clones_4nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_clones_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 4, supernet_numbers=[2, 3, 4, 5], set_xticks=False, label='Different seeds')
# plt.legend()
# plt.xlabel('Number of supernetworks')
# plt.ylabel('Hypervolume')
# plt.savefig(os.path.join(tmp_path, 'impact_n_clones_cifar100.pdf'), bbox_inches='tight', pad_inches=0.01)
# plt.close()
# Fig. 12
# compare_test_many_experiments([
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['random', 'mo-gomea'],# 'nsganetv2'],
# out_name='random_acc_cifar100', pdf=True,
# legend_labels=['Random', 'MO-GOMEA'])
# ,target_runs=[0, 1, 2, 3])
# Fig. 11
# compare_test_many_experiments([
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_ensemble_moregranular3',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'] * 2,# 'nsganetv2'],
# out_name='ensemble_acc_cifar100', pdf=True,
# legend_labels=['ENCAS', 'ENENS'], if_log_scale_x=True)
# wanna know the median run to get the named models from it
# compare_test_many_experiments(['posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002'],
# 'test', if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'], print_median_run_flops_and_accs=True)
| 9,012 | 66.261194 | 155 |
py
|
ENCAS
|
ENCAS-main/plot_results/timm_pareto.py
|
'''
find pareto front of timm models, save it 10 times to make my code think there are 10 seeds (this is needed for plotting)
'''
import json
import numpy as np
import os
import yaml
import utils
from utils import NAT_LOGS_PATH
from utils_pareto import is_pareto_efficient
from pathlib import Path
path_test_data = os.path.join(NAT_LOGS_PATH, 'timm_all/pretrained/0/output_distrs_test/info.json')
loaded = json.load(open(path_test_data))
flops = np.array(loaded['flops'])
acc = np.array(loaded['test'])
err = 100 - acc
all_objs_cur = np.vstack((err, flops)).T
pareto_best_cur_idx = is_pareto_efficient(all_objs_cur)
flops_pareto = list(reversed(flops[pareto_best_cur_idx].tolist()))
acc_pareto = list(reversed(acc[pareto_best_cur_idx].tolist()))
print(flops_pareto, acc_pareto)
out_dir = os.path.join(utils.NAT_PATH, 'logs_classification', 'timm_all')
for i in range(10):
out_dir_cur = os.path.join(out_dir, str(i))
Path(out_dir_cur).mkdir(exist_ok=True)
out_file_cur = os.path.join(out_dir_cur, 'data.yml')
yaml.safe_dump(dict(flops=flops_pareto, test=acc_pareto), open(out_file_cur, 'w'))
| 1,113 | 31.764706 | 121 |
py
|
ENCAS
|
ENCAS-main/plot_results/plot_results_cifar10.py
|
import matplotlib.pyplot as plt
import utils
from plotting_functions import *
if __name__ == '__main__':
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
# plt.rcParams.update({'font.size': 15})
plt.rcParams.update({'font.size': 18})
plt.rcParams['axes.grid'] = True
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
tmp_path = os.path.join(utils.NAT_PATH, '.tmp')
from cycler import cycler
plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 4
# compare_test_many_experiments([
# 'cifar10_r0_proxyless_sep',
# 'cifar10_r0_ofa10_sep',
# 'cifar10_r0_ofa12_sep',
# 'cifar10_r0_attn_sep',
# 'cifar10_r0_alpha_sep',
# 'cifar10_reproducenat',
# ], ['test_swa20']*5 + ['test'], if_plot_many_in_one=True, max_iters=30,
# out_name='baselines_cifar10', pdf=True)
# Fig. 5
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#FBC15E', '#988ED5', '#348ABD'])
# compare_test_many_experiments(['cifar10_r0_attn_sep',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3',
# 'posthoc_cifar10_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# # 'logs_classification/cifar10_NsgaNet2_front',
# ], ['test_swa20'] + ['test'] * 3, if_plot_many_in_one=True, max_iters=30,
# algo_names=['search_algo:nsga3!subset_selector:reference', 'greedy',
# 'mo-gomea', 'mo-gomea', #'nsganet2'
# ],
# out_name='cascades_and_greedy_cifar10', pdf=True,
# legend_labels=['NAT (best)', 'GreedyCascade', 'ENCAS (1 supernet)',
# 'ENCAS (5 supernets)'])
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 6
# compare_test_many_experiments(['posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'logs_classification/cifar10_efficientnet_front',
# 'logs_classification/cifar10_NsgaNet2_front',
# 'logs_classification/cifar10_GDAS',
# 'logs_classification/cifar10_SETN',
# 'logs_classification/cifar10_DARTS',
# ], 'test', if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'] + ['whatever'] * 5,
# out_name='cmp_sota_cifar10', pdf=True,
# legend_labels=['ENCAS', 'EfficientNet', 'NSGANetV2', 'GDAS', 'SETN', 'DARTS'])
# Fig. 7
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33','#348ABD', '#8EBA42', '#FBC15E'])
# compare_test_many_experiments(['cifar10_r0_attn_sep',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'cifar10_r0_5nets',
# 'posthoc_cifar10_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
# ], ['test_swa20', 'test', 'test_swa20', 'test'], if_plot_many_in_one=True, max_iters=30,
# algo_names=['search_algo:nsga3!subset_selector:reference','mo-gomea',
# 'search_algo:mo-gomea!subset_selector:reference', 'mo-gomea'],
# out_name='sep_vs_join_cifar10', pdf=True,
# legend_labels=['NAT (best)','ENCAS', 'ENCAS-joint', 'ENCAS-joint+'],# target_runs=[0, 1],
# if_log_scale_x=True)
# plt.rcParams['axes.prop_cycle'] = cycler(color=['#E24A33', '#348ABD', '#988ED5', '#52bde0', '#FBC15E', '#8EBA42', '#FFB5B8', '#777777', 'tab:brown'])
# Fig. 9: HVs: impact_n_supernets
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar10_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 3, supernet_numbers=[1, 2, 5], set_xticks=True, label='ENCAS')#, target_runs=[0, 1])
# plot_hypervolumes_impact_n_supernets([
# 'cifar10_r0_alphaofa',
# 'cifar10_r0_5nets'], 'test_swa20',
# 30, ['search_algo:mo-gomea!subset_selector:reference'] * 2, supernet_numbers=[2, 5],
# set_xticks=False, label='ENCAS-joint')
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar10_r0_swa20_2nets_join_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002'], 'test',
# 30, ['mo-gomea'] * 2, supernet_numbers=[2, 5],
# set_xticks=False, label='ENCAS-joint+')
# plt.legend()
# plt.xlabel('Number of supernetworks')
# plt.ylabel('Hypervolume')
# plt.savefig(os.path.join(tmp_path, 'impact_n_supernets_cifar10.pdf'), bbox_inches='tight', pad_inches=0.01)
# plt.close()
# Fig. 10 (left): HVs: impact_n_clones
# plt.figure(figsize=(8, 4))
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar10_r0_swa20_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_3nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_4nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 4, supernet_numbers=[2, 3, 4, 5], set_xticks=True, label='Different supernets')
# plot_hypervolumes_impact_n_supernets([
# 'posthoc_cifar10_r0_swa20_clones_2nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_clones_3nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_clones_4nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_clones_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], 'test',
# 30, ['mo-gomea'] * 4, supernet_numbers=[2, 3, 4, 5], set_xticks=False, label='Different seeds')
# plt.legend()
# plt.xlabel('Number of supernetworks')
# plt.ylabel('Hypervolume')
# plt.savefig(os.path.join(tmp_path, 'impact_n_clones_cifar10.pdf'), bbox_inches='tight', pad_inches=0.01)
# plt.close()
# Fig. 10 (right)
# compare_test_many_experiments(['posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_clones_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea', 'mo-gomea'],
# out_name='clones5_cifar10', pdf=True,
# legend_labels=['ENCAS (different supernets)','ENCAS (different seeds)'])
# Fig. 11
# compare_test_many_experiments([
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['random', 'mo-gomea'],# 'nsganetv2'],
# out_name='random_acc_cifar10', pdf=True,
# legend_labels=['Random', 'MO-GOMEA'])
# Fig. 12
# compare_test_many_experiments([
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
# 'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_ensemble_moregranular3',
# ], ['test'] * 2, if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'] * 2,# 'nsganetv2'],
# out_name='ensemble_acc_cifar10', pdf=True,
# legend_labels=['ENCAS', 'ENENS'], if_log_scale_x=True)
# wanna know the median run to get the named models from it
# compare_test_many_experiments(['posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002'],
# 'test', if_plot_many_in_one=True, max_iters=30,
# algo_names=['mo-gomea'], print_median_run_flops_and_accs=True)
| 9,572 | 64.568493 | 155 |
py
|
ENCAS
|
ENCAS-main/plot_results/plot_hv_over_time.py
|
import itertools
import os
import glob
from pathlib import Path
import matplotlib
import pandas as pd
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import utils
from nat import NAT
import yaml
def compute_hypervolumes_over_time(run_path, **kwargs):
csv_path = glob.glob(os.path.join(run_path, '*.csv'))[0]
save_path = os.path.join(run_path, kwargs['out_name_hvs'])
if os.path.exists(save_path) and not kwargs.get('overwrite', False):
print(f'Loaded the already-computed values for {run_path}')
return yaml.safe_load(open(save_path, 'r'))#, Loader=yaml.BaseLoader)
df = pd.read_csv(os.path.join(csv_path), sep=' ')
    fitnesses = np.array([[float(t) for t in x.strip('()').split(',')] for x in df.iloc[:, -1]], dtype=float)  # np.float is deprecated/removed in recent NumPy
if 'gomea.csv' in csv_path:
fitnesses *= -1
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
pareto = np.array([])
hvs = []
if_pareto_changed = False
hvs_step = 100
for i, (top1_err, flops) in enumerate(fitnesses):
print(i)
if len(pareto) == 0:
pareto = np.array([[top1_err, flops]])
continue
idx_dominate0 = pareto[:, 0] < top1_err
idx_dominate1 = pareto[:, 1] < flops
is_dominated = np.any(idx_dominate0 * idx_dominate1)
if not is_dominated:
idx_dominated0 = pareto[:, 0] >= top1_err
idx_dominated1 = pareto[:, 1] >= flops
            idx_not_dominated = (1 - idx_dominated0 * idx_dominated1).astype(bool)  # np.bool is deprecated/removed in recent NumPy
pareto = pareto[idx_not_dominated]
pareto = np.append(pareto, [[top1_err, flops]], axis=0)
if_pareto_changed = True
print(f'{pareto.shape=}')
if (i + 1) % hvs_step == 0:
if if_pareto_changed:
hv = utils.compute_hypervolume(ref_pt, pareto, if_increase_ref_pt=False, if_input_already_pareto=True)
if_pareto_changed = False
hvs.append(float(hv))
# if (i + 1) % 60000 == 0:
# plt.plot(pareto[:, 1], utils.get_metric_complement(pareto[:, 0], False), 'o')
# plt.title(str(hv))
# plt.show()
out = hvs_step, hvs
yaml.safe_dump(out, open(save_path, 'w'), default_flow_style=None)
return out
def plot_hvs_with_stds(experiment_path, algo_name_to_seed_to_result, **kwargs):
clist = matplotlib.rcParams['axes.prop_cycle']
cgen = itertools.cycle(clist)
map_algo_name = {'random': 'Random search', 'mo-gomea': 'MO-GOMEA'}
for algo_name, seed_to_result in algo_name_to_seed_to_result.items():
n_seeds = len(seed_to_result)
all_hvs_cur = np.zeros((n_seeds, len(seed_to_result[0][1])))
for i in range(n_seeds):
hvs_step, hvs_cur = seed_to_result[i]
all_hvs_cur[i] = hvs_cur
mean_hvs_cur = np.mean(all_hvs_cur, axis=0)
std_hvs_cur = np.std(all_hvs_cur, axis=0)
x_ticks = np.arange(1, all_hvs_cur.shape[-1] + 1) * hvs_step
cur_color = next(cgen)['color']
plt.plot(x_ticks, mean_hvs_cur, label=map_algo_name[algo_name], c=cur_color)
plt.fill_between(x_ticks, mean_hvs_cur - std_hvs_cur, mean_hvs_cur + std_hvs_cur,
facecolor=cur_color + '50')
plt.legend()
plt.xlabel('Evaluations - log scale')
plt.xscale('log')
if kwargs.get('show_title', True):
plt.title(f'Hypervolume, {kwargs["experiment_name"]}')
plt_path = kwargs.get('plt_path', os.path.join(experiment_path, 'hv_over_time.png'))
plt.savefig(plt_path, bbox_inches='tight', pad_inches=0)
plt.show()
plt.close()
def plot_hvs_experiment(experiment_name, **kwargs):
utils.execute_func_for_all_runs_and_combine(experiment_name, compute_hypervolumes_over_time, plot_hvs_with_stds, **kwargs)
if __name__ == '__main__':
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
# plt.rcParams.update({'font.size': 15})
plt.rcParams.update({'font.size': 18})
plt.rcParams['axes.grid'] = True
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
tmp_path = os.path.join(utils.NAT_PATH, '.tmp')
# Fig. 12
plot_hvs_experiment('posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
out_name_hvs='hvs.yml', target_algos=['random', 'mo-gomea'], show_title=False,
plt_path=os.path.join(tmp_path, 'vs_random_hv_cifar10.pdf'), overwrite=False)
plot_hvs_experiment('posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
out_name_hvs='hvs.yml', target_algos=['random', 'mo-gomea'], show_title=False,
plt_path=os.path.join(tmp_path, 'vs_random_hv_cifar100.pdf'), overwrite=False)
plot_hvs_experiment('posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
out_name_hvs='hvs.yml', target_algos=['random', 'mo-gomea'], show_title=False,
plt_path=os.path.join(tmp_path, 'vs_random_hv_imagenet.pdf'), overwrite=False)
| 5,163 | 40.98374 | 126 |
py
|
ENCAS
|
ENCAS-main/plot_results/stat_test.py
|
from plotting_functions import *
from scipy.stats import wilcoxon
def get_wilcoxon_p(x, y):
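    # one-sided Wilcoxon signed-rank test: the returned p-value is for the hypothesis that the
    # paired differences x - y are shifted towards values greater than zero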
print(x)
print(y)
return wilcoxon(x, y, alternative='greater').pvalue
if __name__ == '__main__':
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams.update({'font.size': 15})
plt.rcParams['axes.grid'] = True
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
tmp_path = os.path.join(utils.NAT_PATH, '.tmp')
print('0. ENCAS (1 supernetwork) > NAT (best)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
'cifar10_r0_attn_sep',], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
'cifar100_r0_alpha_sep',], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_1nets_sep_n5_evals600000_cascade_moregranular3_002',
'imagenet_r0_alpha_sep',], ['test', 'test'], 15,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0],
all_hvs_c10[1] + all_hvs_c100[1] + all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0],
all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('1. ENCAS (5 supernetworks) > NAT (best)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'cifar10_r0_attn_sep',], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'cifar100_r0_alpha_sep',], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'imagenet_r0_alpha_sep',], ['test', 'test'], 15,
algo_names=['mo-gomea', 'search_algo:nsga3!subset_selector:reference'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0],
all_hvs_c10[1] + all_hvs_c100[1] + all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0],
all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('2. ENCAS (5 supernetworks) > ENCAS (1 supernetwork)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test'] * 2, 30, algo_names=['mo-gomea']*2)
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_1nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test'] * 2, 30, algo_names=['mo-gomea'] * 2)
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_imagenet_r0_1nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test', 'test'], 15,
algo_names=['mo-gomea'] * 2)
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0], all_hvs_c10[1] + all_hvs_c100[1] + all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0], all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('3. ENCAS (5 supernetworks) > GreedyCascade')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3',], ['test', 'test'], 30,
algo_names=['mo-gomea', 'greedy'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3',], ['test', 'test'], 30,
algo_names=['mo-gomea', 'greedy'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3',], ['test', 'test'], 15,
algo_names=['mo-gomea', 'greedy'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0],
all_hvs_c10[1] + all_hvs_c100[1] + all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0],
all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('4. ENCAS-joint+ > ENCAS-joint')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
'cifar10_r0_5nets'], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:mo-gomea!subset_selector:reference'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
'cifar100_r0_5nets',], ['test', 'test_swa20'], 30,
algo_names=['mo-gomea', 'search_algo:mo-gomea!subset_selector:reference'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0], all_hvs_c10[1] + all_hvs_c100[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0], all_max_accs_c10[1] + all_max_accs_c100[1]))
print('5. ENCAS-joint+ > ENCAS')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002'], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_join_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0], all_hvs_c10[1] + all_hvs_c100[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0], all_max_accs_c10[1] + all_max_accs_c100[1]))
print('6. ENCAS (5 supernetworks) > ENCAS with 5 clones of the best supernet')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_clones_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_clones_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0], all_hvs_c10[1] + all_hvs_c100[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0], all_max_accs_c10[1] + all_max_accs_c100[1]))
print('7. ENCAS + MO-GOMEA > ENCAS + Random search (val)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['val', 'val'], 30,
algo_names=['mo-gomea', 'random'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['val', 'val'], 30,
algo_names=['mo-gomea', 'random'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002'], ['val', 'val'], 15,
algo_names=['mo-gomea', 'random'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0], all_hvs_c10[1] + all_hvs_c100[1]+ all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0], all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('8. ENCAS + MO-GOMEA > ENCAS + Random search (test)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'random'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'random'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002'], ['test', 'test'], 15,
algo_names=['mo-gomea', 'random'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0], all_hvs_c10[1] + all_hvs_c100[1]+ all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0], all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
print('9. ENCAS (5 supernetworks) > ENCAS-ensemble (5 supernetworks)')
all_hvs_c10, all_max_accs_c10 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar10_r0_swa20_5nets_sep_n5_evals600000_ensemble_moregranular3',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
all_hvs_c100, all_max_accs_c100 = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_cifar100_r0_swa20_5nets_sep_n5_evals600000_ensemble_moregranular3',
], ['test', 'test'], 30,
algo_names=['mo-gomea', 'mo-gomea'])
all_hvs_img, all_max_accs_img = get_hypervolumes_and_max_accs_for_stat_testing([
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_cascade_moregranular3_002',
'posthoc_imagenet_r0_5nets_sep_n5_evals600000_ensemble_moregranular3'], ['test', 'test'], 15,
algo_names=['mo-gomea', 'mo-gomea'])
print('hv: ', get_wilcoxon_p(all_hvs_c10[0] + all_hvs_c100[0] + all_hvs_img[0], all_hvs_c10[1] + all_hvs_c100[1] + all_hvs_img[1]))
print('max acc: ', get_wilcoxon_p(all_max_accs_c10[0] + all_max_accs_c100[0] + all_max_accs_img[0], all_max_accs_c10[1] + all_max_accs_c100[1] + all_max_accs_img[1]))
| 13,475 | 73.043956 | 170 |
py
|
ENCAS
|
ENCAS-main/plot_results/plotting_functions.py
|
import re
import os
import json
from collections import defaultdict
import glob
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
import itertools
from textwrap import fill
from PIL import Image
import yaml
import hashlib
from pdf2image import convert_from_path
import utils
from utils_pareto import is_pareto_efficient, get_best_pareto_from_iter, get_best_pareto_up_and_including_iter
from scipy.stats import spearmanr
from evaluate import evaluate_many_configs
nsga_logs_path = utils.NAT_LOGS_PATH
from utils import images_list_to_grid_image
def eval_cumulative_pareto_front_single_run(experiment_path, dataset_type='test', **kwargs):
max_iter = kwargs.get('max_iter', 15)
max_iter_path = os.path.join(nsga_logs_path, experiment_path, f'iter_{max_iter}')
if_post_hoc_ensemble = 'posthoc' in experiment_path and 'posthocheavy' not in experiment_path
save_path_base = experiment_path if if_post_hoc_ensemble else max_iter_path
dataset_postfix = kwargs.get('dataset_postfix', '')
swa = kwargs.get('swa', None)
if if_post_hoc_ensemble:
ensemble_information_path = os.path.join(save_path_base, f'posthoc_ensemble_from_stored_filtered.yml')
ensemble_information = yaml.safe_load(open(ensemble_information_path, 'r'))
dataset_postfix = ensemble_information['dataset_postfix']
if 'swa' in dataset_postfix:
            swa = int(re.search(r'(\d+)(?!.*\d)', dataset_postfix)[0])  # last number in the name
save_output_path = os.path.join(save_path_base, f'best_pareto_val_and_{dataset_type}{dataset_postfix}.json')
if_from_stored_posthoc = kwargs.get('if_from_stored', False)
if if_from_stored_posthoc:
save_output_path = os.path.join(save_path_base, f'posthoc_ensemble_from_stored_filtered.yml')
if not os.path.exists(save_output_path):
save_output_path = os.path.join(save_path_base, f'posthoc_ensemble_from_stored.yml')
if not (if_from_stored_posthoc and Path(save_output_path).is_file()):
        # the NAT config is needed both when restoring stored results and when producing them from scratch
nat_config_path = os.path.join(nsga_logs_path, experiment_path, 'config_msunas.yml')
nat_config = yaml.safe_load(open(nat_config_path, 'r'))
search_space_name = nat_config.get('search_space', 'ofa')
n_iters = len(glob.glob(os.path.join(nsga_logs_path, experiment_path, "iter_*.stats")))
if n_iters < max_iter and not if_post_hoc_ensemble:
print(f'Detected an unfinished run (<{max_iter} iterations) => skip')
return None
if Path(save_output_path).is_file():
if not if_from_stored_posthoc:
loaded_data = json.load(open(save_output_path))
else:
loaded_data = yaml.safe_load(open(save_output_path))
nat_config = {'dataset': loaded_data['dataset_name']}
val = loaded_data['val']
test = loaded_data[dataset_type]
flops = loaded_data['flops']
cfgs = loaded_data['cfgs']
print(cfgs[-1])
# true_errs = 100 - np.array(val)
# accs_test = 100 - np.array(test)
true_errs = np.array(val)
accs_test = np.array(test)
print(accs_test[-1])
else:
accs_test = []
run_config = None
if not if_post_hoc_ensemble:
if search_space_name == 'reproduce_nat':
cfgs, true_errs, flops = get_best_pareto_from_iter(experiment_path, max_iter)
iters = [max_iter] * len(cfgs)
else:
cfgs, true_errs, flops, iters = get_best_pareto_up_and_including_iter(experiment_path, max_iter)
if swa is not None:
iters = [max_iter] * len(cfgs)
subsample = lambda l: l#[-1:]# lambda l: l[::3]#[-2:]
cfgs, true_errs, flops, iters = subsample(cfgs), subsample(true_errs), subsample(flops), subsample(iters)
print(f'{flops=}')
if search_space_name == 'ensemble':
cfgs = [list(c) for c in cfgs] # otherwise it's an ndarray and can't be saved in json
n_cfgs = len(cfgs)
fst_same_iter = 0
last_same_iter = 0
same_iter = iters[0]
i = 1
if_predicted_all = False
flops_recomputed = []
while not if_predicted_all:
if (i == n_cfgs) or (i < n_cfgs and iters[i] != same_iter):
path_to_supernet_or_its_dir = []
for supernet_path_cur in nat_config['supernet_path']:
basename = os.path.basename(supernet_path_cur)
if swa is not None:
basename = utils.transform_supernet_name_swa(basename, swa)
path_to_supernet_or_its_dir.append(os.path.join(nsga_logs_path, experiment_path, f'iter_{same_iter}', basename))
ensemble_ss_names = nat_config['ensemble_ss_names']
accs_test_cur, info = evaluate_many_configs(path_to_supernet_or_its_dir, cfgs[fst_same_iter:last_same_iter+1],
config_msunas=nat_config,
if_test='test' in dataset_type, search_space_name=search_space_name,
ensemble_ss_names=ensemble_ss_names,
info_keys_to_return=['flops', 'run_config'], run_config=run_config,
if_use_logit_gaps=False)
accs_test += accs_test_cur
flops_recomputed += info['flops']
run_config = info['run_config'][0] # a hack for speed
fst_same_iter = i
last_same_iter = i
if i < n_cfgs: # to cover the case of the last iter
same_iter = iters[i]
else:
last_same_iter += 1
i += 1
if_predicted_all = i > n_cfgs
flops = flops_recomputed
else:
# loaded = yaml.safe_load(open(os.path.join(experiment_path, 'posthoc_ensemble.yml'), 'r'))
loaded = yaml.safe_load(open(os.path.join(experiment_path, 'posthoc_ensemble_from_stored_filtered.yml'), 'r')) # need filtered => require from_stored
cfgs, true_errs, flops, weight_paths, = loaded['cfgs'], loaded['true_errs'], loaded['flops'], loaded['weight_paths']
subsample = lambda l: l#[-1:]#[::20] + l[-2:] #[::10] #[::3]#
cfgs, true_errs, flops, weight_paths = subsample(cfgs), subsample(true_errs), subsample(flops), subsample(weight_paths)
# ensemble_ss_names = nat_config['ensemble_ss_names']
search_space_names = subsample(loaded['search_space_names'])
if_cascade = 'thresholds' in loaded
if if_cascade:
thresholds = subsample(loaded['thresholds'])
algo = loaded.get('algo', None)
print(f'{flops=}')
flops_recomputed = []
for i, (cfg, weight_path_cur_ensemble) in enumerate(zip(cfgs, weight_paths)):
path_to_supernet_or_its_dir = []
ensemble_ss_names = search_space_names[i]
ss_name_to_supernet_path = {'ofa12': 'supernet_w1.2', 'ofa10': 'supernet_w1.0',
'alphanet': 'alphanet_pretrained.pth.tar',
'attn': 'attentive_nas_pretrained.pth.tar',
'proxyless': 'ofa_proxyless_d234_e346_k357_w1.3',
'noop': 'noop',}
supernet_paths = [ss_name_to_supernet_path[ss_name] for ss_name in ensemble_ss_names]
ss_name_to_expected_ss_name = {'ofa12': 'ofa', 'ofa10': 'ofa', 'ofa': 'ofa',
'alphanet': 'alphanet', 'attn': 'alphanet',
'proxyless': 'proxyless',
'noop': 'noop'}
ensemble_ss_names = [ss_name_to_expected_ss_name[ss] for ss in ensemble_ss_names]
for supernet_path_from_config, weights_to_use_path in zip(supernet_paths, weight_path_cur_ensemble):
basename = os.path.basename(supernet_path_from_config)
if swa is not None:
basename = utils.transform_supernet_name_swa(basename, swa)
weights_to_use_path = re.sub(r'iter_\d+', f'iter_{max_iter}', weights_to_use_path)
path_to_supernet_or_its_dir.append(os.path.join(weights_to_use_path, basename))
if if_cascade:
thresholds_cur = thresholds[i]
if algo is not None and algo == 'greedy':
# remove trailing zeros
thresholds_cur = [t for t in thresholds_cur if t != 0]
accs_test_cur, info = evaluate_many_configs(path_to_supernet_or_its_dir, [cfg],
config_msunas=nat_config, if_test='test' in dataset_type,
search_space_name=search_space_name, ensemble_ss_names=ensemble_ss_names,
info_keys_to_return=['flops', 'run_config'], run_config=run_config,
thresholds=None if not if_cascade else thresholds_cur,
if_use_logit_gaps=algo is not None and algo == 'greedy')
accs_test += accs_test_cur
flops_recomputed += info['flops']
run_config = info['run_config'][0] # a hack for speed
flops = flops_recomputed
print(accs_test)
print(f'{type(true_errs)}, {type(accs_test)}, {type(flops)}, {type(cfgs)}')
print(f'{true_errs=}')
print(f'{flops=}')
print(f'{cfgs=}')
dict_to_dump = {'val': list(true_errs), dataset_type: list(accs_test), 'flops': list(flops), 'cfgs': list(cfgs)}
with open(save_output_path, 'w') as handle:
json.dump(dict_to_dump, handle)
accs_val = utils.get_metric_complement(np.array(true_errs))
plt.plot(flops, accs_val, '-o', label='val')
plt.plot(flops, accs_test, '-o', label=dataset_type)
plt.legend()
plt.title(fill(experiment_path + f'; corr={spearmanr(accs_val, accs_test)[0]:.2f}', 70))
plt.xlabel('Flops')
plt.ylabel('Accuracy')
if 'cifar100' in experiment_path:
if np.median(accs_test) > 70:
plt.xlim(0, 3700)
plt.ylim(70, 90)
# pass
elif 'cifar10' in experiment_path:
if np.median(accs_test) > 85:
plt.xlim(0, 3700)
plt.ylim(85, 100)
plt_path = os.path.join(save_path_base, f'best_pareto_val_and_{dataset_type}.png')
plt.savefig(plt_path, bbox_inches='tight', pad_inches=0)
plt.show()
plt.close()
return plt_path
def compare_test_many_experiments(experiment_names, dataset_types, max_iters=None, annotation_shifts=None,
if_plot_many_in_one=False, algo_names=None, target_runs=None, **kwargs):
if max_iters is None:
max_iters = [15] * len(experiment_names)
elif type(max_iters) is int:
max_iters = [max_iters] * len(experiment_names)
if annotation_shifts is None:
annotation_shifts = [(-10, 10 * (-1) ** (i + 1)) for i in range(len(experiment_names))]
else:
annotation_shifts = [(-10, 10 * sh) for sh in annotation_shifts]
if not (type(dataset_types) is list):
dataset_types = [dataset_types] * len(experiment_names)
nsga_path = utils.NAT_PATH
nsga_logs_path = utils.NAT_LOGS_PATH
tmp_path = os.path.join(nsga_path, '.tmp')
experiment_and_datatype_to_seed_to_obj0 = defaultdict(dict)
experiment_and_datatype_to_seed_to_obj1 = defaultdict(dict)
n_seeds = []
# 1. read data
for i_exp, (experiment_name, max_iter, dataset_type) in enumerate(zip(experiment_names, max_iters, dataset_types)):
postfix = '' if algo_names is None else algo_names[i_exp]
if 'logs_classification' not in experiment_name:
experiment_path = os.path.join(nsga_logs_path, experiment_name)
# load values // assume they are already computed
for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
if not f.is_dir():
continue
name_cur = f.name
if algo_names is not None and algo_names[i_exp] != name_cur:
continue
n_seeds_cur = 0
for run_folder in sorted(os.scandir(f.path), key=lambda e: e.name):
if not run_folder.is_dir():
continue
run_idx = int(run_folder.name)
if target_runs is not None and run_idx not in target_runs:
continue
run_path = os.path.join(experiment_path, name_cur, str(run_idx))
if 'posthoc' in experiment_name:
stored_accs_path = os.path.join(run_path, f'best_pareto_val_and_{dataset_type}.json')
else:
stored_accs_path = os.path.join(run_path, f'iter_{max_iter}', f'best_pareto_val_and_{dataset_type}.json')
if os.path.exists(stored_accs_path):
loaded_data = json.load(open(stored_accs_path))
test = np.array(loaded_data[dataset_type])
flops = np.array(loaded_data['flops'])
else:
# maybe it's posthoc + from_stored?
# stored_accs_path = os.path.join(run_path, 'posthoc_ensemble_from_stored.yml')
stored_accs_path = os.path.join(run_path, f'posthoc_ensemble_from_stored_filtered.yml')
if not os.path.exists(stored_accs_path):
stored_accs_path = os.path.join(run_path, f'posthoc_ensemble_from_stored.yml')
loaded_data = yaml.safe_load(open(stored_accs_path))
test = np.array(loaded_data[dataset_type])
flops = np.array(loaded_data['flops'])
experiment_and_datatype_to_seed_to_obj0[experiment_name + '_' + dataset_type + postfix][run_idx] = test
experiment_and_datatype_to_seed_to_obj1[experiment_name + '_' + dataset_type + postfix][run_idx] = flops
n_seeds_cur += 1
n_seeds.append(n_seeds_cur)
else:
experiment_path = os.path.join(nsga_path, experiment_name)
n_seeds_cur = 0
for run_folder in sorted(os.scandir(experiment_path), key=lambda e: e.name):
if not run_folder.is_dir():
continue
run_idx = int(run_folder.name)
if target_runs is not None and run_idx not in target_runs:
continue
run_path = os.path.join(experiment_path, str(run_idx))
data = yaml.safe_load(open(os.path.join(run_path, 'data.yml')))
test = data['test']
flops = data['flops']
experiment_and_datatype_to_seed_to_obj0[experiment_name + '_' + dataset_type + postfix][run_idx] = test
experiment_and_datatype_to_seed_to_obj1[experiment_name + '_' + dataset_type + postfix][run_idx] = flops
n_seeds_cur += 1
n_seeds.append(n_seeds_cur)
n_seeds = min(n_seeds)
image_paths = []
map_exp_names = {'cifar10_r0_proxyless_sep': 'NAT + ProxylessNAS',
'cifar10_r0_ofa10_sep': 'NAT + OFA-w1.0',
'cifar10_r0_ofa12_sep': 'NAT + OFA-w1.2',
'cifar10_r0_attn_sep': 'NAT + AttentiveNAS',
'cifar10_r0_alpha_sep': 'NAT + AlphaNet',
'cifar10_reproducenat': 'NAT (reproduced)',
'cifar100_r0_proxyless_sep': 'NAT + ProxylessNAS',
'cifar100_r0_ofa10_sep': 'NAT + OFA-w1.0',
'cifar100_r0_ofa12_sep': 'NAT + OFA-w1.2',
'cifar100_r0_attn_sep': 'NAT + AttentiveNAS',
'cifar100_r0_alpha_sep': 'NAT + AlphaNet',
'cifar100_reproducenat': 'NAT (reproduced)',
'imagenet_r0_proxyless_sep': 'NAT + ProxylessNAS',
'imagenet_r0_ofa10_sep': 'NAT + OFA-w1.0',
'imagenet_r0_ofa12_sep': 'NAT + OFA-w1.2',
'imagenet_r0_attn_sep': 'NAT + AttentiveNAS',
'imagenet_r0_alpha_sep': 'NAT + AlphaNet',
}
# map_exp_names = {}
# experiment_names_pretty = [map_exp_names.get(name, name).replace('+', '\n+') for name in experiment_names]
experiment_names_pretty = [map_exp_names.get(name, name) for name in experiment_names]
def set_lims(exp_name):
if 'cifar100' in exp_name:
pass
# plt.xlim(0, 3800)
# plt.xlim(0, 2750)
# plt.xlim(0, 2200)
# plt.ylim(75, 90)
# plt.xscale('log')
elif 'cifar10' in exp_name:
pass
# plt.xlim(0, 2750)
# plt.xlim(0, 3700)
# plt.xlim(0, 2200)
# plt.ylim(95, 99)
elif 'imagenet' in exp_name:
pass
# plt.xlim(100, 2100)
# plt.ylim(77, 83)
# plt.xlim(left=200, right=2800)
# plt.xscale('log')
plt.ylabel('Accuracy')
if kwargs.get('if_log_scale_x', False):
plt.xscale('log')
plt.xlabel('Avg. MFLOPS - log scale')
markers = ['-o', '-X', '-+', '-_']
if_add_dataset_type_to_label = True
if_show_title = True
if_save_as_pdf = kwargs.get('pdf', False)
if not if_plot_many_in_one: # either make separate plots (this if-branch), or plot shaded area (the other branch)
for seed in range(n_seeds):
plt.figure(figsize=(10, 8))
cur_marker_idx = 0
for i, (experiment_name, dataset_type) in enumerate(zip(experiment_names, dataset_types)):
postfix = '' if algo_names is None else algo_names[i]
obj0 = experiment_and_datatype_to_seed_to_obj0[experiment_name + '_' + dataset_type + postfix][seed]
obj1 = experiment_and_datatype_to_seed_to_obj1[experiment_name + '_' + dataset_type + postfix][seed]
name_postfix = (f' ({dataset_type})' + postfix) if if_add_dataset_type_to_label else ''
plt.plot(obj1, obj0, markers[cur_marker_idx], label=experiment_names_pretty[i] + name_postfix)#, alpha=0.7)
# plt.annotate(r"$\bf{" + f'{obj0[-1]:.2f}' + '}$', xy=(obj1[-1], obj0[-1]), xytext=annotation_shifts[i],
# textcoords='offset points')
cur_marker_idx = (cur_marker_idx + 1) % len(markers)
set_lims(experiment_names[0])
plt.xlabel('FLOPS')
if if_show_title:
plt.title(f'{dataset_types}, {seed=}')
# plt.title(f'Performance on {dataset_type}')
plt.subplots_adjust(bottom=0.3)
plt.legend(bbox_to_anchor=(-0.1, -0.1), mode="expand")
# plt.legend(bbox_to_anchor=(1.12, -0.15), ncol=2)
im_path = os.path.join(tmp_path, f'{seed}.png')
plt.savefig(im_path, bbox_inches='tight', pad_inches=0)
image_paths.append(im_path)
plt.show()
else:
markers = ['-o', '-s', '-X', '-+', '-v', '-^', '-<', '->', '-D']
cur_marker_idx = 0
plt.figure(figsize=(6.6, 6.6))
for i_exp, (experiment_name, dataset_type) in enumerate(zip(experiment_names, dataset_types)):
postfix = '' if algo_names is None else algo_names[i_exp]
obj0_all, obj1_all, test_hv_all = [], [], []
            for seed in range(n_seeds):  # collect the fronts of all seeds first; the median-hypervolume seed is plotted below
obj0 = np.array(experiment_and_datatype_to_seed_to_obj0[experiment_name + '_' + dataset_type + postfix][seed])
obj1 = np.array(experiment_and_datatype_to_seed_to_obj1[experiment_name + '_' + dataset_type + postfix][seed])
obj0_all.append(obj0)
obj1_all.append(obj1)
# compute test hypervolume
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
test_hv = utils.compute_hypervolume(ref_pt, np.column_stack([100 - obj0, obj1]), if_increase_ref_pt=False)
test_hv_all.append(test_hv)
idx_median = np.argsort(test_hv_all)[len(test_hv_all) // 2]
print(f'{idx_median=}')
legend_labels = kwargs.get('legend_labels', [postfix] * len(experiment_names))
postfix = legend_labels[i_exp]
# print(f'{obj1_all[idx_median]=}')
plt.plot(obj1_all[idx_median], obj0_all[idx_median], markers[cur_marker_idx], label=experiment_names_pretty[i_exp] if postfix == '' else postfix)
cur_marker_idx = (cur_marker_idx + 1) % len(markers)
if 'logs_classification' not in experiment_name:
obj0_all = list(itertools.chain(*obj0_all))
obj1_all = list(itertools.chain(*obj1_all))
idx_sort_flops = np.argsort(obj1_all)
obj0_all = np.array(obj0_all)[idx_sort_flops]
obj1_all = np.array(obj1_all)[idx_sort_flops]
objs_all_for_pareto = np.vstack((100 - obj0_all, obj1_all)).T
idx_pareto = is_pareto_efficient(objs_all_for_pareto)
pareto = objs_all_for_pareto[idx_pareto].T
# by "antipareto" I mean the bottom edge of the point set; in this terminology, all the points lie above antipareto and below pareto
objs_all_for_antipareto = np.vstack((obj0_all, -obj1_all)).T
idx_antipareto = is_pareto_efficient(objs_all_for_antipareto)
antipareto = objs_all_for_antipareto[idx_antipareto].T
fill_x = np.append(pareto[1], -antipareto[1][::-1])
fill_y = np.append(100 - pareto[0], antipareto[0][::-1])
plt.fill(fill_x, fill_y, alpha=0.5)
plt.xlabel('Avg. MFLOPS')
set_lims(experiment_names[0])
plt.legend()
name = kwargs.get('out_name', f'{str(experiment_names).replace(r"/", "_")[:100]}')
im_path = os.path.join(tmp_path, name + ('.pdf' if if_save_as_pdf else '.png'))
plt.savefig(im_path, bbox_inches='tight', pad_inches=0.01)#, dpi=300)
image_paths.append(im_path)
plt.show()
if not if_save_as_pdf:
w, h = Image.open(image_paths[0]).size
open_or_create_image = lambda path: Image.new(mode='RGB', size=(w, h)) if path is None else Image.open(path)
else:
open_or_create_image = lambda path: convert_from_path(path)[0]
ims = [open_or_create_image(p) for p in image_paths]
grid_im = images_list_to_grid_image(ims, if_draw_grid=True, n_rows=1)
grid_im.save(os.path.join(tmp_path, f'grid_many_experiments_{dataset_types}_{hashlib.sha256(str(experiment_names).encode("utf-8")).hexdigest()}.png'))
if kwargs.get('print_median_run_flops_and_accs', False): # this is for named models in the appendix
assert len(experiment_names) == 1
algo_to_seed_to_result = get_test_metrics(experiment_names[0], dataset_types[0], max_iter=max_iters[0], algo_name=algo_names[0])
print(experiment_names[0], dataset_types[0], max_iters[0], algo_names[0])
seed_to_result = algo_to_seed_to_result[algo_names[0]]
metrics = seed_to_result[idx_median]
flops = [int(x) for x in metrics['flops']]
accs = metrics[dataset_type].tolist()
print(f'{list(zip(flops, accs))=}')
print_test_metrics_best_mean_and_std_many(experiment_names, dataset_types, max_iters, algo_names, target_runs=target_runs)
def combine_runs_make_image(experiment_path, algo_name_to_seed_to_image_path, dataset_type, out_img_name_lambda, **kwargs):
image_paths = []
print(f'{algo_name_to_seed_to_image_path=}')
for seed_to_image_path in algo_name_to_seed_to_image_path.values():
print(f'{seed_to_image_path=}')
image_paths += list(seed_to_image_path.values())
w, h = Image.open(image_paths[0]).size
open_or_create_image = lambda path: Image.new(mode='RGB', size=(w, h)) if path is None else Image.open(path)
ims = [open_or_create_image(p) for p in image_paths]
grid_im = images_list_to_grid_image(ims, if_draw_grid=True, n_rows=len(algo_name_to_seed_to_image_path))
grid_im.save(os.path.join(experiment_path, out_img_name_lambda(dataset_type)))
def compare_val_and_test(experiment_name, dataset_type='test', **kwargs):
dataset_postfix = kwargs.get('dataset_postfix', '')
utils.execute_func_for_all_runs_and_combine(experiment_name, eval_cumulative_pareto_front_single_run,
func_combine=combine_runs_make_image,
dataset_type=dataset_type,
out_img_name_lambda=lambda dataset_type: f'grid_best_pareto_val_and_{dataset_type}{dataset_postfix}.png',
**kwargs)
def read_cumulative_pareto_front_metrics_single_run(experiment_path, dataset_type='test', **kwargs):
max_iter = kwargs.get('max_iter', 15) # 15 # 30
max_iter_path = os.path.join(nsga_logs_path, experiment_path, f'iter_{max_iter}')
if_post_hoc_ensemble = 'posthoc' in experiment_path and 'posthocheavy' not in experiment_path
save_path_base = experiment_path if if_post_hoc_ensemble else max_iter_path
save_output_path = os.path.join(save_path_base, f'best_pareto_val_and_{dataset_type}.json')
if not Path(save_output_path).is_file():
if if_post_hoc_ensemble:
# the path will be different if ensemble was evaluated from stored outputs
save_output_path = os.path.join(save_path_base, f'posthoc_ensemble_from_stored_filtered.yml')
if not os.path.exists(save_output_path):
save_output_path = os.path.join(save_path_base, f'posthoc_ensemble_from_stored.yml')
loaded_data = yaml.safe_load(open(save_output_path))
else:
raise FileNotFoundError(save_output_path)
else:
loaded_data = json.load(open(save_output_path))
loaded_data_acc = np.array(loaded_data[dataset_type])
    if dataset_type == 'val':  # 'val' stores errors rather than accuracies (see eval_cumulative_pareto_front_single_run), so convert
loaded_data_acc = 100 - loaded_data_acc
return {'test': loaded_data_acc, 'flops': loaded_data['flops']}
def get_test_metrics(experiment_name, dataset_type='test', **kwargs):
algo_name_to_seed_to_result = utils.execute_func_for_all_runs_and_combine(experiment_name,
read_cumulative_pareto_front_metrics_single_run,
dataset_type=dataset_type,
**kwargs)
return algo_name_to_seed_to_result
def get_test_metrics_best_mean_and_std(experiment_name, dataset_type='test', max_iter=15, algo_name=None, **kwargs):
if 'logs_classification' not in experiment_name and 'segmentation_logs' not in experiment_name:
algo_name_to_seed_to_result = utils.execute_func_for_all_runs_and_combine(experiment_name, read_cumulative_pareto_front_metrics_single_run,
dataset_type=dataset_type, max_iter=max_iter, **kwargs, target_algos=algo_name)
if algo_name is None:
algo_name = list(algo_name_to_seed_to_result.keys())[0]
seed_to_result = algo_name_to_seed_to_result[algo_name]
test_metrics = seed_to_result.values()
else:
nsga_path = utils.NAT_PATH
test_metrics = []
for seed_dir in sorted(os.scandir(os.path.join(nsga_path, experiment_name)), key=lambda e: e.name):
data = yaml.safe_load(open(os.path.join(nsga_path, experiment_name, seed_dir.name, 'data.yml')))
test_metrics.append({'test': data['test'], 'flops': data['flops']})
def mean_and_std_for_max(ar):
best = [np.max(x) for x in ar]
mean, std = np.mean(best), np.std(best)
return mean, std
def mean_and_std_for_last(ar):
last = [x[-1] for x in ar]
mean, std = np.mean(last), np.std(last)
return mean, std
def compute_hypervolume(dict_metrics):
test = np.array(dict_metrics['test'])
flops = np.array(dict_metrics['flops'])
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
test_hv = utils.compute_hypervolume(ref_pt, np.column_stack([100 - test, flops]), if_increase_ref_pt=False)
return test_hv
def mean_and_std(ar):
return np.mean(ar), np.std(ar)
return {'test': mean_and_std_for_last([x['test'] for x in test_metrics]),
'flops': mean_and_std_for_max([x['flops'] for x in test_metrics]),
'hv': mean_and_std([compute_hypervolume(x) for x in test_metrics])
}, len(test_metrics)
def print_test_metrics_best_mean_and_std(experiment_name, dataset_type='test', max_iter=15, algo_name=None, **kwargs):
means_and_stds, n_seeds = get_test_metrics_best_mean_and_std(experiment_name, dataset_type, max_iter, algo_name, **kwargs)
print(f'{experiment_name} ({dataset_type}): '
f'{means_and_stds["hv"][0]:.3f} ± {means_and_stds["hv"][1]:.3f} ; '
f'{means_and_stds["test"][0]:.2f} ± {means_and_stds["test"][1]:.2f} ; '
f'{int(means_and_stds["flops"][0])} ± {int(means_and_stds["flops"][1])} ({n_seeds} seeds)'
)
def print_test_metrics_best_mean_and_std_many(experiment_names, dataset_type, max_iters, algo_names, **kwargs):
if not type(dataset_type) is list:
dataset_type = [dataset_type] * len(experiment_names)
if algo_names is None:
algo_names = [None] * len(experiment_names)
for experiment_name, dataset_type_cur, max_iter, algo_name in zip(experiment_names, dataset_type, max_iters, algo_names):
print_test_metrics_best_mean_and_std(experiment_name, dataset_type_cur, max_iter, algo_name, **kwargs)
# break
def compute_hypervolumes(experiment_name, dataset_type='test', max_iter=15, algo_name=None, **kwargs):
if 'logs_classification' not in experiment_name and 'segmentation_logs' not in experiment_name:
algo_name_to_seed_to_result = utils.execute_func_for_all_runs_and_combine(experiment_name,
read_cumulative_pareto_front_metrics_single_run,
dataset_type=dataset_type,
max_iter=max_iter, **kwargs,
target_algos=algo_name)
if algo_name is None:
algo_name = list(algo_name_to_seed_to_result.keys())[0]
seed_to_result = algo_name_to_seed_to_result[algo_name]
test_metrics = seed_to_result.values()
else:
nsga_path = utils.NAT_PATH
test_metrics = []
for seed_dir in sorted(os.scandir(os.path.join(nsga_path, experiment_name)), key=lambda e: e.name):
data = yaml.safe_load(open(os.path.join(nsga_path, experiment_name, seed_dir.name, 'data.yml')))
test_metrics.append({'test': data['test'], 'flops': data['flops']})
def compute_hypervolume(dict_metrics):
test = np.array(dict_metrics['test'])
flops = np.array(dict_metrics['flops'])
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
test_hv = utils.compute_hypervolume(ref_pt, np.column_stack([100 - test, flops]), if_increase_ref_pt=False)
return test_hv
if not kwargs.get('if_return_max_accs', False):
return [compute_hypervolume(x) for x in test_metrics]
else:
return [compute_hypervolume(x) for x in test_metrics], [x['test'][-1] for x in test_metrics]
def print_hypervolumes_many(experiment_names, dataset_type, max_iters, algo_names, **kwargs):
if not type(dataset_type) is list:
dataset_type = [dataset_type] * len(experiment_names)
if algo_names is None:
algo_names = [None] * len(experiment_names)
if not type(max_iters) is list:
max_iters = [max_iters] * len(experiment_names)
for experiment_name, dataset_type_cur, max_iter, algo_name in zip(experiment_names, dataset_type, max_iters, algo_names):
hvs = compute_hypervolumes(experiment_name, dataset_type_cur, max_iter, algo_name, **kwargs)
print(f'{experiment_name} HV: {hvs}')
def plot_hypervolumes_impact_n_supernets(experiment_names, dataset_type, max_iters, algo_names, supernet_numbers,
set_xticks, label, **kwargs):
if not type(dataset_type) is list:
dataset_type = [dataset_type] * len(experiment_names)
if algo_names is None:
algo_names = [None] * len(experiment_names)
if not type(max_iters) is list:
max_iters = [max_iters] * len(experiment_names)
means = []
stds = []
for experiment_name, dataset_type_cur, max_iter, algo_name in zip(experiment_names, dataset_type, max_iters, algo_names):
hvs = compute_hypervolumes(experiment_name, dataset_type_cur, max_iter, algo_name, **kwargs)
mean, std = np.mean(hvs), np.std(hvs)
print(f'n seeds = {len(hvs)}')
means.append(mean)
stds.append(std)
if set_xticks:
plt.xticks(supernet_numbers)
plt.errorbar(supernet_numbers, means, yerr=stds, capsize=5, label=label)
def get_hypervolumes_and_max_accs_for_stat_testing(experiment_names, dataset_type, max_iters, algo_names, **kwargs):
if not type(dataset_type) is list:
dataset_type = [dataset_type] * len(experiment_names)
if algo_names is None:
algo_names = [None] * len(experiment_names)
if not type(max_iters) is list:
max_iters = [max_iters] * len(experiment_names)
all_hvs = []
all_max_accs = []
for experiment_name, dataset_type_cur, max_iter, algo_name in zip(experiment_names, dataset_type, max_iters, algo_names):
hvs, max_accs = compute_hypervolumes(experiment_name, dataset_type_cur, max_iter, algo_name, if_return_max_accs=True, **kwargs)
print(f'n seeds = {len(hvs)}')
all_hvs.append(hvs)
all_max_accs.append(max_accs)
return all_hvs, all_max_accs
| 35,222 | 52.287443 | 161 |
py
|
ENCAS
|
ENCAS-main/search_space/ensemble_ss.py
|
import itertools
class EnsembleSearchSpace:
def __init__(self, ss_names_list, ss_kwargs_list):
from search_space import make_search_space
self.search_spaces = [make_search_space(ss_name, **ss_kwargs)
for ss_name, ss_kwargs in zip(ss_names_list, ss_kwargs_list)]
self.n_ss = len(self.search_spaces)
def sample(self, n_samples=1):
        # zip(*...) groups the i-th sample of every sub-space into one ensemble sample (mirrors initialize below)
        return list(zip(*[ss.sample(n_samples) for ss in self.search_spaces]))
def initialize(self, n_doe):
return list(zip(*[ss.initialize(n_doe) for ss in self.search_spaces]))
def encode(self, configs, if_return_separate=False):
# returns concatenated encoding of all the configs as a single flat list
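        # illustrative example with hypothetical encodings: two sub-spaces that encode their configs
        # as [1, 2] and [3, 4, 5] yield the flat list [1, 2, 3, 4, 5];
        # with if_return_separate=True the same call returns [[1, 2], [3, 4, 5]]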
encoded_configs = [ss.encode(config) for ss, config in zip(self.search_spaces, configs)]
if if_return_separate:
return encoded_configs
encoded = list(itertools.chain(*encoded_configs))
return encoded
def decode(self, enc_configs):
        # takes the flat concatenated encoding of all sub-space configs
        # returns a list of configs, one per sub-space, each of which is a dictionary
enc_configs_separated = []
for ss in self.search_spaces:
enc_configs_part = enc_configs[:ss.encoded_length]
enc_configs = enc_configs[ss.encoded_length:]
enc_configs_separated.append(enc_configs_part)
decoded = [ss.decode(config) for ss, config in zip(self.search_spaces, enc_configs_separated)]
return decoded
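# Minimal usage sketch (hypothetical arguments -- the exact names/kwargs accepted by make_search_space may differ):
#   ss = EnsembleSearchSpace(['ofa', 'ofa'], [{'alphabet': 'full_nat_w10'}, {'alphabet': 'full_nat_w12'}])
#   cfg_tuple = ss.initialize(n_doe=4)[0]   # one config dict per sub-space
#   enc = ss.encode(cfg_tuple)              # flat list, length = sum of the sub-spaces' encoded_length
#   decoded = ss.decode(enc)                # back to a list of per-sub-space config dicts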
| 1,496 | 41.771429 | 102 |
py
|
ENCAS
|
ENCAS-main/search_space/ofa_ss.py
|
import numpy as np
import random
import utils
class OFASearchSpace:
def __init__(self, alphabet='2', **kwargs):
self.name = 'ofa'
self.num_blocks = 5
        self.encoded_length = 22  # needed for decoding an ensemble
self.if_cascade = False
self.positions = [None]
self.thresholds = [None]
if alphabet == 'full_nat':
self.kernel_size = [3, 5, 7] # depth-wise conv kernel size
self.exp_ratio = [3, 4, 6] # expansion rate
self.depth = [2, 3, 4] # number of Inverted Residual Bottleneck layers repetition
self.resolution = list(range(192, 257, 4)) # input image resolutions
self.width_mult = [1.0, 1.2]
elif alphabet == 'full_nat_w10':
self.kernel_size = [3, 5, 7]
self.exp_ratio = [3, 4, 6]
self.depth = [2, 3, 4]
self.resolution = list(range(192, 257, 4))
self.width_mult = [1.0]
elif alphabet == 'full_nat_w12':
self.kernel_size = [3, 5, 7]
self.exp_ratio = [3, 4, 6]
self.depth = [2, 3, 4]
self.resolution = list(range(192, 257, 4))
self.width_mult = [1.2]
elif alphabet in ['full_nat_w12_cascade2', 'full_nat_w12_cascade5']: # size of the cascade is passed in kwargs
self.if_cascade = True
self.cascade_size = kwargs['ensemble_size']
self.positions = list(range(self.cascade_size))
self.n_thredsholds = len(utils.threshold_gene_to_value)
self.threshold_value_to_gene = {v: k for k, v in utils.threshold_gene_to_value.items()}
self.thresholds = [utils.threshold_gene_to_value[i] for i in range(self.n_thredsholds)]
self.encoded_length += 2 # position, threshold
self.kernel_size = [3, 5, 7]
self.exp_ratio = [3, 4, 6]
self.depth = [2, 3, 4]
self.resolution = list(range(192, 257, 4))
self.width_mult = [1.2]
elif alphabet == 'full_nat_w10_cascade5':
self.if_cascade = True
self.cascade_size = kwargs['ensemble_size']
self.positions = list(range(self.cascade_size))
self.n_thredsholds = len(utils.threshold_gene_to_value)
self.threshold_value_to_gene = {v: k for k, v in utils.threshold_gene_to_value.items()}
self.thresholds = [utils.threshold_gene_to_value[i] for i in range(self.n_thredsholds)]
self.encoded_length += 2 # position, threshold
self.kernel_size = [3, 5, 7]
self.exp_ratio = [3, 4, 6]
self.depth = [2, 3, 4]
self.resolution = list(range(192, 257, 4))
self.width_mult = [1.0]
else:
raise ValueError(f'Unknown alphabet "{alphabet}"')
def sample(self, n_samples=1, nb=None, ks=None, e=None, d=None, r=None, w=None, p=None, t=None):
""" randomly sample a architecture"""
nb = self.num_blocks if nb is None else nb
ks = self.kernel_size if ks is None else ks
e = self.exp_ratio if e is None else e
d = self.depth if d is None else d
r = self.resolution if r is None else r
w = self.width_mult if w is None else w
p = self.positions if p is None else p
t = self.thresholds if t is None else t
data = []
for n in range(n_samples):
# first sample layers
depth = np.random.choice(d, nb, replace=True).tolist()
# then sample kernel size, expansion rate and resolution
kernel_size = np.random.choice(ks, size=int(np.sum(depth)), replace=True).tolist()
exp_ratio = np.random.choice(e, size=int(np.sum(depth)), replace=True).tolist()
resolution = int(np.random.choice(r))
width = np.random.choice(w)
arch = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w':width}
if self.if_cascade:
arch['position'] = random.choice(p)
arch['threshold'] = random.choice(t)
while arch in data:
# first sample layers
depth = np.random.choice(d, nb, replace=True).tolist()
# then sample kernel size, expansion rate and resolution
kernel_size = np.random.choice(ks, size=int(np.sum(depth)), replace=True).tolist()
exp_ratio = np.random.choice(e, size=int(np.sum(depth)), replace=True).tolist()
resolution = int(np.random.choice(r))
width = np.random.choice(w)
arch = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w': width}
if self.if_cascade:
arch['position'] = random.choice(p)
arch['threshold'] = random.choice(t)
data.append(arch)
return data
def initialize(self, n_doe):
        # sample one arch with the least complexity (lower bound of every hyperparameter) and one with the most complexity (upper bound)
        # print('Attention! Add best NAT subnet to the initialization!')
data = [
self.sample(1, ks=[min(self.kernel_size)], e=[min(self.exp_ratio)], d=[min(self.depth)],
r=[min(self.resolution)], w=[min(self.width_mult)], p=[min(self.positions)],
t=[min(self.thresholds)])[0],
self.sample(1, ks=[max(self.kernel_size)], e=[max(self.exp_ratio)], d=[max(self.depth)],
r=[max(self.resolution)], w=[max(self.width_mult)], p=[max(self.positions)],
t=[max(self.thresholds)])[0],
# self.sample(1, ks= [7, 7, 7, 7, 7, 3, 7, 5, 7, 7, 7, 3, 7, 7, 7, 3],
# e= [3, 3, 6, 4, 6, 4, 3, 3, 6, 4, 6, 6, 6, 6, 3, 3],
# d=[2, 2, 4, 4, 4],
# r=[224], w=[1.2])[0]
]
data.extend(self.sample(n_samples=n_doe - 2))
return data
def pad_zero(self, x, depth):
# pad zeros to make bit-string of equal length
new_x, counter = [], 0
for d in depth:
for _ in range(d):
new_x.append(x[counter])
counter += 1
if d < max(self.depth):
new_x += [0] * (max(self.depth) - d)
return new_x
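    # Worked example (illustrative): with max(self.depth) == 4, pad_zero([3, 5, 3, 3, 3], [2, 3])
    # returns [3, 5, 0, 0, 3, 3, 3, 0]: each block's layer-wise values are padded with zeros up to
    # the maximum depth, so that all encodings have equal length.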
def encode(self, config):
"""
values of architecture parameters -> their indices
"""
layer_choices = {'[3 3]': 1, '[3 5]': 2, '[3 7]': 3,
'[4 3]': 4, '[4 5]': 5, '[4 7]': 6,
'[6 3]': 7, '[6 5]': 8, '[6 7]': 9, '[None None]': 0}
kernel_size = self.pad_zero(config['ks'], config['d'])
exp_ratio = self.pad_zero(config['e'], config['d'])
r = np.where(np.array(self.resolution) == config["r"])[0][0]
w = np.where(np.array(self.width_mult) == config["w"])[0][0]
layers = [0] * (self.num_blocks * max(self.depth))
for i, d in enumerate(config['d']):
for j in range(d):
idx = i * max(self.depth) + j
key = '[{} {}]'.format(exp_ratio[idx], kernel_size[idx])
layers[idx] = layer_choices[key]
layers = [r] + [w] + layers
if self.if_cascade:
pos = config['position']
th = config['threshold']
layers += [pos, self.threshold_value_to_gene[th]]
return layers
def decode(self, _layers):
"""
indices of values of architecture parameters -> actual values
"""
if type(_layers) is np.ndarray:
_layers = _layers.flatten()
cfg_choices = {1: (3, 3), 2: (3, 5), 3: (3, 7),
4: (4, 3), 5: (4, 5), 6: (4, 7),
7: (6, 3), 8: (6, 5), 9: (6, 7), 0: (None, None)}
depth, kernel_size, exp_ratio = [], [], []
resolution, width_mult = self.resolution[_layers[0]], self.width_mult[_layers[1]]
d = 0
layers = _layers[2:]
if self.if_cascade:
pos = int(layers[-2])
th = float(utils.threshold_gene_to_value[layers[-1]])
layers = layers[:-2]
for i, l in enumerate(layers):
e, ks = cfg_choices[l]
if (ks is not None) and (e is not None):
kernel_size.append(ks)
exp_ratio.append(e)
d += 1
if (i + 1) % max(self.depth) == 0:
if l != 0 and layers[i - 1] == 0:
                    # a non-skip layer cannot follow a skip layer within a block;
                    # the first 2 layers are always non-skip (min depth is 2) and this branch is only
                    # reached at the block's last gene, so if the previous gene is 0 the current
                    # (last) layer must be dropped
d -= 1
kernel_size = kernel_size[:-1]
exp_ratio = exp_ratio[:-1]
depth.append(d)
d = 0
config = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w': width_mult}
if self.if_cascade:
config['position'] = pos
config['threshold'] = th
return config
| 9,126 | 42.669856 | 118 |
py
|
ENCAS
|
ENCAS-main/search_space/alphanet_ss.py
|
from copy import copy
import numpy as np
import yaml
import utils
from utils import RecursiveNamespace, alphanet_config_str
class AlphaNetSearchSpace:
def __init__(self, alphabet, **kwargs):
self.supernet_config = RecursiveNamespace(**yaml.safe_load(alphanet_config_str))
self.supernet_config_dict = yaml.safe_load(alphanet_config_str)
self.if_cascade = False
if alphabet == 'full_alphanet':
self.resolutions = [192, 224, 256, 288]
self.min_config = [0] * 28
self.max_config = [len(self.resolutions) - 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 3, 1, 2, 2, 5, 1, 2, 3, 5, 1, 1, 1, 1, 1]
elif alphabet in ['full_alphanet_cascade2', 'full_alphanet_cascade5']: # size of the cascade is passed in kwargs
self.if_cascade = True
cascade_size = kwargs['ensemble_size']
self.positions = list(range(cascade_size))
n_thredsholds = len(utils.threshold_gene_to_value)
self.threshold_value_to_gene = {v: k for k, v in utils.threshold_gene_to_value.items()}
self.resolutions = [192, 224, 256, 288]
self.min_config = [0] * (28 + 2)
self.max_config = [len(self.resolutions) - 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 3, 1, 2, 2, 5, 1, 2, 3,
5, 1, 1, 1, 1, 1, cascade_size - 1, n_thredsholds - 1]
else:
raise NotImplementedError()
self.name = 'alphanet'
self.encoded_length = len(self.max_config) #needed for decoding an ensemble
def sample(self, n_samples=1):
data = []
sampled_archive = set()
for n in range(n_samples):
# sample random encoding in range from all zeroes to max_config, then decode it
sampled_encoding = [np.random.randint(lower_val, upper_val + 1)
for lower_val, upper_val in zip(self.min_config, self.max_config)]
while tuple(sampled_encoding) in sampled_archive:
sampled_encoding = [np.random.randint(lower_val, upper_val + 1)
for lower_val, upper_val in zip(self.min_config, self.max_config)]
data.append(self.decode(sampled_encoding))
sampled_archive.add(tuple(sampled_encoding))
return data
def initialize(self, n_doe):
# init with smallest & largest possible subnets + random ones
data = [
self.decode(self.min_config),
self.decode(self.max_config)
]
data.extend(self.sample(n_samples=n_doe - 2))
return data
def encode(self, config):
"""
values to their indices
"""
r = self.resolutions.index(config['r'])
layers = [r]
# first conv
layers.append(self.supernet_config.first_conv.c.index(config['w'][0]))
# blocks
for i, (w, d, k, e) in enumerate(zip(config['w'][1:-1], config['d'], config['ks'], config['e'])):
layers.append(self.supernet_config_dict[f'mb{i+1}']['c'].index(w))
layers.append(self.supernet_config_dict[f'mb{i+1}']['d'].index(d))
layers.append(self.supernet_config_dict[f'mb{i+1}']['k'].index(k))
# blocks mb1, mb6, mb7 have a single possible value for expansion rates => don't encode
if i not in [0, 5, 6]:
layers.append(self.supernet_config_dict[f'mb{i + 1}']['t'].index(e))
# last conv
layers.append(self.supernet_config.last_conv.c.index(config['w'][-1]))
if self.if_cascade:
layers.append(int(config['position'])) # encoding == value itself
layers.append(int(self.threshold_value_to_gene[config['threshold']]))
return layers
def decode(self, enc_conf):
"""
        transform a list of choice indices into the actual choices: the 'd'/'ks'/'e' lists have one
        entry per stage, while 'w' has 2 extra entries (first and last conv), plus a single resolution
        (note that variables with only one possible choice are not encoded; their values are simply added here)
"""
if type(enc_conf) is np.ndarray:
enc_conf = list(enc_conf.flatten())
enc_conf = copy(enc_conf)
depth, kernel_size, exp_ratio, width = [], [], [], []
resolution = self.resolutions[enc_conf.pop(0)]
# first conv
width.append(self.supernet_config.first_conv.c[enc_conf.pop(0)])
        # blocks (code is not pretty, but I think writing it as a loop would've been uglier)
width.append(self.supernet_config.mb1.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb1.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb1.k[enc_conf.pop(0)])
exp_ratio.append(1)
width.append(self.supernet_config.mb2.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb2.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb2.k[enc_conf.pop(0)])
exp_ratio.append(self.supernet_config.mb2.t[enc_conf.pop(0)])
width.append(self.supernet_config.mb3.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb3.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb3.k[enc_conf.pop(0)])
exp_ratio.append(self.supernet_config.mb3.t[enc_conf.pop(0)])
width.append(self.supernet_config.mb4.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb4.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb4.k[enc_conf.pop(0)])
exp_ratio.append(self.supernet_config.mb4.t[enc_conf.pop(0)])
width.append(self.supernet_config.mb5.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb5.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb5.k[enc_conf.pop(0)])
exp_ratio.append(self.supernet_config.mb5.t[enc_conf.pop(0)])
width.append(self.supernet_config.mb6.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb6.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb6.k[enc_conf.pop(0)])
exp_ratio.append(6)
width.append(self.supernet_config.mb7.c[enc_conf.pop(0)])
depth.append(self.supernet_config.mb7.d[enc_conf.pop(0)])
kernel_size.append(self.supernet_config.mb7.k[enc_conf.pop(0)])
exp_ratio.append(6)
# last conv
width.append(self.supernet_config.last_conv.c[enc_conf.pop(0)])
config = {'r': resolution, 'w': width, 'd': depth, 'ks': kernel_size, 'e': exp_ratio}
if self.if_cascade:
config['position'] = int(enc_conf.pop(0))
config['threshold'] = float(utils.threshold_gene_to_value[enc_conf.pop(0)])
if len(enc_conf) != 0:
raise AssertionError('not the whole config was used')
return config
if __name__ == '__main__':
ss = AlphaNetSearchSpace('full_alphanet')
conf = {'r': 288, 'w': [24, 24, 32, 40, 72, 128, 216, 224, 1984], 'd': [2, 5, 6, 6, 8, 8, 2],
'ks': [5, 5, 5, 5, 5, 5, 5], 'e': [1, 6, 6, 6, 6, 6, 6]}
encoded = ss.encode(conf)
print(f'{encoded=}')
decoded = ss.decode(encoded)
print(f'{decoded=}')
conf = {'r': 288, 'w': [16, 16, 24, 32, 64, 112, 192, 216, 1792], 'd': [2, 5, 6, 6, 8, 8, 2],
'ks': [5, 5, 5, 5, 5, 5, 5], 'e': [1, 6, 6, 6, 6, 6, 6]}
encoded = ss.encode(conf)
print(f'{encoded=}')
decoded = ss.decode(encoded)
print(f'{decoded=}')
decoded_zeros = ss.decode([0] * 28)
print(f'{decoded_zeros=}')
| 7,457 | 42.109827 | 138 |
py
|
ENCAS
|
ENCAS-main/search_space/__init__.py
|
from .ofa_ss import OFASearchSpace
from .alphanet_ss import AlphaNetSearchSpace
from .proxyless_ss import ProxylessSearchSpace
_name_to_class_dict = {'ofa': OFASearchSpace, 'alphanet': AlphaNetSearchSpace, 'proxyless': ProxylessSearchSpace}
def make_search_space(name, **kwargs):
return _name_to_class_dict[name](**kwargs)
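# Usage sketch (illustrative): `name` is one of 'ofa', 'alphanet', 'proxyless'; extra kwargs are
# forwarded to the search-space constructor (e.g. the cascade alphabets expect `ensemble_size`):
#   ss = make_search_space('alphanet', alphabet='full_alphanet_cascade5', ensemble_size=5)
#   initial_archs = ss.initialize(n_doe=100)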
| 328 | 40.125 | 113 |
py
|
ENCAS
|
ENCAS-main/search_space/proxyless_ss.py
|
import numpy as np
import random
import utils
class ProxylessSearchSpace:
def __init__(self, alphabet='2', **kwargs):
self.name = 'proxyless'
self.num_blocks = 5
self.encoded_length = 22 #needed for decoding an ensemble
self.if_cascade = False
self.positions = [None]
self.thresholds = [None]
if alphabet == 'full_nat_proxyless':
self.kernel_size = [3, 5, 7] # depth-wise conv kernel size
self.exp_ratio = [3, 4, 6] # expansion rate
self.depth = [2, 3, 4] # number of Inverted Residual Bottleneck layers repetition
self.resolution = list(range(192, 257, 4)) # input image resolutions
self.width_mult = [1.3]
elif alphabet == 'full_nat_proxyless_cascade5':
self.if_cascade = True
self.cascade_size = kwargs['ensemble_size']
self.positions = list(range(self.cascade_size))
self.n_thredsholds = len(utils.threshold_gene_to_value)
self.threshold_value_to_gene = {v: k for k, v in utils.threshold_gene_to_value.items()}
self.thresholds = [utils.threshold_gene_to_value[i] for i in range(self.n_thredsholds)]
self.encoded_length += 2 # position, threshold
self.kernel_size = [3, 5, 7]
self.exp_ratio = [3, 4, 6]
self.depth = [2, 3, 4]
self.resolution = list(range(192, 257, 4))
self.width_mult = [1.3]
else:
raise ValueError(f'Unknown alphabet "{alphabet}"')
def sample(self, n_samples=1, nb=None, ks=None, e=None, d=None, r=None, w=None, p=None, t=None):
""" randomly sample a architecture"""
nb = self.num_blocks if nb is None else nb
ks = self.kernel_size if ks is None else ks
e = self.exp_ratio if e is None else e
d = self.depth if d is None else d
r = self.resolution if r is None else r
w = self.width_mult if w is None else w
p = self.positions if p is None else p
t = self.thresholds if t is None else t
data = []
for n in range(n_samples):
# first sample layers
depth = np.random.choice(d, nb, replace=True).tolist()
# then sample kernel size, expansion rate and resolution
kernel_size = np.random.choice(ks, size=int(np.sum(depth)), replace=True).tolist()
exp_ratio = np.random.choice(e, size=int(np.sum(depth)), replace=True).tolist()
resolution = int(np.random.choice(r))
width = np.random.choice(w)
arch = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w':width}
if self.if_cascade:
arch['position'] = random.choice(p)
arch['threshold'] = random.choice(t)
while arch in data:
# first sample layers
depth = np.random.choice(d, nb, replace=True).tolist()
# then sample kernel size, expansion rate and resolution
kernel_size = np.random.choice(ks, size=int(np.sum(depth)), replace=True).tolist()
exp_ratio = np.random.choice(e, size=int(np.sum(depth)), replace=True).tolist()
resolution = int(np.random.choice(r))
width = np.random.choice(w)
arch = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w': width}
if self.if_cascade:
arch['position'] = random.choice(p)
arch['threshold'] = random.choice(t)
data.append(arch)
return data
def initialize(self, n_doe):
        # sample one arch with the least complexity (lower bound of every hyperparameter) and one with the most (upper bound)
# print('Achtung! Add best NAT subnet to the initialization!')
data = [
self.sample(1, ks=[min(self.kernel_size)], e=[min(self.exp_ratio)],
d=[min(self.depth)], r=[min(self.resolution)], w=[min(self.width_mult)],
p=[min(self.positions)], t=[min(self.thresholds)])[0],
self.sample(1, ks=[max(self.kernel_size)], e=[max(self.exp_ratio)],
d=[max(self.depth)], r=[max(self.resolution)], w=[max(self.width_mult)],
p=[max(self.positions)], t=[max(self.thresholds)])[0],
# self.sample(1, ks= [7, 7, 7, 7, 7, 3, 7, 5, 7, 7, 7, 3, 7, 7, 7, 3],
# e= [3, 3, 6, 4, 6, 4, 3, 3, 6, 4, 6, 6, 6, 6, 3, 3],
# d=[2, 2, 4, 4, 4],
# r=[224], w=[1.2])[0]
]
data.extend(self.sample(n_samples=n_doe - 2))
return data
def pad_zero(self, x, depth):
# pad zeros to make bit-string of equal length
new_x, counter = [], 0
for d in depth:
for _ in range(d):
new_x.append(x[counter])
counter += 1
if d < max(self.depth):
new_x += [0] * (max(self.depth) - d)
return new_x
def encode(self, config):
"""
values of architecture parameters -> their indices
"""
layer_choices = {'[3 3]': 1, '[3 5]': 2, '[3 7]': 3,
'[4 3]': 4, '[4 5]': 5, '[4 7]': 6,
'[6 3]': 7, '[6 5]': 8, '[6 7]': 9, '[None None]': 0}
kernel_size = self.pad_zero(config['ks'], config['d'])
exp_ratio = self.pad_zero(config['e'], config['d'])
r = np.where(np.array(self.resolution) == config["r"])[0][0]
w = np.where(np.array(self.width_mult) == config["w"])[0][0]
layers = [0] * (self.num_blocks * max(self.depth))
for i, d in enumerate(config['d']):
for j in range(d):
idx = i * max(self.depth) + j
key = '[{} {}]'.format(exp_ratio[idx], kernel_size[idx])
layers[idx] = layer_choices[key]
layers = [r] + [w] + layers
if self.if_cascade:
pos = config['position']
th = config['threshold']
layers += [pos, self.threshold_value_to_gene[th]]
return layers
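    # Layout of the encoding (illustrative note): with num_blocks = 5 and max depth 4 there are
    # 2 + 5 * 4 = 22 genes, [r_idx, w_idx, block1_layer1..4, ..., block5_layer1..4], plus
    # [position, threshold_gene] when if_cascade is True (matching encoded_length above).
    # Each layer gene in {1..9} encodes an (expand_ratio, kernel_size) pair; 0 marks a skipped layer.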
def decode(self, _layers):
"""
indices of values of architecture parameters -> actual values
"""
if type(_layers) is np.ndarray:
_layers = _layers.flatten()
cfg_choices = {1: (3, 3), 2: (3, 5), 3: (3, 7),
4: (4, 3), 5: (4, 5), 6: (4, 7),
7: (6, 3), 8: (6, 5), 9: (6, 7), 0: (None, None)}
depth, kernel_size, exp_ratio = [], [], []
resolution, width_mult = self.resolution[_layers[0]], self.width_mult[_layers[1]]
d = 0
layers = _layers[2:]
if self.if_cascade:
pos = int(layers[-2])
th = float(utils.threshold_gene_to_value[layers[-1]])
layers = layers[:-2]
for i, l in enumerate(layers):
e, ks = cfg_choices[l]
if (ks is not None) and (e is not None):
kernel_size.append(ks)
exp_ratio.append(e)
d += 1
if (i + 1) % max(self.depth) == 0:
if l != 0 and layers[i - 1] == 0:
                    # a non-skip layer cannot follow a skip layer within a block;
                    # the first 2 layers are always non-skip (min depth is 2) and this branch is only
                    # reached at the block's last gene, so if the previous gene is 0 the current
                    # (last) layer must be dropped
d -= 1
kernel_size = kernel_size[:-1]
exp_ratio = exp_ratio[:-1]
depth.append(d)
d = 0
config = {'ks': kernel_size, 'e': exp_ratio, 'd': depth, 'r': resolution, 'w': width_mult}
if self.if_cascade:
config['position'] = pos
config['threshold'] = th
return config
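if __name__ == '__main__':
    # Illustrative round-trip sketch (mirrors the demo in alphanet_ss.py); it only uses the
    # alphabet and kwargs defined above.
    ss = ProxylessSearchSpace(alphabet='full_nat_proxyless_cascade5', ensemble_size=5)
    for arch in ss.sample(n_samples=2):
        encoded = ss.encode(arch)
        decoded = ss.decode(encoded)
        print(f'{arch=}')
        print(f'{encoded=}')
        print(f'{decoded=}')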
| 7,851 | 42.142857 | 104 |
py
|
ENCAS
|
ENCAS-main/networks/attentive_nas_dynamic_model.py
|
# taken from https://github.com/facebookresearch/AttentiveNAS
# Difference: images not resized in forward, but beforehand, in the collator (which is faster)
import copy
import random
import collections
import math
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential, checkpoint
from .modules_alphanet.dynamic_layers import DynamicMBConvLayer, DynamicConvBnActLayer, DynamicLinearLayer, DynamicShortcutLayer
from .modules_alphanet.static_layers import MobileInvertedResidualBlock
from .modules_alphanet.nn_utils import make_divisible, int2list
from .modules_alphanet.nn_base import MyNetwork
from .attentive_nas_static_model import AttentiveNasStaticModel
from utils import alphanet_config_str, RecursiveNamespace
import yaml
class AttentiveNasDynamicModel(MyNetwork):
def __init__(self, n_classes=1000, bn_param=(0., 1e-5), if_use_gradient_checkpointing=False, n_image_channels=3, **kwargs):
super(AttentiveNasDynamicModel, self).__init__()
self.supernet = RecursiveNamespace(**yaml.safe_load(alphanet_config_str)) # a config of the supernet space
self.n_classes = n_classes
self.use_v3_head = getattr(self.supernet, 'use_v3_head', False)
self.stage_names = ['first_conv', 'mb1', 'mb2', 'mb3', 'mb4', 'mb5', 'mb6', 'mb7', 'last_conv']
self.n_image_channels = n_image_channels
self.width_list, self.depth_list, self.ks_list, self.expand_ratio_list = [], [], [], []
for name in self.stage_names:
block_cfg = getattr(self.supernet, name)
self.width_list.append(block_cfg.c)
if name.startswith('mb'):
self.depth_list.append(block_cfg.d)
self.ks_list.append(block_cfg.k)
self.expand_ratio_list.append(block_cfg.t)
self.resolution_list = self.supernet.resolutions
self.cfg_candidates = {
'resolution': self.resolution_list ,
'width': self.width_list,
'depth': self.depth_list,
'kernel_size': self.ks_list,
'expand_ratio': self.expand_ratio_list
}
#first conv layer, including conv, bn, act
out_channel_list, act_func, stride = \
self.supernet.first_conv.c, self.supernet.first_conv.act_func, self.supernet.first_conv.s
self.first_conv = DynamicConvBnActLayer(
in_channel_list=int2list(self.n_image_channels), out_channel_list=out_channel_list,
kernel_size=3, stride=stride, act_func=act_func,
)
# inverted residual blocks
self.block_group_info = []
blocks = []
_block_index = 0
feature_dim = out_channel_list
for stage_id, key in enumerate(self.stage_names[1:-1]):
block_cfg = getattr(self.supernet, key)
width = block_cfg.c
n_block = max(block_cfg.d)
act_func = block_cfg.act_func
ks = block_cfg.k
expand_ratio_list = block_cfg.t
use_se = block_cfg.se
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width
for i in range(n_block):
stride = block_cfg.s if i == 0 else 1
if min(expand_ratio_list) >= 4:
expand_ratio_list = [_s for _s in expand_ratio_list if _s >= 4] if i == 0 else expand_ratio_list
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=feature_dim,
out_channel_list=output_channel,
kernel_size_list=ks,
expand_ratio_list=expand_ratio_list,
stride=stride,
act_func=act_func,
use_se=use_se,
channels_per_group=getattr(self.supernet, 'channels_per_group', 1)
)
shortcut = DynamicShortcutLayer(feature_dim, output_channel, reduction=stride)
blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))
feature_dim = output_channel
self.blocks = nn.ModuleList(blocks)
last_channel, act_func = self.supernet.last_conv.c, self.supernet.last_conv.act_func
if not self.use_v3_head:
self.last_conv = DynamicConvBnActLayer(
in_channel_list=feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func,
)
else:
expand_feature_dim = [f_dim * 6 for f_dim in feature_dim]
self.last_conv = nn.Sequential(collections.OrderedDict([
('final_expand_layer', DynamicConvBnActLayer(
feature_dim, expand_feature_dim, kernel_size=1, use_bn=True, act_func=act_func)
),
('pool', nn.AdaptiveAvgPool2d((1,1))),
('feature_mix_layer', DynamicConvBnActLayer(
in_channel_list=expand_feature_dim, out_channel_list=last_channel,
kernel_size=1, act_func=act_func, use_bn=False,)
),
]))
#final conv layer
self.classifier = DynamicLinearLayer(
in_features_list=last_channel, out_features=n_classes, bias=True
)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.zero_residual_block_bn_weights()
self.active_dropout_rate = 0
self.active_drop_connect_rate = 0
# self.active_resolution = 224
self.width_mult = [None] # for compatibility with Ofa supernet
        self.if_use_gradient_checkpointing = if_use_gradient_checkpointing
    """ set, sample and get active sub-networks """
def set_active_subnet(self, ks=None, e=None, d=None, w=None, **kwargs):
width, depth, kernel_size, expand_ratio = w, d, ks, e
assert len(depth) == len(kernel_size) == len(expand_ratio) == len(width) - 2
# set resolution
# self.active_resolution = resolution
# first conv
self.first_conv.active_out_channel = width[0]
for stage_id, (c, k, e, d) in enumerate(zip(width[1:-1], kernel_size, expand_ratio, depth)):
start_idx, end_idx = min(self.block_group_info[stage_id]), max(self.block_group_info[stage_id])
for block_id in range(start_idx, start_idx + d):
block = self.blocks[block_id]
# block output channels
block.mobile_inverted_conv.active_out_channel = c
if block.shortcut is not None:
block.shortcut.active_out_channel = c
# dw kernel size
block.mobile_inverted_conv.active_kernel_size = k
                # dw expansion ratio
block.mobile_inverted_conv.active_expand_ratio = e
        # number of times IR blocks are repeated (per-stage runtime depth)
for i, d in enumerate(depth):
self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
# last conv
if not self.use_v3_head:
self.last_conv.active_out_channel = width[-1]
else:
# default expansion ratio: 6
self.last_conv.final_expand_layer.active_out_channel = width[-2] * 6
self.last_conv.feature_mix_layer.active_out_channel = width[-1]
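    # Example call (illustrative; the values are borrowed from the demo in search_space/alphanet_ss.py,
    # only the shapes matter here: len(w) == len(d) + 2 == len(ks) + 2 == len(e) + 2):
    #   supernet.set_active_subnet(ks=[5] * 7, e=[1, 6, 6, 6, 6, 6, 6], d=[2, 5, 6, 6, 8, 8, 2],
    #                              w=[16, 16, 24, 32, 64, 112, 192, 216, 1792])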
def get_active_subnet(self, preserve_weight=True):
with torch.no_grad():
first_conv = self.first_conv.get_active_subnet(3, preserve_weight)
blocks = []
input_channel = first_conv.out_channels
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
stage_blocks = []
for idx in active_idx:
stage_blocks.append(MobileInvertedResidualBlock(
self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
self.blocks[idx].shortcut.get_active_subnet(input_channel, preserve_weight) if self.blocks[
idx].shortcut is not None else None
))
input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
blocks += stage_blocks
if not self.use_v3_head:
last_conv = self.last_conv.get_active_subnet(input_channel, preserve_weight)
in_features = last_conv.out_channels
else:
final_expand_layer = self.last_conv.final_expand_layer.get_active_subnet(input_channel, preserve_weight)
feature_mix_layer = self.last_conv.feature_mix_layer.get_active_subnet(input_channel * 6,
preserve_weight)
in_features = feature_mix_layer.out_channels
last_conv = nn.Sequential(
final_expand_layer,
nn.AdaptiveAvgPool2d((1, 1)),
feature_mix_layer
)
classifier = self.classifier.get_active_subnet(in_features, preserve_weight)
_subnet = AttentiveNasStaticModel(
first_conv, blocks, last_conv, classifier, use_v3_head=self.use_v3_head
)
_subnet.set_bn_param(**self.get_bn_param())
return _subnet
def zero_residual_block_bn_weights(self):
with torch.no_grad():
for m in self.modules():
if isinstance(m, MobileInvertedResidualBlock):
if isinstance(m.mobile_inverted_conv, DynamicMBConvLayer) and m.shortcut is not None:
m.mobile_inverted_conv.point_linear.bn.bn.weight.zero_()
@staticmethod
def name():
return 'AttentiveNasModel'
def forward(self, x):
if not (self.if_use_gradient_checkpointing and self.training):
# first conv
x = self.first_conv(x)
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
x = self.blocks[idx](x)
else:
x = self.first_conv(x)
blocks_to_run = []
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
blocks_to_run.append(self.blocks[idx])
x = checkpoint_sequential(blocks_to_run, 7, x) # before clean-up it used to be 6
x = self.last_conv(x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = torch.squeeze(x)
if self.active_dropout_rate > 0 and self.training:
x = torch.nn.functional.dropout(x, p = self.active_dropout_rate)
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
_str += self.blocks[0].module_str + '\n'
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
_str += self.blocks[idx].module_str + '\n'
if not self.use_v3_head:
_str += self.last_conv.module_str + '\n'
else:
_str += self.last_conv.final_expand_layer.module_str + '\n'
_str += self.last_conv.feature_mix_layer.module_str + '\n'
_str += self.classifier.module_str + '\n'
return _str
@property
def config(self):
return {
'name': AttentiveNasDynamicModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
'last_conv': self.last_conv.config if not self.use_v3_head else None,
'final_expand_layer': self.last_conv.final_expand_layer if self.use_v3_head else None,
'feature_mix_layer': self.last_conv.feature_mix_layer if self.use_v3_head else None,
'classifier': self.classifier.config,
# 'resolution': self.active_resolution
}
@staticmethod
def build_from_config(config):
raise ValueError('do not support this function')
def get_active_subnet_settings(self):
# r = self.active_resolution
width, depth, kernel_size, expand_ratio= [], [], [], []
#first conv
width.append(self.first_conv.active_out_channel)
for stage_id in range(len(self.block_group_info)):
start_idx = min(self.block_group_info[stage_id])
block = self.blocks[start_idx] #first block
width.append(block.mobile_inverted_conv.active_out_channel)
kernel_size.append(block.mobile_inverted_conv.active_kernel_size)
expand_ratio.append(block.mobile_inverted_conv.active_expand_ratio)
depth.append(self.runtime_depth[stage_id])
if not self.use_v3_head:
width.append(self.last_conv.active_out_channel)
else:
width.append(self.last_conv.feature_mix_layer.active_out_channel)
return {
# 'resolution': r,
'width': width,
'kernel_size': kernel_size,
'expand_ratio': expand_ratio,
'depth': depth,
}
def set_dropout_rate(self, dropout=0, drop_connect=0, drop_connect_only_last_two_stages=True):
self.active_dropout_rate = dropout
for idx, block in enumerate(self.blocks):
if drop_connect_only_last_two_stages:
if idx not in self.block_group_info[-1] + self.block_group_info[-2]:
continue
this_drop_connect_rate = drop_connect * float(idx) / len(self.blocks)
block.drop_connect_rate = this_drop_connect_rate
def sample_min_subnet(self):
return self._sample_active_subnet(min_net=True)
def sample_max_subnet(self):
return self._sample_active_subnet(max_net=True)
def sample_active_subnet(self, compute_flops=False):
cfg = self._sample_active_subnet(
False, False
)
if compute_flops:
            cfg['flops'] = self.compute_active_subnet_flops(cfg['resolution'])
return cfg
def sample_active_subnet_within_range(self, targeted_min_flops, targeted_max_flops):
while True:
cfg = self._sample_active_subnet()
            cfg['flops'] = self.compute_active_subnet_flops(cfg['resolution'])
if cfg['flops'] >= targeted_min_flops and cfg['flops'] <= targeted_max_flops:
return cfg
def _sample_active_subnet(self, min_net=False, max_net=False):
sample_cfg = lambda candidates, sample_min, sample_max: \
min(candidates) if sample_min else (max(candidates) if sample_max else random.choice(candidates))
cfg = {}
# sample a resolution
cfg['resolution'] = sample_cfg(self.cfg_candidates['resolution'], min_net, max_net)
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = []
for vv in self.cfg_candidates[k]:
cfg[k].append(sample_cfg(int2list(vv), min_net, max_net))
        # set_active_subnet takes (ks, e, d, w); resolution is not set on the model
        # (images are resized beforehand in the collator, see the note at the top of this file)
        self.set_active_subnet(
            ks=cfg['kernel_size'], e=cfg['expand_ratio'], d=cfg['depth'], w=cfg['width']
        )
return cfg
def mutate_and_reset(self, cfg, prob=0.1, keep_resolution=False):
cfg = copy.deepcopy(cfg)
pick_another = lambda x, candidates: x if len(candidates) == 1 else random.choice([v for v in candidates if v != x])
# sample a resolution
r = random.random()
if r < prob and not keep_resolution:
cfg['resolution'] = pick_another(cfg['resolution'], self.cfg_candidates['resolution'])
# sample channels, depth, kernel_size, expand_ratio
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
for _i, _v in enumerate(cfg[k]):
r = random.random()
if r < prob:
cfg[k][_i] = pick_another(cfg[k][_i], int2list(self.cfg_candidates[k][_i]))
        self.set_active_subnet(
            ks=cfg['kernel_size'], e=cfg['expand_ratio'], d=cfg['depth'], w=cfg['width']
        )
return cfg
def crossover_and_reset(self, cfg1, cfg2, p=0.5):
def _cross_helper(g1, g2, prob):
assert type(g1) == type(g2)
if isinstance(g1, int):
return g1 if random.random() < prob else g2
elif isinstance(g1, list):
return [v1 if random.random() < prob else v2 for v1, v2 in zip(g1, g2)]
else:
raise NotImplementedError
cfg = {}
cfg['resolution'] = cfg1['resolution'] if random.random() < p else cfg2['resolution']
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
cfg[k] = _cross_helper(cfg1[k], cfg2[k], p)
        self.set_active_subnet(
            ks=cfg['kernel_size'], e=cfg['expand_ratio'], d=cfg['depth'], w=cfg['width']
        )
return cfg
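    # Usage sketch (illustrative) for the evolutionary helpers above:
    #   parent1 = supernet.sample_active_subnet()
    #   parent2 = supernet.sample_active_subnet()
    #   child = supernet.crossover_and_reset(parent1, parent2, p=0.5)
    #   child = supernet.mutate_and_reset(child, prob=0.1)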
def get_active_net_config(self):
raise NotImplementedError
    def compute_active_subnet_flops(self, resolution=224):
def count_conv(c_in, c_out, size_out, groups, k):
kernel_ops = k**2
output_elements = c_out * size_out**2
ops = c_in * output_elements * kernel_ops / groups
return ops
def count_linear(c_in, c_out):
return c_in * c_out
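        # Worked example (illustrative): a 3x3 conv from 3 to 16 channels producing a 112x112
        # feature map costs count_conv(3, 16, 112, 1, 3) = 3 * 16 * 112**2 * 9 ≈ 5.42e6 MACs.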
        total_ops = 0
        c_in = 3
        # the model no longer stores an active resolution (images are resized in the collator),
        # so the input resolution is passed in explicitly; the default of 224 is an assumption
        size_out = resolution // self.first_conv.stride
        c_out = self.first_conv.active_out_channel
        total_ops += count_conv(c_in, c_out, size_out, 1, 3)
c_in = c_out
# mb blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
block = self.blocks[idx]
c_middle = make_divisible(round(c_in * block.mobile_inverted_conv.active_expand_ratio), 8)
# 1*1 conv
if block.mobile_inverted_conv.inverted_bottleneck is not None:
total_ops += count_conv(c_in, c_middle, size_out, 1, 1)
# dw conv
stride = 1 if idx > active_idx[0] else block.mobile_inverted_conv.stride
if size_out % stride == 0:
size_out = size_out // stride
else:
size_out = (size_out +1) // stride
total_ops += count_conv(c_middle, c_middle, size_out, c_middle, block.mobile_inverted_conv.active_kernel_size)
# 1*1 conv
c_out = block.mobile_inverted_conv.active_out_channel
total_ops += count_conv(c_middle, c_out, size_out, 1, 1)
#se
if block.mobile_inverted_conv.use_se:
num_mid = make_divisible(c_middle // block.mobile_inverted_conv.depth_conv.se.reduction, divisor=8)
total_ops += count_conv(c_middle, num_mid, 1, 1, 1) * 2
if block.shortcut and c_in != c_out:
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
c_in = c_out
if not self.use_v3_head:
c_out = self.last_conv.active_out_channel
total_ops += count_conv(c_in, c_out, size_out, 1, 1)
else:
c_expand = self.last_conv.final_expand_layer.active_out_channel
c_out = self.last_conv.feature_mix_layer.active_out_channel
total_ops += count_conv(c_in, c_expand, size_out, 1, 1)
total_ops += count_conv(c_expand, c_out, 1, 1, 1)
# n_classes
total_ops += count_linear(c_out, self.n_classes)
return total_ops / 1e6
def load_weights_from_pretrained_models(self, checkpoint_path):
with open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
for k, v in self.state_dict().items():
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
| 20,925 | 41.189516 | 142 |
py
|
ENCAS
|
ENCAS-main/networks/ofa_mbv3_my.py
|
import copy
import torch
from ofa.imagenet_classification.elastic_nn.modules import DynamicMBConvLayer, DynamicConvLayer, DynamicLinearLayer
from ofa.imagenet_classification.elastic_nn.networks import OFAMobileNetV3
from ofa.imagenet_classification.networks import MobileNetV3
from ofa.utils import val2list, make_divisible, MyNetwork
from ofa.utils.layers import ConvLayer, MBConvLayer, ResidualBlock, IdentityLayer, LinearLayer, My2DLayer
from torch.utils.checkpoint import checkpoint_sequential
class OFAMobileNetV3My(OFAMobileNetV3):
def __init__(self, n_classes=1000, bn_param=(0.1, 1e-5), dropout_rate=0.1, base_stage_width=None, width_mult=1.0,
ks_list=3, expand_ratio_list=6, depth_list=4, if_use_gradient_checkpointing=False,
class_for_subnet=MobileNetV3, n_image_channels=3):
'''
differences to init of super:
1) several widths in each block instead of one => unneccessary, since NAT turned out to use 2 separate supernets
2) arbitrary n_image_channels => not used on cifars or imagenet
3) specify class_for_subnet => not used in classification
'''
self.width_mult = val2list(width_mult)
# self.width_mults = [1.0, 1.2]
self.ks_list = val2list(ks_list, 1)
self.expand_ratio_list = val2list(expand_ratio_list, 1)
self.depth_list = val2list(depth_list, 1)
self.n_image_channels = n_image_channels
self.ks_list.sort()
self.expand_ratio_list.sort()
self.depth_list.sort()
base_stage_width = [16, 16, 24, 40, 80, 112, 160, 960, 1280]
final_expand_width = [make_divisible(base_stage_width[-2] * w, MyNetwork.CHANNEL_DIVISIBLE) for w in
self.width_mult]
last_channel = [make_divisible(base_stage_width[-1] * w, MyNetwork.CHANNEL_DIVISIBLE) for w in self.width_mult]
stride_stages = [1, 2, 2, 2, 1, 2]
act_stages = ['relu', 'relu', 'relu', 'h_swish', 'h_swish', 'h_swish']
se_stages = [False, False, True, False, True, True]
n_block_list = [1] + [max(self.depth_list)] * 5
width_lists = []
for base_width in base_stage_width[:-2]:
width_list_cur = []
for w in self.width_mult:
width = make_divisible(base_width * w, MyNetwork.CHANNEL_DIVISIBLE)
width_list_cur.append(width)
width_lists.append(width_list_cur)
input_channel, first_block_dim = width_lists[0], width_lists[1]
# first conv layer
first_conv = DynamicConvLayer([self.n_image_channels], input_channel, kernel_size=3, stride=2, act_func='h_swish')
first_block_conv = DynamicMBConvLayer(
in_channel_list=input_channel, out_channel_list=first_block_dim, kernel_size_list=3,
stride=stride_stages[0],
expand_ratio_list=1, act_func=act_stages[0], use_se=se_stages[0],
)
first_block = ResidualBlock(
first_block_conv,
IdentityLayer(max(first_block_dim), max(first_block_dim)) if input_channel == first_block_dim else None,
)
# inverted residual blocks
self.block_group_info = []
blocks = [first_block]
_block_index = 1
feature_dim = first_block_dim
for width_list_cur, n_block, s, act_func, use_se in zip(width_lists[2:], n_block_list[1:],
stride_stages[1:], act_stages[1:], se_stages[1:]):
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width_list_cur
for i in range(n_block):
stride = s if i == 0 else 1
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=val2list(feature_dim), out_channel_list=val2list(width_list_cur),
kernel_size_list=ks_list, expand_ratio_list=expand_ratio_list,
stride=stride, act_func=act_func, use_se=use_se,
)
if stride == 1 and feature_dim == output_channel:
shortcut = IdentityLayer(max(feature_dim), max(feature_dim))
else:
shortcut = None
blocks.append(ResidualBlock(mobile_inverted_conv, shortcut))
feature_dim = output_channel
# final expand layer, feature mix layer & classifier
final_expand_layer = DynamicConvLayer(feature_dim, final_expand_width, kernel_size=1, act_func='h_swish')
feature_mix_layer = DynamicConvLayer(
final_expand_width, last_channel, kernel_size=1, use_bn=False, act_func='h_swish',
)
classifier = DynamicLinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
super(OFAMobileNetV3, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.if_use_gradient_checkpointing = if_use_gradient_checkpointing
self.class_for_subnet = class_for_subnet
def set_active_subnet(self, ks=None, e=None, d=None, w=None, **kwargs):
ks = val2list(ks, len(self.blocks) - 1)
expand_ratio = val2list(e, len(self.blocks) - 1)
depth = val2list(d, len(self.block_group_info))
        width_mult = 0  # since different widths turned out to mean different supernets, there is always just one width_mult, so we simply take index 0
self.first_conv.active_out_channel = self.first_conv.out_channel_list[width_mult]
self.blocks[0].conv.active_out_channel = self.blocks[0].conv.out_channel_list[width_mult]
self.final_expand_layer.active_out_channel = self.final_expand_layer.out_channel_list[width_mult]
self.feature_mix_layer.active_out_channel = self.feature_mix_layer.out_channel_list[width_mult]
for block, k, e in zip(self.blocks[1:], ks, expand_ratio):
if k is not None:
block.conv.active_kernel_size = k
if e is not None:
block.conv.active_expand_ratio = e
block.conv.active_out_channel = block.conv.out_channel_list[width_mult]
for i, d in enumerate(depth):
if d is not None:
self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
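    # Example call (illustrative): with the default 5 variable-depth stages and max depth 4 there are
    # len(self.blocks) - 1 = 20 per-layer slots for ks/e and 5 per-stage depth values, e.g.:
    #   supernet.set_active_subnet(ks=[3] * 20, e=[4] * 20, d=[2, 3, 2, 4, 3])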
def get_active_subnet(self, preserve_weight=True):
first_conv = self.first_conv.get_active_subnet(self.n_image_channels, preserve_weight)
blocks = [
ResidualBlock(self.blocks[0].conv.get_active_subnet(self.first_conv.active_out_channel),
copy.deepcopy(self.blocks[0].shortcut))
]
final_expand_layer = self.final_expand_layer.get_active_subnet(self.blocks[-1].conv.active_out_channel)
feature_mix_layer = self.feature_mix_layer.get_active_subnet(self.final_expand_layer.active_out_channel)
classifier = self.classifier.get_active_subnet(self.feature_mix_layer.active_out_channel)
input_channel = blocks[0].conv.out_channels
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
stage_blocks = []
for idx in active_idx:
stage_blocks.append(ResidualBlock(
self.blocks[idx].conv.get_active_subnet(input_channel, preserve_weight),
copy.deepcopy(self.blocks[idx].shortcut)
))
input_channel = self.blocks[idx].conv.active_out_channel
blocks += stage_blocks
_subnet = self.class_for_subnet(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
_subnet.set_bn_param(**self.get_bn_param())
return _subnet
def forward(self, x):
if not (self.if_use_gradient_checkpointing and self.training):
# first conv
x = self.first_conv(x)
# first block
x = self.blocks[0](x)
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
x = self.blocks[idx](x)
x = self.final_expand_layer(x)
else:
x = self.first_conv(x)
blocks_to_run = [self.blocks[0]]
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
blocks_to_run.append(self.blocks[idx])
blocks_to_run.append(self.final_expand_layer)
x = checkpoint_sequential(blocks_to_run, 2, x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = self.feature_mix_layer(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
| 9,234 | 48.12234 | 148 |
py
|
ENCAS
|
ENCAS-main/networks/proxyless_my.py
|
from ofa.imagenet_classification.elastic_nn.networks import OFAProxylessNASNets
from ofa.imagenet_classification.networks import ProxylessNASNets
import copy
from ofa.imagenet_classification.elastic_nn.modules import DynamicMBConvLayer
from ofa.utils import val2list, make_divisible, MyNetwork
from ofa.utils.layers import ConvLayer, MBConvLayer, ResidualBlock, IdentityLayer, LinearLayer
class OFAProxylessNASNetsMy(OFAProxylessNASNets):
def __init__(self, n_classes=1000, bn_param=(0.1, 1e-5), dropout_rate=0.1, base_stage_width=None, width_mult=1.0,
ks_list=3, expand_ratio_list=6, depth_list=4, if_use_gradient_checkpointing=False,
class_for_subnet=ProxylessNASNets, n_image_channels=3):
self.width_mult = width_mult
self.ks_list = val2list(ks_list, 1)
self.expand_ratio_list = val2list(expand_ratio_list, 1)
self.depth_list = val2list(depth_list, 1)
self.ks_list.sort()
self.expand_ratio_list.sort()
self.depth_list.sort()
self.n_image_channels = n_image_channels
self.class_for_subnet = class_for_subnet
if base_stage_width == 'google':
# MobileNetV2 Stage Width
base_stage_width = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
else:
# ProxylessNAS Stage Width
base_stage_width = [32, 16, 24, 40, 80, 96, 192, 320, 1280]
input_channel = make_divisible(base_stage_width[0] * self.width_mult, MyNetwork.CHANNEL_DIVISIBLE)
first_block_width = make_divisible(base_stage_width[1] * self.width_mult, MyNetwork.CHANNEL_DIVISIBLE)
last_channel = make_divisible(base_stage_width[-1] * self.width_mult, MyNetwork.CHANNEL_DIVISIBLE)
# first conv layer
first_conv = ConvLayer(
self.n_image_channels, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='relu6', ops_order='weight_bn_act'
)
# first block
first_block_conv = MBConvLayer(
in_channels=input_channel, out_channels=first_block_width, kernel_size=3, stride=1,
expand_ratio=1, act_func='relu6',
)
first_block = ResidualBlock(first_block_conv, None)
input_channel = first_block_width
# inverted residual blocks
self.block_group_info = []
blocks = [first_block]
_block_index = 1
stride_stages = [2, 2, 2, 1, 2, 1]
n_block_list = [max(self.depth_list)] * 5 + [1]
width_list = []
for base_width in base_stage_width[2:-1]:
width = make_divisible(base_width * self.width_mult, MyNetwork.CHANNEL_DIVISIBLE)
width_list.append(width)
for width, n_block, s in zip(width_list, n_block_list, stride_stages):
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width
for i in range(n_block):
if i == 0:
stride = s
else:
stride = 1
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=val2list(input_channel, 1), out_channel_list=val2list(output_channel, 1),
kernel_size_list=ks_list, expand_ratio_list=expand_ratio_list, stride=stride, act_func='relu6',
)
if stride == 1 and input_channel == output_channel:
shortcut = IdentityLayer(input_channel, input_channel)
else:
shortcut = None
mb_inverted_block = ResidualBlock(mobile_inverted_conv, shortcut)
blocks.append(mb_inverted_block)
input_channel = output_channel
# 1x1_conv before global average pooling
feature_mix_layer = ConvLayer(
input_channel, last_channel, kernel_size=1, use_bn=True, act_func='relu6',
)
classifier = LinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
super(OFAProxylessNASNets, self).__init__(first_conv, blocks, feature_mix_layer, classifier)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.width_mult = [self.width_mult]
def get_active_subnet(self, preserve_weight=True):
first_conv = copy.deepcopy(self.first_conv)
blocks = [copy.deepcopy(self.blocks[0])]
feature_mix_layer = copy.deepcopy(self.feature_mix_layer)
classifier = copy.deepcopy(self.classifier)
input_channel = blocks[0].conv.out_channels
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
stage_blocks = []
for idx in active_idx:
stage_blocks.append(ResidualBlock(
self.blocks[idx].conv.get_active_subnet(input_channel, preserve_weight),
copy.deepcopy(self.blocks[idx].shortcut)
))
input_channel = stage_blocks[-1].conv.out_channels
blocks += stage_blocks
_subnet = self.class_for_subnet(first_conv, blocks, feature_mix_layer, classifier)
_subnet.set_bn_param(**self.get_bn_param())
return _subnet
| 5,430 | 42.103175 | 131 |
py
|
ENCAS
|
ENCAS-main/networks/attentive_nas_static_model.py
|
# taken from https://github.com/facebookresearch/AttentiveNAS
# Difference: images not resized in forward, but beforehand, in the collator (which is faster)
import torch
import torch.nn as nn
from .modules_alphanet.nn_base import MyNetwork
class AttentiveNasStaticModel(MyNetwork):
def __init__(self, first_conv, blocks, last_conv, classifier, use_v3_head=True):
super(AttentiveNasStaticModel, self).__init__()
self.first_conv = first_conv
self.blocks = nn.ModuleList(blocks)
self.last_conv = last_conv
self.classifier = classifier
self.use_v3_head = use_v3_head
def forward(self, x):
x = self.first_conv(x)
for block in self.blocks:
x = block(x)
x = self.last_conv(x)
if not self.use_v3_head:
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = torch.squeeze(x)
x = self.classifier(x)
return x
@property
def module_str(self):
_str = self.first_conv.module_str + '\n'
for block in self.blocks:
_str += block.module_str + '\n'
#_str += self.last_conv.module_str + '\n'
_str += self.classifier.module_str
return _str
@property
def config(self):
return {
'name': AttentiveNasStaticModel.__name__,
'bn': self.get_bn_param(),
'first_conv': self.first_conv.config,
'blocks': [
block.config for block in self.blocks
],
#'last_conv': self.last_conv.config,
'classifier': self.classifier.config,
# 'resolution': self.resolution
}
def weight_initialization(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
@staticmethod
def build_from_config(config):
raise NotImplementedError
def reset_running_stats_for_calibration(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
m.training = True
m.momentum = None # cumulative moving average
m.reset_running_stats()
| 2,694 | 31.083333 | 113 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/dynamic_layers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .static_layers import MBInvertedConvLayer, ConvBnActLayer, LinearLayer, SELayer, ShortcutLayer
from .dynamic_ops import DynamicSeparableConv2d, DynamicPointConv2d, DynamicBatchNorm2d, DynamicLinear, DynamicSE
from .nn_utils import int2list, get_net_device, copy_bn, build_activation, make_divisible
from .nn_base import MyModule, MyNetwork
class DynamicMBConvLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list,
kernel_size_list=3, expand_ratio_list=6, stride=1, act_func='relu6', use_se=False, channels_per_group=1):
super(DynamicMBConvLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size_list = int2list(kernel_size_list, 1)
self.expand_ratio_list = int2list(expand_ratio_list, 1)
self.stride = stride
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
# build modules_alphanet
max_middle_channel = round(max(self.in_channel_list) * max(self.expand_ratio_list))
if max(self.expand_ratio_list) == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max(self.in_channel_list), max_middle_channel)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True)),
]))
self.depth_conv = nn.Sequential(OrderedDict([
('conv', DynamicSeparableConv2d(max_middle_channel, self.kernel_size_list, stride=self.stride, channels_per_group=self.channels_per_group)),
('bn', DynamicBatchNorm2d(max_middle_channel)),
('act', build_activation(self.act_func, inplace=True))
]))
if self.use_se:
self.depth_conv.add_module('se', DynamicSE(max_middle_channel))
self.point_linear = nn.Sequential(OrderedDict([
('conv', DynamicPointConv2d(max_middle_channel, max(self.out_channel_list))),
('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
]))
self.active_kernel_size = max(self.kernel_size_list)
self.active_expand_ratio = max(self.expand_ratio_list)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
if self.inverted_bottleneck is not None:
self.inverted_bottleneck.conv.active_out_channel = \
make_divisible(round(in_channel * self.active_expand_ratio), 8)
self.depth_conv.conv.active_kernel_size = self.active_kernel_size
self.point_linear.conv.active_out_channel = self.active_out_channel
if self.inverted_bottleneck is not None:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.use_se:
return 'SE(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
else:
return '(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
@property
def config(self):
return {
'name': DynamicMBConvLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size_list': self.kernel_size_list,
'expand_ratio_list': self.expand_ratio_list,
'stride': self.stride,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return DynamicMBConvLayer(**config)
############################################################################################
def get_active_subnet(self, in_channel, preserve_weight=True):
middle_channel = make_divisible(round(in_channel * self.active_expand_ratio), 8)
channels_per_group = self.depth_conv.conv.channels_per_group
# build the new layer
sub_layer = MBInvertedConvLayer(
in_channel, self.active_out_channel, self.active_kernel_size, self.stride, self.active_expand_ratio,
act_func=self.act_func, mid_channels=middle_channel, use_se=self.use_se, channels_per_group=channels_per_group
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
# copy weight from current layer
if sub_layer.inverted_bottleneck is not None:
sub_layer.inverted_bottleneck.conv.weight.data.copy_(
self.inverted_bottleneck.conv.conv.weight.data[:middle_channel, :in_channel, :, :]
)
copy_bn(sub_layer.inverted_bottleneck.bn, self.inverted_bottleneck.bn.bn)
sub_layer.depth_conv.conv.weight.data.copy_(
self.depth_conv.conv.get_active_filter(middle_channel, self.active_kernel_size).data
)
copy_bn(sub_layer.depth_conv.bn, self.depth_conv.bn.bn)
if self.use_se:
se_mid = make_divisible(middle_channel // SELayer.REDUCTION, divisor=8)
sub_layer.depth_conv.se.fc.reduce.weight.data.copy_(
self.depth_conv.se.fc.reduce.weight.data[:se_mid, :middle_channel, :, :]
)
sub_layer.depth_conv.se.fc.reduce.bias.data.copy_(self.depth_conv.se.fc.reduce.bias.data[:se_mid])
sub_layer.depth_conv.se.fc.expand.weight.data.copy_(
self.depth_conv.se.fc.expand.weight.data[:middle_channel, :se_mid, :, :]
)
sub_layer.depth_conv.se.fc.expand.bias.data.copy_(self.depth_conv.se.fc.expand.bias.data[:middle_channel])
sub_layer.point_linear.conv.weight.data.copy_(
self.point_linear.conv.conv.weight.data[:self.active_out_channel, :middle_channel, :, :]
)
copy_bn(sub_layer.point_linear.bn, self.point_linear.bn.bn)
return sub_layer
def re_organize_middle_weights(self, expand_ratio_stage=0):
raise NotImplementedError
#importance = torch.sum(torch.abs(self.point_linear.conv.conv.weight.data), dim=(0, 2, 3))
#if expand_ratio_stage > 0:
# sorted_expand_list = copy.deepcopy(self.expand_ratio_list)
# sorted_expand_list.sort(reverse=True)
# target_width = sorted_expand_list[expand_ratio_stage]
# target_width = round(max(self.in_channel_list) * target_width)
# importance[target_width:] = torch.arange(0, target_width - importance.size(0), -1)
#
#sorted_importance, sorted_idx = torch.sort(importance, dim=0, descending=True)
#self.point_linear.conv.conv.weight.data = torch.index_select(
# self.point_linear.conv.conv.weight.data, 1, sorted_idx
#)
#
#adjust_bn_according_to_idx(self.depth_conv.bn.bn, sorted_idx)
#self.depth_conv.conv.conv.weight.data = torch.index_select(
# self.depth_conv.conv.conv.weight.data, 0, sorted_idx
#)
#if self.use_se:
# # se expand: output dim 0 reorganize
# se_expand = self.depth_conv.se.fc.expand
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 0, sorted_idx)
# se_expand.bias.data = torch.index_select(se_expand.bias.data, 0, sorted_idx)
# # se reduce: input dim 1 reorganize
# se_reduce = self.depth_conv.se.fc.reduce
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 1, sorted_idx)
# # middle weight reorganize
# se_importance = torch.sum(torch.abs(se_expand.weight.data), dim=(0, 2, 3))
# se_importance, se_idx = torch.sort(se_importance, dim=0, descending=True)
# se_expand.weight.data = torch.index_select(se_expand.weight.data, 1, se_idx)
# se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 0, se_idx)
# se_reduce.bias.data = torch.index_select(se_reduce.bias.data, 0, se_idx)
#
## if inverted_bottleneck is None, the previous layer should be reorganized accordingly
#if self.inverted_bottleneck is not None:
# adjust_bn_according_to_idx(self.inverted_bottleneck.bn.bn, sorted_idx)
# self.inverted_bottleneck.conv.conv.weight.data = torch.index_select(
# self.inverted_bottleneck.conv.conv.weight.data, 0, sorted_idx
# )
# return None
#else:
# return sorted_idx
class DynamicConvBnActLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, kernel_size=3, stride=1, dilation=1,
use_bn=True, act_func='relu6'):
super(DynamicConvBnActLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.use_bn = use_bn
self.act_func = act_func
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation,
)
if self.use_bn:
self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
if self.act_func is not None:
self.act = build_activation(self.act_func, inplace=True)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act_func is not None:
x = self.act(x)
return x
@property
def module_str(self):
return 'DyConv(O%d, K%d, S%d)' % (self.active_out_channel, self.kernel_size, self.stride)
@property
def config(self):
return {
'name': DynamicConvBnActLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return DynamicConvBnActLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ConvBnActLayer(
in_channel, self.active_out_channel, self.kernel_size, self.stride, self.dilation,
use_bn=self.use_bn, act_func=self.act_func
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
if self.use_bn:
copy_bn(sub_layer.bn, self.bn.bn)
return sub_layer
class DynamicLinearLayer(MyModule):
def __init__(self, in_features_list, out_features, bias=True):
super(DynamicLinearLayer, self).__init__()
self.in_features_list = int2list(in_features_list)
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = DynamicLinear(
max_in_features=max(self.in_features_list), max_out_features=self.out_features, bias=self.bias
)
def forward(self, x):
#if self.dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return 'DyLinear(%d)' % self.out_features
@property
def config(self):
return {
'name': DynamicLinear.__name__,
'in_features_list': self.in_features_list,
'out_features': self.out_features,
'bias': self.bias
}
@staticmethod
def build_from_config(config):
return DynamicLinearLayer(**config)
def get_active_subnet(self, in_features, preserve_weight=True):
#sub_layer = LinearLayer(in_features, self.out_features, self.bias, dropout_rate=self.dropout_rate)
sub_layer = LinearLayer(in_features, self.out_features, self.bias)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.linear.weight.data.copy_(self.linear.linear.weight.data[:self.out_features, :in_features])
if self.bias:
sub_layer.linear.bias.data.copy_(self.linear.linear.bias.data[:self.out_features])
return sub_layer
class DynamicShortcutLayer(MyModule):
def __init__(self, in_channel_list, out_channel_list, reduction=1):
super(DynamicShortcutLayer, self).__init__()
self.in_channel_list = int2list(in_channel_list)
self.out_channel_list = int2list(out_channel_list)
self.reduction = reduction
self.conv = DynamicPointConv2d(
max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
kernel_size=1, stride=1,
)
self.active_out_channel = max(self.out_channel_list)
def forward(self, x):
in_channel = x.size(1)
#identity mapping
if in_channel == self.active_out_channel and self.reduction == 1:
return x
#average pooling, if size doesn't match
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
#1*1 conv, if #channels doesn't match
if in_channel != self.active_out_channel:
self.conv.active_out_channel = self.active_out_channel
x = self.conv(x)
return x
@property
def module_str(self):
return 'DyShortcut(O%d, R%d)' % (self.active_out_channel, self.reduction)
@property
def config(self):
return {
'name': DynamicShortcutLayer.__name__,
'in_channel_list': self.in_channel_list,
'out_channel_list': self.out_channel_list,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return DynamicShortcutLayer(**config)
def get_active_subnet(self, in_channel, preserve_weight=True):
sub_layer = ShortcutLayer(
in_channel, self.active_out_channel, self.reduction
)
sub_layer = sub_layer.to(get_net_device(self))
if not preserve_weight:
return sub_layer
sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
return sub_layer
| 15,686 | 38.916031 | 152 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/static_layers.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import torch.nn as nn
from .nn_utils import get_same_padding, build_activation, make_divisible, drop_connect
from .nn_base import MyModule
from .activations import *
def set_layer_from_config(layer_config):
if layer_config is None:
return None
name2layer = {
ConvBnActLayer.__name__: ConvBnActLayer,
IdentityLayer.__name__: IdentityLayer,
LinearLayer.__name__: LinearLayer,
MBInvertedConvLayer.__name__: MBInvertedConvLayer,
}
layer_name = layer_config.pop('name')
layer = name2layer[layer_name]
return layer.build_from_config(layer_config)
class SELayer(nn.Module):
REDUCTION = 4
def __init__(self, channel):
super(SELayer, self).__init__()
self.channel = channel
self.reduction = SELayer.REDUCTION
num_mid = make_divisible(self.channel // self.reduction, divisor=8)
self.fc = nn.Sequential(OrderedDict([
('reduce', nn.Conv2d(self.channel, num_mid, 1, 1, 0, bias=True)),
('relu', nn.ReLU(inplace=True)),
('expand', nn.Conv2d(num_mid, self.channel, 1, 1, 0, bias=True)),
('h_sigmoid', Hsigmoid(inplace=True)),
]))
def forward(self, x):
#x: N, C, H, W
y = x.mean(3, keepdim=True).mean(2, keepdim=True) # N, C, 1, 1
y = self.fc(y)
return x * y
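# Illustrative shape sketch added for clarity; not part of the original module, and the sizes are
# hypothetical. The SE block global-average-pools to per-channel statistics, runs them through the
# reduce/expand bottleneck, and rescales the input channel-wise, so the output shape matches the input.
def _example_se_layer_shapes():
    import torch
    se = SELayer(channel=32)          # bottleneck width = make_divisible(32 // 4, 8) = 8
    x = torch.randn(4, 32, 14, 14)
    return se(x).shape                # -> (4, 32, 14, 14), same as the input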
class ConvBnActLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, dilation=1, groups=1, bias=False,
use_bn=True, act_func='relu'):
super(ConvBnActLayer, self).__init__()
# default normal 3x3_Conv with bn and relu
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
self.bias = bias
self.use_bn = use_bn
self.act_func = act_func
pad = get_same_padding(self.kernel_size)
self.conv = nn.Conv2d(in_channels, out_channels, self.kernel_size,
stride, pad, dilation=dilation, groups=groups, bias=bias
)
if self.use_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.act = build_activation(self.act_func, inplace=True)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act:
x = self.act(x)
return x
@property
def module_str(self):
if isinstance(self.kernel_size, int):
kernel_size = (self.kernel_size, self.kernel_size)
else:
kernel_size = self.kernel_size
if self.groups == 1:
if self.dilation > 1:
conv_str = '%dx%d_DilatedConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_Conv' % (kernel_size[0], kernel_size[1])
else:
if self.dilation > 1:
conv_str = '%dx%d_DilatedGroupConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_GroupConv' % (kernel_size[0], kernel_size[1])
conv_str += '_O%d' % self.out_channels
return conv_str
@property
def config(self):
return {
'name': ConvBnActLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'groups': self.groups,
'bias': self.bias,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return ConvBnActLayer(**config)
class IdentityLayer(MyModule):
def __init__(self, ):
super(IdentityLayer, self).__init__()
def forward(self, x):
return x
@property
def module_str(self):
return 'Identity'
@property
def config(self):
return {
'name': IdentityLayer.__name__,
}
@staticmethod
def build_from_config(config):
return IdentityLayer(**config)
class LinearLayer(MyModule):
def __init__(self, in_features, out_features, bias=True):
super(LinearLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
#self.dropout_rate = dropout_rate
#if self.dropout_rate > 0:
# self.dropout = nn.Dropout(self.dropout_rate, inplace=True)
#else:
# self.dropout = None
self.linear = nn.Linear(in_features, out_features, bias)
def forward(self, x):
#if dropout is not None:
# x = self.dropout(x)
return self.linear(x)
@property
def module_str(self):
return '%dx%d_Linear' % (self.in_features, self.out_features)
@property
def config(self):
return {
'name': LinearLayer.__name__,
'in_features': self.in_features,
'out_features': self.out_features,
'bias': self.bias,
#'dropout_rate': self.dropout_rate,
}
@staticmethod
def build_from_config(config):
return LinearLayer(**config)
class ShortcutLayer(MyModule):
def __init__(self, in_channels, out_channels, reduction=1):
super(ShortcutLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.reduction = reduction
self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False)
def forward(self, x):
if self.reduction > 1:
padding = 0 if x.size(-1) % 2 == 0 else 1
x = F.avg_pool2d(x, self.reduction, padding=padding)
if self.in_channels != self.out_channels:
x = self.conv(x)
return x
@property
def module_str(self):
if self.in_channels == self.out_channels and self.reduction == 1:
conv_str = 'IdentityShortcut'
else:
if self.reduction == 1:
conv_str = '%d-%d_Shortcut' % (self.in_channels, self.out_channels)
else:
conv_str = '%d-%d_R%d_Shortcut' % (self.in_channels, self.out_channels, self.reduction)
return conv_str
@property
def config(self):
return {
'name': ShortcutLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'reduction': self.reduction,
}
@staticmethod
def build_from_config(config):
return ShortcutLayer(**config)
class MBInvertedConvLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False, channels_per_group=1):
super(MBInvertedConvLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.expand_ratio = expand_ratio
self.mid_channels = mid_channels
self.act_func = act_func
self.use_se = use_se
self.channels_per_group = channels_per_group
if self.mid_channels is None:
feature_dim = round(self.in_channels * self.expand_ratio)
else:
feature_dim = self.mid_channels
if self.expand_ratio == 1:
self.inverted_bottleneck = None
else:
self.inverted_bottleneck = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True)),
]))
assert feature_dim % self.channels_per_group == 0
active_groups = feature_dim // self.channels_per_group
pad = get_same_padding(self.kernel_size)
depth_conv_modules = [
('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=active_groups, bias=False)),
('bn', nn.BatchNorm2d(feature_dim)),
('act', build_activation(self.act_func, inplace=True))
]
if self.use_se:
depth_conv_modules.append(('se', SELayer(feature_dim)))
self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))
self.point_linear = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
('bn', nn.BatchNorm2d(out_channels)),
]))
def forward(self, x):
if self.inverted_bottleneck:
x = self.inverted_bottleneck(x)
x = self.depth_conv(x)
x = self.point_linear(x)
return x
@property
def module_str(self):
if self.mid_channels is None:
expand_ratio = self.expand_ratio
else:
expand_ratio = self.mid_channels // self.in_channels
layer_str = '%dx%d_MBConv%d_%s' % (self.kernel_size, self.kernel_size, expand_ratio, self.act_func.upper())
if self.use_se:
layer_str = 'SE_' + layer_str
layer_str += '_O%d' % self.out_channels
return layer_str
@property
def config(self):
return {
'name': MBInvertedConvLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'expand_ratio': self.expand_ratio,
'mid_channels': self.mid_channels,
'act_func': self.act_func,
'use_se': self.use_se,
'channels_per_group': self.channels_per_group,
}
@staticmethod
def build_from_config(config):
return MBInvertedConvLayer(**config)
class MobileInvertedResidualBlock(MyModule):
def __init__(self, mobile_inverted_conv, shortcut, drop_connect_rate=0):
super(MobileInvertedResidualBlock, self).__init__()
self.mobile_inverted_conv = mobile_inverted_conv
self.shortcut = shortcut
self.drop_connect_rate = drop_connect_rate
def forward(self, x):
in_channel = x.size(1)
if self.mobile_inverted_conv is None: # or isinstance(self.mobile_inverted_conv, ZeroLayer):
res = x
elif self.shortcut is None: # or isinstance(self.shortcut, ZeroLayer):
res = self.mobile_inverted_conv(x)
else:
im = self.shortcut(x)
x = self.mobile_inverted_conv(x)
if self.drop_connect_rate > 0 and in_channel == im.size(1) and self.shortcut.reduction == 1:
x = drop_connect(x, p=self.drop_connect_rate, training=self.training)
res = x + im
return res
@property
def module_str(self):
return '(%s, %s)' % (
self.mobile_inverted_conv.module_str if self.mobile_inverted_conv is not None else None,
self.shortcut.module_str if self.shortcut is not None else None
)
@property
def config(self):
return {
'name': MobileInvertedResidualBlock.__name__,
'mobile_inverted_conv': self.mobile_inverted_conv.config if self.mobile_inverted_conv is not None else None,
'shortcut': self.shortcut.config if self.shortcut is not None else None,
}
@staticmethod
def build_from_config(config):
mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])
shortcut = set_layer_from_config(config['shortcut'])
return MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)
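# Illustrative config round-trip sketch added for clarity; not part of the original module, and the
# hyper-parameters are hypothetical example values. Every static layer exposes a `config` dict and
# can be rebuilt via set_layer_from_config; note that set_layer_from_config pops the 'name' key,
# so pass a copy when the original dict is still needed.
def _example_layer_config_round_trip():
    layer = MBInvertedConvLayer(in_channels=16, out_channels=24, kernel_size=5,
                                stride=2, expand_ratio=6, act_func='relu6', use_se=True)
    cfg = dict(layer.config)                  # copy, because 'name' is popped below
    rebuilt = set_layer_from_config(cfg)      # -> a fresh MBInvertedConvLayer with identical settings
    return layer.module_str, rebuilt.module_str   # both 'SE_5x5_MBConv6_RELU6_O24'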
| 12,039 | 30.76781 | 131 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/activations.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import torch.nn.functional as F
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
#class Swish(nn.Module):
# def __init__(self, inplace=True):
# super(Swish, self).__init__()
# self.inplace = inplace
#
# def forward(self, x):
# return x * torch.sigmoid(x)
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3., inplace=self.inplace) / 6.
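# Worked values added for clarity; not part of the original module. The hard activations above are
# piecewise-linear approximations:
#   Hsigmoid(x) = relu6(x + 3) / 6  -> Hsigmoid(0) = 0.5, Hsigmoid(3) = 1.0
#   Hswish(x)   = x * Hsigmoid(x)   -> Hswish(0)   = 0.0, Hswish(3)   = 3.0
def _example_hard_activation_values():
    x = torch.tensor([0.0, 3.0])
    return Hswish(inplace=False)(x), Hsigmoid(inplace=False)(x)   # (tensor([0., 3.]), tensor([0.5, 1.0]))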
| 1,405 | 24.107143 | 70 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/dynamic_ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import torch.distributed as dist
from .nn_utils import get_same_padding, make_divisible, sub_filter_start_end
from .static_layers import SELayer
class DynamicSeparableConv2d(nn.Module):
KERNEL_TRANSFORM_MODE = None # None or 1
def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1, channels_per_group=1):
super(DynamicSeparableConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.channels_per_group = channels_per_group
assert self.max_in_channels % self.channels_per_group == 0
self.kernel_size_list = kernel_size_list
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_in_channels, max(self.kernel_size_list), self.stride,
groups=self.max_in_channels // self.channels_per_group, bias=False,
)
self._ks_set = list(set(self.kernel_size_list))
self._ks_set.sort() # e.g., [3, 5, 7]
if self.KERNEL_TRANSFORM_MODE is not None:
# register scaling parameters
# 7to5_matrix, 5to3_matrix
scale_params = {}
for i in range(len(self._ks_set) - 1):
ks_small = self._ks_set[i]
ks_larger = self._ks_set[i + 1]
param_name = '%dto%d' % (ks_larger, ks_small)
scale_params['%s_matrix' % param_name] = Parameter(torch.eye(ks_small ** 2))
for name, param in scale_params.items():
self.register_parameter(name, param)
self.active_kernel_size = max(self.kernel_size_list)
def get_active_filter(self, in_channel, kernel_size):
out_channel = in_channel
max_kernel_size = max(self.kernel_size_list)
start, end = sub_filter_start_end(max_kernel_size, kernel_size)
filters = self.conv.weight[:out_channel, :in_channel, start:end, start:end]
if self.KERNEL_TRANSFORM_MODE is not None and kernel_size < max_kernel_size:
start_filter = self.conv.weight[:out_channel, :in_channel, :, :] # start with max kernel
for i in range(len(self._ks_set) - 1, 0, -1):
src_ks = self._ks_set[i]
if src_ks <= kernel_size:
break
target_ks = self._ks_set[i - 1]
start, end = sub_filter_start_end(src_ks, target_ks)
_input_filter = start_filter[:, :, start:end, start:end]
_input_filter = _input_filter.contiguous()
_input_filter = _input_filter.view(_input_filter.size(0), _input_filter.size(1), -1)
_input_filter = _input_filter.view(-1, _input_filter.size(2))
_input_filter = F.linear(
_input_filter, self.__getattr__('%dto%d_matrix' % (src_ks, target_ks)),
)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks ** 2)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks, target_ks)
start_filter = _input_filter
filters = start_filter
return filters
def forward(self, x, kernel_size=None):
if kernel_size is None:
kernel_size = self.active_kernel_size
in_channel = x.size(1)
assert in_channel % self.channels_per_group == 0
filters = self.get_active_filter(in_channel, kernel_size).contiguous()
padding = get_same_padding(kernel_size)
y = F.conv2d(
x, filters, None, self.stride, padding, self.dilation, in_channel // self.channels_per_group
)
return y
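# Illustrative sketch added for clarity; not part of the original module, and the channel counts are
# hypothetical. The active kernel size is realized by center-cropping the largest stored filter
# (e.g. sub_filter_start_end(7, 3) == (2, 5) picks the central 3x3 patch of a 7x7 weight), and
# 'same' padding keeps the spatial size unchanged for every kernel choice.
def _example_dynamic_separable_conv():
    conv = DynamicSeparableConv2d(max_in_channels=32, kernel_size_list=[3, 5, 7], stride=1)
    x = torch.randn(2, 16, 8, 8)         # only the first 16 depthwise filters are used
    y_large = conv(x)                    # default: the largest kernel (7x7)
    y_small = conv(x, kernel_size=3)     # same weights, central 3x3 crop
    return y_large.shape, y_small.shape  # both (2, 16, 8, 8)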
class DynamicPointConv2d(nn.Module):
def __init__(self, max_in_channels, max_out_channels, kernel_size=1, stride=1, dilation=1):
super(DynamicPointConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.max_out_channels = max_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_out_channels, self.kernel_size, stride=self.stride, bias=False,
)
self.active_out_channel = self.max_out_channels
def forward(self, x, out_channel=None):
if out_channel is None:
out_channel = self.active_out_channel
in_channel = x.size(1)
filters = self.conv.weight[:out_channel, :in_channel, :, :].contiguous()
padding = get_same_padding(self.kernel_size)
y = F.conv2d(x, filters, None, self.stride, padding, self.dilation, 1)
return y
class DynamicLinear(nn.Module):
def __init__(self, max_in_features, max_out_features, bias=True):
super(DynamicLinear, self).__init__()
self.max_in_features = max_in_features
self.max_out_features = max_out_features
self.bias = bias
self.linear = nn.Linear(self.max_in_features, self.max_out_features, self.bias)
self.active_out_features = self.max_out_features
def forward(self, x, out_features=None):
if out_features is None:
out_features = self.active_out_features
in_features = x.size(1)
weight = self.linear.weight[:out_features, :in_features].contiguous()
bias = self.linear.bias[:out_features] if self.bias else None
y = F.linear(x, weight, bias)
return y
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
# Use allgather instead of allreduce since I don't trust in-place operations ..
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class DynamicBatchNorm2d(nn.Module):
'''
    1. doesn't accumulate bn statistics (momentum=0.)
    2. calculates BN statistics of all subnets after training
3. bn weights are shared
https://arxiv.org/abs/1903.05134
https://detectron2.readthedocs.io/_modules/detectron2/layers/batch_norm.html
'''
#SET_RUNNING_STATISTICS = False
def __init__(self, max_feature_dim):
super(DynamicBatchNorm2d, self).__init__()
self.max_feature_dim = max_feature_dim
self.bn = nn.BatchNorm2d(self.max_feature_dim)
        #self.exponential_average_factor = 0 #doesn't accumulate bn stats
self.need_sync = False
# reserved to tracking the performance of the largest and smallest network
self.bn_tracking = nn.ModuleList(
[
nn.BatchNorm2d(self.max_feature_dim, affine=False),
nn.BatchNorm2d(self.max_feature_dim, affine=False)
]
)
def forward(self, x):
feature_dim = x.size(1)
if not self.training:
raise ValueError('DynamicBN only supports training')
bn = self.bn
# need_sync
if not self.need_sync:
return F.batch_norm(
x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
bn.momentum, bn.eps,
)
else:
assert dist.get_world_size() > 1, 'SyncBatchNorm requires >1 world size'
B, C = x.shape[0], x.shape[1]
mean = torch.mean(x, dim=[0, 2, 3])
meansqr = torch.mean(x * x, dim=[0, 2, 3])
assert B > 0, 'does not support zero batch size'
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
invstd = torch.rsqrt(var + bn.eps)
scale = bn.weight[:feature_dim] * invstd
bias = bn.bias[:feature_dim] - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
#if bn.num_features == feature_dim or DynamicBatchNorm2d.SET_RUNNING_STATISTICS:
# return bn(x)
#else:
# exponential_average_factor = 0.0
# if bn.training and bn.track_running_stats:
# # if statement only here to tell the jit to skip emitting this when it is None
# if bn.num_batches_tracked is not None:
# bn.num_batches_tracked += 1
# if bn.momentum is None: # use cumulative moving average
# exponential_average_factor = 1.0 / float(bn.num_batches_tracked)
# else: # use exponential moving average
# exponential_average_factor = bn.momentum
# return F.batch_norm(
# x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
# bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
# exponential_average_factor, bn.eps,
# )
class DynamicSE(SELayer):
def __init__(self, max_channel):
super(DynamicSE, self).__init__(max_channel)
def forward(self, x):
in_channel = x.size(1)
num_mid = make_divisible(in_channel // self.reduction, divisor=8)
y = x.mean(3, keepdim=True).mean(2, keepdim=True)
# reduce
reduce_conv = self.fc.reduce
reduce_filter = reduce_conv.weight[:num_mid, :in_channel, :, :].contiguous()
reduce_bias = reduce_conv.bias[:num_mid] if reduce_conv.bias is not None else None
y = F.conv2d(y, reduce_filter, reduce_bias, 1, 0, 1, 1)
# relu
y = self.fc.relu(y)
# expand
expand_conv = self.fc.expand
expand_filter = expand_conv.weight[:in_channel, :num_mid, :, :].contiguous()
expand_bias = expand_conv.bias[:in_channel] if expand_conv.bias is not None else None
y = F.conv2d(y, expand_filter, expand_bias, 1, 0, 1, 1)
# hard sigmoid
y = self.fc.h_sigmoid(y)
return x * y
| 10,816 | 39.211896 | 106 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/nn_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import math
import torch
import torch.nn as nn
try:
from fvcore.common.file_io import PathManager
except:
pass
class MyModule(nn.Module):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
class MyNetwork(MyModule):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
if momentum is not None:
m.momentum = float(momentum)
else:
m.momentum = None
m.eps = float(eps)
return
def get_bn_param(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.SyncBatchNorm):
return {
'momentum': m.momentum,
'eps': m.eps,
}
return None
def init_model(self, model_init):
""" Conv2d, BatchNorm2d, BatchNorm1d, Linear, """
for m in self.modules():
if isinstance(m, nn.Conv2d):
if model_init == 'he_fout':
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif model_init == 'he_fin':
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
else:
raise NotImplementedError
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
stdv = 1. / math.sqrt(m.weight.size(1))
m.weight.data.uniform_(-stdv, stdv)
if m.bias is not None:
m.bias.data.zero_()
def get_parameters(self, keys=None, mode='include', exclude_set=None):
if exclude_set is None:
exclude_set = {}
if keys is None:
for name, param in self.named_parameters():
if name not in exclude_set:
yield param
elif mode == 'include':
for name, param in self.named_parameters():
flag = False
for key in keys:
if key in name:
flag = True
break
if flag and name not in exclude_set:
yield param
elif mode == 'exclude':
for name, param in self.named_parameters():
flag = True
for key in keys:
if key in name:
flag = False
break
if flag and name not in exclude_set:
yield param
else:
raise ValueError('do not support: %s' % mode)
def weight_parameters(self, exclude_set=None):
return self.get_parameters(exclude_set=exclude_set)
def load_weights_from_pretrained_models(self, checkpoint_path, load_from_ema=False):
try:
with PathManager.open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
except:
with open(checkpoint_path, 'rb') as f:
checkpoint = torch.load(f, map_location='cpu')
assert isinstance(checkpoint, dict)
pretrained_state_dicts = checkpoint['state_dict']
if load_from_ema and 'state_dict_ema' in checkpoint:
pretrained_state_dicts = checkpoint['state_dict_ema']
for k, v in self.state_dict().items():
name = k
if not load_from_ema:
name = 'module.' + k if not k.startswith('module') else k
v.copy_(pretrained_state_dicts[name])
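# Illustrative sketch added for clarity; not part of the original module, and _TinyNet is a
# hypothetical subclass used only for the example. get_parameters filters parameters by name
# substrings, which is how batch-norm parameters are typically excluded from weight decay
# (see the no_decay_keys convention used by the run configs).
def _example_get_parameters_filtering():
    class _TinyNet(MyNetwork):
        def __init__(self):
            super(_TinyNet, self).__init__()
            self.conv = nn.Conv2d(3, 8, 3, padding=1)
            self.bn = nn.BatchNorm2d(8)
        def forward(self, x):
            return self.bn(self.conv(x))
    net = _TinyNet()
    decay_params = list(net.get_parameters(keys=['bn'], mode='exclude'))     # conv weight + bias
    no_decay_params = list(net.get_parameters(keys=['bn'], mode='include'))  # bn weight + bias
    return len(decay_params), len(no_decay_params)                           # (2, 2)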
| 4,767 | 30.576159 | 113 |
py
|
ENCAS
|
ENCAS-main/networks/modules_alphanet/nn_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch.nn as nn
from .activations import *
def make_divisible(v, divisor=8, min_value=1):
"""
forked from slim:
https://github.com/tensorflow/models/blob/\
0344c5503ee55e24f0de7f37336a6e08f10976fd/\
research/slim/nets/mobilenet/mobilenet.py#L62-L69
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
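# Worked values added for clarity; not part of the original module. make_divisible rounds a channel
# count to the nearest multiple of `divisor` and bumps it up one step whenever rounding down would
# lose more than ~10% of the original value.
def _example_make_divisible_values():
    assert make_divisible(32, 8) == 32   # already a multiple of 8
    assert make_divisible(17, 8) == 16   # nearest multiple, within the 10% tolerance
    assert make_divisible(10, 8) == 16   # 8 would be < 0.9 * 10, so it is bumped to 16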
def sub_filter_start_end(kernel_size, sub_kernel_size):
center = kernel_size // 2
dev = sub_kernel_size // 2
start, end = center - dev, center + dev + 1
assert end - start == sub_kernel_size
return start, end
def get_net_device(net):
return net.parameters().__next__().device
def int2list(val, repeat_time=1):
if isinstance(val, list):
return val
elif isinstance(val, tuple):
return list(val)
else:
return [val for _ in range(repeat_time)]
def get_same_padding(kernel_size):
if isinstance(kernel_size, tuple):
assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
p1 = get_same_padding(kernel_size[0])
p2 = get_same_padding(kernel_size[1])
return p1, p2
assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
assert kernel_size % 2 > 0, 'kernel size should be odd number'
return kernel_size // 2
def copy_bn(target_bn, src_bn):
feature_dim = target_bn.num_features
target_bn.weight.data.copy_(src_bn.weight.data[:feature_dim])
target_bn.bias.data.copy_(src_bn.bias.data[:feature_dim])
target_bn.running_mean.data.copy_(src_bn.running_mean.data[:feature_dim])
target_bn.running_var.data.copy_(src_bn.running_var.data[:feature_dim])
def build_activation(act_func, inplace=True):
if act_func == 'relu':
return nn.ReLU(inplace=inplace)
elif act_func == 'relu6':
return nn.ReLU6(inplace=inplace)
elif act_func == 'tanh':
return nn.Tanh()
elif act_func == 'sigmoid':
return nn.Sigmoid()
elif act_func == 'h_swish':
return Hswish(inplace=inplace)
elif act_func == 'h_sigmoid':
return Hsigmoid(inplace=inplace)
elif act_func == 'swish':
return MemoryEfficientSwish()
elif act_func is None:
return None
else:
raise ValueError('do not support: %s' % act_func)
def drop_connect(inputs, p, training):
"""Drop connect.
Args:
        inputs (tensor: BCHW): Input of this structure.
p (float: 0.0~1.0): Probability of drop connection.
training (bool): The running mode.
Returns:
output: Output after drop connection.
"""
assert 0 <= p <= 1, 'p must be in range of [0,1]'
if not training:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1.0 - p
# generate binary_tensor mask according to probability (p for 0, 1-p for 1)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
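# Illustrative sketch added for clarity; not part of the original module. drop_connect zeroes whole
# samples with probability p during training and rescales the survivors by 1 / (1 - p), so the
# expected activation is unchanged; at inference time it is the identity.
def _example_drop_connect_behavior():
    import torch
    x = torch.ones(8, 4, 2, 2)
    assert torch.equal(drop_connect(x, p=0.2, training=False), x)   # identity when not training
    y = drop_connect(x, p=0.2, training=True)
    return y[:, 0, 0, 0]   # each sample is either 0.0 (dropped) or 1 / 0.8 == 1.25 (kept, rescaled)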
| 3,431 | 29.918919 | 96 |
py
|
ENCAS
|
ENCAS-main/run_manager/run_manager_my.py
|
from collections import defaultdict
import time
import torch.nn as nn
import torch.nn.parallel
import torch.optim
from sklearn.metrics import balanced_accuracy_score
from tqdm import tqdm
import torchvision
from ofa.utils import AverageMeter, accuracy
class RunManagerMy:
def __init__(self, net, run_config, no_gpu=False, sec_obj='flops'):
self.device = torch.device('cuda:0' if torch.cuda.is_available() and (not no_gpu) else 'cpu')
self.is_ensemble = isinstance(net, list)
if self.is_ensemble:
for net_i in net:
net_i.to(self.device)
else:
net.to(self.device)
self.accuracy = accuracy
self.get_scalar_from_accuracy = lambda acc: acc[0].item()
self.if_enough_vram = False
self.sec_obj = sec_obj
self.run_config = run_config
self.test_criterion = nn.CrossEntropyLoss()
def update_metric(self, metric_dict, output, labels):
acc1 = self.accuracy(output, labels, topk=(1,))
acc1 = self.get_scalar_from_accuracy(acc1)
metric_dict['top1'].update(acc1, output.size(0))
def validate(self, is_test=False, net=None, data_loader=None, no_logs=False,
if_return_outputs=False, resolutions_list=None, thresholds=None, if_return_logit_gaps=False,
if_use_logit_gaps=False):
assert not(if_use_logit_gaps and if_return_logit_gaps) # they aren't really mutually exclusive, but it's simpler this way
if if_return_outputs:
outputs_to_return = []
if if_return_logit_gaps:
logit_gaps_to_return = []
if data_loader is None:
if is_test:
data_loader = self.run_config.test_loader
else:
data_loader = self.run_config.valid_loader
if not self.is_ensemble:
net.eval()
else:
for net_cur in net:
net_cur.eval()
losses = AverageMeter()
metric_dict = defaultdict(lambda: AverageMeter())
if_cascade = thresholds is not None
if if_cascade:
n_not_predicted_per_stage = [0 for _ in range(len(net) - 1)]
with torch.no_grad(), torch.cuda.amp.autocast():
with tqdm(total=len(data_loader), desc='Evaluate ', disable=no_logs) as t:
st = time.time()
for i, (images, labels, *_) in enumerate(data_loader):
images, labels = images.to(self.device), labels.to(self.device)
images_orig = None # don't make a backup unless I need to
if not self.is_ensemble:
output = net(images)
else:
out_logits = net[0](images)
output = torch.nn.functional.softmax(out_logits, dim=1)
if if_return_logit_gaps or if_use_logit_gaps:
                            # it only makes sense to store the logit gaps if this is a separate network, not an ensemble/cascade
two_max_values = out_logits.topk(k=2, dim=-1).values
logit_gap = two_max_values[:, 0] - two_max_values[:, 1]
if if_cascade:
idx_more_predictions_needed = torch.ones(images.shape[0], dtype=torch.bool)
i_net = 1
for net_cur in net[1:]:
if if_cascade:
cur_threshold = thresholds[i_net - 1]
if if_use_logit_gaps:
idx_more_predictions_needed[logit_gap >= cur_threshold] = False
else:
idx_more_predictions_needed[torch.max(output, dim=1).values >= cur_threshold] = False
output_tmp = output[idx_more_predictions_needed]
if len(output_tmp) == 0:
n_not_predicted = 0
else:
if if_use_logit_gaps:
logit_gap_tmp = logit_gap[idx_more_predictions_needed]
not_predicted_idx = logit_gap_tmp < cur_threshold
else:
not_predicted_idx = torch.max(output_tmp, dim=1).values < cur_threshold
n_not_predicted = torch.sum(not_predicted_idx).item()
n_not_predicted_per_stage[i_net - 1] += n_not_predicted
if n_not_predicted == 0:
break
if resolutions_list is not None:
if resolutions_list[i_net] != resolutions_list[i_net - 1]:
if images_orig is None:
images_orig = torch.clone(images)
r = resolutions_list[i_net]
images = torchvision.transforms.functional.resize(images_orig, (r, r))
if not if_cascade:
output_cur = torch.nn.functional.softmax(net_cur(images), dim=1)
else:
out_logits = net_cur(images[idx_more_predictions_needed][not_predicted_idx])
if len(out_logits.shape) < 2: #a single image is left in the batch, need to fix dim
out_logits = out_logits[None,...]
output_cur = torch.nn.functional.softmax(out_logits, dim=1)
if not if_cascade:
output += output_cur
else:
if if_use_logit_gaps:
# firstly, need to overwrite previous predictions (because they didn't really happen if the gap was too small)
output_tmp[not_predicted_idx] = output_cur
output[idx_more_predictions_needed] = output_tmp
# secondly, need to update the logit gap
two_max_values = out_logits.topk(k=2, dim=-1).values
logit_gap_tmp[not_predicted_idx] = two_max_values[:, 0] - two_max_values[:, 1]
# note that the gap for the previously predicted values will be wrong, but it doesn't matter
# because the idx for them has already been set to False
logit_gap[idx_more_predictions_needed] = logit_gap_tmp
else:
n_nets_used_in_cascade = i_net + 1
coeff1 = ((n_nets_used_in_cascade - 1) / n_nets_used_in_cascade)
coeff2 = (1 / n_nets_used_in_cascade)
output_tmp[not_predicted_idx] = coeff1 * output_tmp[not_predicted_idx] \
+ coeff2 * output_cur #don't need idx here because had it for images passed to the net_cur
output[idx_more_predictions_needed] = output_tmp
i_net += 1
if not if_cascade:
output /= len(net)
if if_return_outputs:
outputs_to_return.append(output.detach().cpu())
if if_return_logit_gaps:
logit_gaps_to_return.append(logit_gap.detach().cpu())
loss = self.test_criterion(output, labels)
self.update_metric(metric_dict, output, labels)
losses.update(loss.item(), images.size(0))
t.set_postfix({'loss': losses.avg,
**self.get_metric_vals(metric_dict),
'img_size': images.size(2)})
t.update(1)
ed = time.time()
print(f'Forward time {ed - st}')
dict_to_return = self.get_metric_vals(metric_dict)
if if_return_outputs:
outputs_to_return = torch.cat(outputs_to_return, dim=0).numpy()
dict_to_return['output_distr'] = outputs_to_return
if if_return_logit_gaps:
logit_gaps_to_return = torch.cat(logit_gaps_to_return, dim=0).numpy()
dict_to_return['logit_gaps'] = logit_gaps_to_return
if if_cascade:
dict_to_return['n_not_predicted_per_stage'] = n_not_predicted_per_stage
return losses.avg, dict_to_return
def get_metric_vals(self, metric_dict):
return {key: metric_dict[key].avg for key in metric_dict}
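# Illustrative sketch added for clarity; not part of the original class. It reduces the
# confidence-threshold cascade implemented in validate() above to its essence for two stages and a
# single batch: samples whose max softmax probability already exceeds the stage threshold keep their
# prediction, the rest are re-evaluated by the next network and the softmax outputs are
# running-averaged (the coeff1/coeff2 update above). The tensors and the threshold value are
# hypothetical inputs supplied by the caller.
def _example_cascade_step(softmax_stage1, softmax_stage2, threshold=0.9):
    # softmax_stage1 / softmax_stage2: (batch, n_classes) softmax outputs of two cascade stages
    needs_more = softmax_stage1.max(dim=1).values < threshold     # undecided samples
    combined = softmax_stage1.clone()
    combined[needs_more] = 0.5 * softmax_stage1[needs_more] + 0.5 * softmax_stage2[needs_more]
    return combined, int(needs_more.sum())                        # fused outputs, #samples passed on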
| 9,055 | 50.748571 | 155 |
py
|
ENCAS
|
ENCAS-main/run_manager/run_config_my.py
|
import math
from ofa.imagenet_classification.run_manager import RunConfig
from ofa.utils import calc_learning_rate
class RunConfigMy(RunConfig):
def __init__(self, n_epochs, init_lr, lr_schedule_type, lr_schedule_param, dataset, train_batch_size,
test_batch_size, valid_size, opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
mixup_alpha, model_init, validation_frequency, print_frequency, total_epochs):
super().__init__(n_epochs, init_lr, lr_schedule_type, lr_schedule_param, dataset, train_batch_size,
test_batch_size, valid_size, opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
mixup_alpha, model_init, validation_frequency, print_frequency)
self.total_epochs = total_epochs
def copy(self):
return RunConfigMy(**self.config)
""" learning rate """
def adjust_learning_rate(self, optimizer, epoch, batch=0, nBatch=None, epoch_cumulative=None,
n_epochs_in_block_dynamic=None, n_epochs_in_block=None):
""" adjust learning of a given optimizer and return the new learning rate """
if self.lr_schedule_type == 'cosine_nocycle':
# cosine anneal to 0 over all the epochs
t_total = self.total_epochs * nBatch
t_cur = epoch_cumulative * nBatch + batch
new_lr = 0.5 * self.init_lr * (1 + math.cos(math.pi * t_cur / t_total))
else:
new_lr = calc_learning_rate(epoch, self.init_lr, self.n_epochs, batch, nBatch, self.lr_schedule_type)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
return new_lr
def random_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
return self.data_provider.build_sub_train_loader(n_images, batch_size, img_size, num_worker, num_replicas, rank)
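# Illustrative sketch added for clarity; not part of the original class. The 'cosine_nocycle'
# schedule above anneals the learning rate from init_lr to ~0 over all epochs:
#   lr(t) = 0.5 * init_lr * (1 + cos(pi * t / T)),  t = epoch * nBatch + batch,  T = total_epochs * nBatch
# The numbers below are hypothetical example values.
def _example_cosine_nocycle_lr(init_lr=0.01, total_epochs=100, n_batches=100):
    import math
    t_total = total_epochs * n_batches
    checkpoints = [(0, 0), (50, 0), (99, n_batches - 1)]          # start, halfway, last batch
    return [0.5 * init_lr * (1 + math.cos(math.pi * (epoch * n_batches + batch) / t_total))
            for epoch, batch in checkpoints]                      # ~[0.01, 0.005, ~0.0]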
| 1,958 | 50.552632 | 120 |
py
|
ENCAS
|
ENCAS-main/run_manager/__init__.py
|
from data_providers.imagenet import *
from data_providers.cifar import CIFAR10DataProvider, CIFAR100DataProvider
from ofa.imagenet_classification.run_manager.run_config import RunConfig
from run_manager.run_config_my import RunConfigMy
class ImagenetRunConfig(RunConfig):
def __init__(self, n_epochs=1, init_lr=1e-4, lr_schedule_type='cosine', lr_schedule_param=None,
dataset='imagenet', train_batch_size=128, test_batch_size=512, valid_size=None,
opt_type='sgd', opt_param=None, weight_decay=4e-5, label_smoothing=0.0, no_decay_keys=None,
mixup_alpha=None,
model_init='he_fout', validation_frequency=1, print_frequency=10,
n_worker=32, resize_scale=0.08, distort_color='tf', image_size=224,
data_path='/mnt/datastore/ILSVRC2012',
**kwargs):
super(ImagenetRunConfig, self).__init__(
n_epochs, init_lr, lr_schedule_type, lr_schedule_param,
dataset, train_batch_size, test_batch_size, valid_size,
opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
mixup_alpha,
model_init, validation_frequency, print_frequency
)
self.n_worker = n_worker
self.resize_scale = resize_scale
self.distort_color = distort_color
self.image_size = image_size
self.imagenet_data_path = data_path
self.kwargs = kwargs
@property
def data_provider(self):
if self.__dict__.get('_data_provider', None) is None:
if self.dataset == ImagenetDataProvider.name():
DataProviderClass = ImagenetDataProvider
else:
raise NotImplementedError
self.__dict__['_data_provider'] = DataProviderClass(
save_path=self.imagenet_data_path,
train_batch_size=self.train_batch_size, test_batch_size=self.test_batch_size,
valid_size=self.valid_size, n_worker=self.n_worker, resize_scale=self.resize_scale,
distort_color=self.distort_color, image_size=self.image_size, **self.kwargs
)
return self.__dict__['_data_provider']
class CIFARRunConfig(RunConfigMy):
def __init__(self, n_epochs=5, init_lr=0.01, lr_schedule_type='cosine', lr_schedule_param=None,
dataset='cifar10', train_batch_size=96, test_batch_size=256, valid_size=None,
opt_type='sgd', opt_param=None, weight_decay=4e-5, label_smoothing=0.0, no_decay_keys=None,
mixup_alpha=None,
model_init='he_fout', validation_frequency=1, print_frequency=10,
n_worker=2, resize_scale=0.08, distort_color=None, image_size=224,
data_path='/mnt/datastore/CIFAR',
**kwargs):
super(CIFARRunConfig, self).__init__(
n_epochs, init_lr, lr_schedule_type, lr_schedule_param,
dataset, train_batch_size, test_batch_size, valid_size,
opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
mixup_alpha,
model_init, validation_frequency, print_frequency, kwargs['total_epochs']
)
self.n_worker = n_worker
self.resize_scale = resize_scale
self.distort_color = distort_color
self.image_size = image_size
self.cifar_data_path = data_path
self.kwargs = kwargs
@property
def data_provider(self):
if self.__dict__.get('_data_provider', None) is None:
if self.dataset == CIFAR10DataProvider.name():
DataProviderClass = CIFAR10DataProvider
elif self.dataset == CIFAR100DataProvider.name():
DataProviderClass = CIFAR100DataProvider
else:
raise NotImplementedError
self.__dict__['_data_provider'] = DataProviderClass(
save_path=self.cifar_data_path,
train_batch_size=self.train_batch_size, test_batch_size=self.test_batch_size,
valid_size=self.valid_size, n_worker=self.n_worker, resize_scale=self.resize_scale,
distort_color=self.distort_color, image_size=self.image_size, **self.kwargs
)
return self.__dict__['_data_provider']
def get_run_config(**kwargs):
if 'init_lr' in kwargs:
if kwargs['init_lr'] is None: # use dataset-specific init_lr by default
del kwargs['init_lr']
if kwargs['dataset'] == 'imagenet':
run_config = ImagenetRunConfig(**kwargs)
elif kwargs['dataset'].startswith('cifar'):
run_config = CIFARRunConfig(**kwargs)
else:
raise NotImplementedError
return run_config
| 4,715 | 45.693069 | 108 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/predictor_container.py
|
import numpy as np
class PredictorContainer:
'''
Contains several predictors
'''
def __init__(self, predictors, name, **kwargs) -> None:
self.predictors = predictors
self.name = name
self.predictor_input_keys = kwargs.get('predictor_input_keys', None)
def fit(self, X, y, **kwargs):
raise NotImplementedError('predictors assumed to be diverse => need to be fitted separately & in advance')
def predict(self, X):
if self.predictor_input_keys is None: #inputs are the same for all predictors
preds = [p.predict(X) for p in self.predictors]
else:
preds = [p.predict(X[p_key] if not type(p_key) is list else [X[p_key_i] for p_key_i in p_key])
for p, p_key in zip(self.predictors, self.predictor_input_keys)]
predictions = np.concatenate(preds, axis=1)
return predictions
| 904 | 36.708333 | 114 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/rbf_ensemble.py
|
"""
Implementation based on the one provided by the NAT team; their original comment follows:
The Ensemble scheme is based on the implementation from:
https://github.com/yn-sun/e2epp/blob/master/build_predict_model.py
https://github.com/HandingWang/RF-CMOCO
"""
import numpy as np
from acc_predictor.rbf import RBF
class RBFEnsemble:
def __init__(self, ensemble_size=500, alphabet=None, alphabet_lb=None, **kwargs) -> None:
self.n_models = ensemble_size
self.verbose = True
self.alphabet = alphabet
self.alphabet_lb = alphabet_lb
self.name = 'rbf_ensemble'
self.models = None
self.features = None
self.model_predictions = np.zeros(self.n_models)
def fit(self, X, y, **kwargs):
n, m = X.shape
features = []
models = []
if self.verbose:
print(f"Constructing RBF ensemble surrogate model with sample size = {n}, ensemble size = {self.n_models}")
for i in range(self.n_models):
sample_idx = np.arange(n)
np.random.shuffle(sample_idx)
X = X[sample_idx, :]
y = y[sample_idx]
feature_idx = np.arange(m)
np.random.shuffle(feature_idx)
n_feature = np.random.randint(1, m + 1)
selected_feature_ids = feature_idx[0:n_feature]
X_selected = X[:, selected_feature_ids]
# rbf fails if there are fewer training points than features => check & resample if needed
idx_unique = np.unique(X_selected, axis=0, return_index=True)[1]
while len(idx_unique) <= n_feature or len(idx_unique) == 1:
feature_idx = np.arange(m)
np.random.shuffle(feature_idx)
n_feature = np.random.randint(1, m + 1)
selected_feature_ids = feature_idx[0:n_feature]
X_selected = X[:, selected_feature_ids]
idx_unique = np.unique(X_selected, axis=0, return_index=True)[1]
features.append(selected_feature_ids)
rbf = RBF(kernel='cubic', tail='linear',
alphabet=self.alphabet[selected_feature_ids], alphabet_lb=self.alphabet_lb[selected_feature_ids])
rbf.fit(X_selected, y)
models.append(rbf)
if self.models is not None:
del self.models
if self.features is not None:
del self.features
self.models = models
self.features = features
def predict(self, X):
n = len(X)
y = np.zeros(n)
for i in range(n):
this_test_data = X[i, :]
for j, (rbf, feature) in enumerate(zip(self.models, self.features)):
self.model_predictions[j] = rbf.predict(this_test_data[feature][np.newaxis, :])[0]
y[i] = np.nanmedian(self.model_predictions)
return y[:, None]
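# Illustrative usage sketch added for clarity; not part of the original class. The array sizes,
# bounds and random data below are hypothetical example values: `alphabet` / `alphabet_lb` are the
# per-feature upper / lower bounds of the architecture encoding.
def _example_rbf_ensemble_usage():
    n_samples, n_features = 50, 6
    alphabet_ub = np.full(n_features, 9.0)
    alphabet_lb = np.zeros(n_features)
    X = np.random.randint(0, 10, size=(n_samples, n_features)).astype(float)
    y = np.random.rand(n_samples)
    surrogate = RBFEnsemble(ensemble_size=20, alphabet=alphabet_ub, alphabet_lb=alphabet_lb)
    surrogate.fit(X, y)
    return surrogate.predict(X[:5]).shape   # -> (5, 1)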
| 2,879 | 34.555556 | 119 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/predictor_subsets.py
|
import numpy as np
class PredictorSubsets:
'''
Contains several predictors, with each operating on a subset of the input. Outputs are averaged.
'''
def __init__(self, predictor_class, input_sizes, alphabet, alphabet_lb, **kwargs) -> None:
self.n_predictors = len(input_sizes)
self.input_sizes = input_sizes
self.predictors = []
input_sizes_cumsum = np.cumsum(input_sizes)
self.input_ranges = list(zip([0] + list(input_sizes_cumsum[:-1]), input_sizes_cumsum))
alphabets = [alphabet[s:e] for s, e in self.input_ranges]
alphabets_lb = [alphabet_lb[s:e] for s, e in self.input_ranges]
for input_size, a, a_lb in zip(input_sizes, alphabets, alphabets_lb):
p = predictor_class(alphabet=a, alphabet_lb=a_lb, **kwargs)
self.predictors.append(p)
self.name = 'rbf_ensemble_per_ensemble_member'
def fit(self, X, y, **kwargs):
# assume both X and y are lists with the same lengths as the number of predictors
for i, p in enumerate(self.predictors):
p.fit(X[i], y[i], **kwargs)
def predict(self, X):
X_sep = [X[:, s:e] for s, e in self.input_ranges]
out = None
for x, p in zip(X_sep, self.predictors):
preds = p.predict(x)
if out is None:
out = preds
else:
out += preds
out /= len(self.predictors)
return out
| 1,457 | 37.368421 | 100 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/rbf.py
|
from pySOT.surrogate import RBFInterpolant, CubicKernel, TPSKernel, LinearTail, ConstantTail
import numpy as np
class RBF:
""" Radial Basis Function """
def __init__(self, kernel='cubic', tail='linear', alphabet=None, alphabet_lb=None):
self.kernel = kernel
self.tail = tail
self.name = 'rbf'
self.model = None
self.alphabet = alphabet
self.alphabet_lb = alphabet_lb
def fit(self, train_data, train_label):
if self.kernel == 'cubic':
kernel = CubicKernel
elif self.kernel == 'tps':
kernel = TPSKernel
else:
raise NotImplementedError("unknown RBF kernel")
if self.tail == 'linear':
tail = LinearTail
elif self.tail == 'constant':
tail = ConstantTail
else:
raise NotImplementedError("unknown RBF tail")
idx_unique = np.unique(train_data, axis=0, return_index=True)[1]
self.model = RBFInterpolant(dim=train_data.shape[1], kernel=kernel(), tail=tail(train_data.shape[1]),
lb=self.alphabet_lb, ub=self.alphabet)
for i in range(len(train_data[idx_unique, :])):
self.model.add_points(train_data[idx_unique, :][i, :], train_label[idx_unique][i])
def predict(self, test_data):
test_data = np.array(test_data)
assert len(test_data.shape) == 2
return self.model.predict(test_data)
| 1,458 | 33.738095 | 109 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/predictor_subsets_combo_cascade.py
|
import numpy as np
class PredictorSubsetsComboCascade:
'''
Contains several base predictors, with each operating on a subset of the input.
A meta-predictor combines their outputs.
'''
def __init__(self, predictor_class, predictor_final, input_sizes, alphabet, alphabet_lb, **kwargs) -> None:
self.n_predictors = len(input_sizes)
self.input_sizes = input_sizes
self.predictors = []
self.predictor_final = predictor_final
input_sizes_cumsum = np.cumsum(input_sizes)
self.input_ranges = list(zip([0] + list(input_sizes_cumsum[:-1]), input_sizes_cumsum))
alphabets = [alphabet[s:e] for s, e in self.input_ranges]
alphabets_lb = [alphabet_lb[s:e] for s, e in self.input_ranges]
for input_size, a, a_lb in zip(input_sizes, alphabets, alphabets_lb):
p = predictor_class(alphabet=a, alphabet_lb=a_lb, **kwargs)
self.predictors.append(p)
self.name = 'rbf_ensemble_per_ensemble_member_combo_cascade'
def fit(self, X, targets, **kwargs):
targets_sep, targets_ens = targets['metrics_sep'], targets['metrics_ens']
# assume both X and y are lists with the same lengths as the number of predictors
for i, p in enumerate(self.predictors):
p.fit(X[i], targets_sep[i], **kwargs)
targets_sep_stacked = np.stack(targets_sep, axis=1)
positions_and_thresholds = kwargs['inputs_additional']['inputs_for_flops'][:, :-self.n_predictors]
acc_sep_and_pos_and_thr = np.hstack((targets_sep_stacked, positions_and_thresholds))
self.predictor_final.fit(acc_sep_and_pos_and_thr, targets_ens)
def predict(self, X):
X_for_acc, X_for_flops = X
X_sep = [X_for_acc[:, s:e] for s, e in self.input_ranges]
out = []
for x, p in zip(X_sep, self.predictors):
preds = p.predict(x)
out.append(preds)
out = np.hstack(out)
pos_and_thr = X_for_flops[:, :-self.n_predictors] # n_samples, N positions + N thresholds
acc_sep_and_pos_and_thr = np.hstack((out, pos_and_thr))
res = self.predictor_final.predict(acc_sep_and_pos_and_thr)
return res
| 2,195 | 46.73913 | 111 |
py
|
ENCAS
|
ENCAS-main/acc_predictor/factory.py
|
import numpy as np
from acc_predictor.predictor_container import PredictorContainer
from acc_predictor.predictor_subsets import PredictorSubsets
from acc_predictor.predictor_subsets_combo_cascade import PredictorSubsetsComboCascade
from acc_predictor.rbf import RBF
from acc_predictor.rbf_ensemble import RBFEnsemble
def get_acc_predictor(model, inputs, targets, alphabet, alphabet_lb, **kwargs):
ensemble_size = kwargs.get('ensemble_size', 500)
if model == 'rbf':
predictor = RBF(alphabet=alphabet, alphabet_lb=alphabet_lb)
predictor.fit(inputs, targets)
elif model == 'rbf_ensemble':
predictor = RBFEnsemble(ensemble_size=ensemble_size, alphabet=alphabet, alphabet_lb=alphabet_lb)
predictor.fit(inputs, targets)
elif model == 'rbf_ensemble_per_ensemble_member_cascade': # need to predict flops
input_sizes = [x.shape[1] for x in inputs]
acc_predictor = PredictorSubsets(RBFEnsemble, input_sizes, alphabet, alphabet_lb, ensemble_size=ensemble_size)
acc_predictor.fit(inputs, targets['metrics_sep'])
flops_predictor = RBFEnsemble(ensemble_size=ensemble_size,
alphabet=kwargs['inputs_additional']['inputs_for_flops_alphabet'],
alphabet_lb=kwargs['inputs_additional']['inputs_for_flops_alphabet_lb'])
flops_predictor.fit(kwargs['inputs_additional']['inputs_for_flops'], targets['flops_cascade'])
predictor = PredictorContainer([acc_predictor, flops_predictor], 'rbf_ensemble_per_ensemble_member_cascade',
predictor_input_keys=['for_acc', 'for_flops'])
elif model == 'rbf_ensemble_per_ensemble_member_cascade_combo': # need to predict flops; predict acc not by averaging but by another predictor
input_sizes = [x.shape[1] for x in inputs]
n_supernets = len(input_sizes)
flops_alphabet = kwargs['inputs_additional']['inputs_for_flops_alphabet']
flops_alphabet_lb = kwargs['inputs_additional']['inputs_for_flops_alphabet_lb']
# this predictor takes N predicted errors, N positions, N thresholds
        # create the alphabets for this predictor from what we already have, i.e. the alphabets for flops:
        # they contain N positions, then N thresholds, then N flops, ergo:
alphabet_pos_and_thr = flops_alphabet[:-n_supernets]
alphabet_lb_pos_and_thr = flops_alphabet_lb[:-n_supernets]
combo_alphabet = np.concatenate([np.array([100] * n_supernets), alphabet_pos_and_thr])
combo_alphabet_lb = np.concatenate([np.array([0] * n_supernets), alphabet_lb_pos_and_thr])
acc_predictor_combo = RBFEnsemble(ensemble_size=ensemble_size, alphabet=combo_alphabet, alphabet_lb=combo_alphabet_lb)
acc_predictor = PredictorSubsetsComboCascade(RBFEnsemble, acc_predictor_combo,
input_sizes, alphabet, alphabet_lb, ensemble_size=ensemble_size)
acc_predictor.fit(inputs, targets, inputs_additional=kwargs['inputs_additional'])
flops_predictor = RBFEnsemble(ensemble_size=ensemble_size, alphabet=flops_alphabet, alphabet_lb=flops_alphabet_lb)
flops_predictor.fit(kwargs['inputs_additional']['inputs_for_flops'], targets['flops_cascade'])
predictor = PredictorContainer([acc_predictor, flops_predictor], 'rbf_ensemble_per_ensemble_member_cascade_combo',
predictor_input_keys=[['for_acc', 'for_flops'], 'for_flops'])
else:
raise NotImplementedError
return predictor
| 3,602 | 54.430769 | 146 |
py
|
ENCAS
|
ENCAS-main/after_search/symlink_imagenet.py
|
import glob
import os
import utils
def create_symlinks(experiment_path, **kwargs):
nsga_path = utils.NAT_LOGS_PATH
full_path = os.path.join(nsga_path, experiment_path)
files_to_symlink_all = ['supernet_w1.0', 'supernet_w1.2', 'ofa_proxyless_d234_e346_k357_w1.3',
'attentive_nas_pretrained.pth.tar', 'alphanet_pretrained.pth.tar']
files_to_symlink_actual = []
for f in files_to_symlink_all:
if os.path.exists(os.path.join(full_path, f)):
files_to_symlink_actual.append(f)
for path in glob.glob(os.path.join(full_path, "iter_*/")):
for f in files_to_symlink_actual:
try:
os.symlink(os.path.join(full_path, f), os.path.join(path, f))
except FileExistsError:
pass
if __name__ == '__main__':
utils.execute_func_for_all_runs_and_combine('imagenet_v3_alpha_sep', create_symlinks)
| 912 | 37.041667 | 98 |
py
|
ENCAS
|
ENCAS-main/after_search/store_outputs.py
|
import json
import os
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
import numpy as np
import yaml
import glob
import utils
from utils import save_gz
from utils_pareto import get_best_pareto_up_and_including_iter
from evaluate import evaluate_many_configs
def store_cumulative_pareto_front_outputs_single_run(experiment_path, dataset_type='test', **kwargs):
max_iter = kwargs.get('max_iter', 15)
if_store_float16 = True
save_path_base = experiment_path
swa = kwargs.get('swa', None)
postfix = kwargs.get('postfix', '')
if swa is not None:
postfix = f'_swa{swa}' + postfix
if_store_logit_gaps = kwargs.get('if_store_logit_gaps', True)
out_folder_path = os.path.join(save_path_base, f'output_distrs_{dataset_type}{postfix}')
if os.path.exists(out_folder_path) and not kwargs.get('overwrite', True):
print('Skipping')
return
Path(out_folder_path).mkdir(exist_ok=True)
process_pool = ProcessPoolExecutor(max_workers=1)
info_dict_path = os.path.join(out_folder_path, f'info.json')
futures = []
cnt = 0
if if_store_logit_gaps:
out_folder_path_logit_gaps = os.path.join(save_path_base, f'logit_gaps_{dataset_type}{postfix}')
Path(out_folder_path_logit_gaps).mkdir(exist_ok=True)
msunas_config_path = os.path.join(utils.NAT_LOGS_PATH, experiment_path, 'config_msunas.yml')
msunas_config = yaml.safe_load(open(msunas_config_path, 'r'))
search_space_name = msunas_config.get('search_space', 'ofa')
n_iters = len(glob.glob(os.path.join(utils.NAT_LOGS_PATH, experiment_path, "iter_*.stats")))
if_post_hoc_ensemble = 'posthoc' in experiment_path and 'posthocheavy' not in experiment_path
if n_iters < max_iter and not if_post_hoc_ensemble:
print(f'Detected an unfinished run (<{max_iter} iterations) => skip')
return None
if 'fun_to_get_subnets' in kwargs:
print('Using fun_to_get_subnets from kwargs!')
fun_to_get_subnets = kwargs['fun_to_get_subnets']
else:
fun_to_get_subnets = get_best_pareto_up_and_including_iter
cfgs, true_errs, flops, iters = fun_to_get_subnets(experiment_path, max_iter)
if swa is not None:
iters = [max_iter] * len(cfgs)
    subsample = lambda l: l  # alternatives: lambda l: l[::3], lambda l: l[-2:]
cfgs, true_errs, flops, iters = subsample(cfgs), subsample(true_errs), subsample(flops), subsample(iters)
print(f'{flops=}')
cfgs = [list(c) for c in cfgs] # otherwise it's an ndarray and can't be saved in json
n_cfgs = len(cfgs)
fst_same_iter = 0
last_same_iter = 0
same_iter = iters[0]
i = 1
ensemble_ss_names = None
if_stored_all = False
flops_recomputed = []
accs_test = []
run_config = None
while not if_stored_all:
if (i == n_cfgs) or (i < n_cfgs and iters[i] != same_iter):
if search_space_name == 'ensemble':
path_to_supernet_or_its_dir = []
for supernet_path_cur in msunas_config['supernet_path']:
basename = os.path.basename(supernet_path_cur)
if swa is not None:
basename = utils.transform_supernet_name_swa(basename, swa)
path_to_supernet_or_its_dir.append(os.path.join(utils.NAT_LOGS_PATH, experiment_path, f'iter_{same_iter}', basename))
ensemble_ss_names = msunas_config['ensemble_ss_names']
else:
raise NotImplementedError()
keys_to_return = ['output_distr', 'run_config', 'flops']
if if_store_logit_gaps:
keys_to_return.append('logit_gaps')
accs_test_cur, info = evaluate_many_configs(path_to_supernet_or_its_dir, cfgs[fst_same_iter:last_same_iter+1],
config_msunas=msunas_config, if_test='test' in dataset_type,
search_space_name=search_space_name,
ensemble_ss_names=ensemble_ss_names, info_keys_to_return=keys_to_return, run_config=run_config,
if_use_logit_gaps=False)
output_distr_per_model = info['output_distr']
flops_recomputed += info['flops']
accs_test += accs_test_cur
if if_store_logit_gaps:
logit_gaps_per_model = info['logit_gaps']
if if_store_float16:
output_distr_per_model = [o.astype(np.float16) for o in output_distr_per_model]
if if_store_logit_gaps:
logit_gaps_per_model = [l.astype(np.float16) for l in logit_gaps_per_model]
for i_output in range(len(output_distr_per_model)):
future = process_pool.submit(save_gz, path=os.path.join(out_folder_path, f'{cnt}.npy.gz'),
data=output_distr_per_model[i_output])
futures.append(future)
if if_store_logit_gaps:
future = process_pool.submit(save_gz, path=os.path.join(out_folder_path_logit_gaps, f'{cnt}.npy.gz'),
data=logit_gaps_per_model[i_output])
futures.append(future)
cnt += 1
run_config = info['run_config'][0] # a hack for speed
fst_same_iter = i
last_same_iter = i
if i < n_cfgs: # to cover the case of the last iter
same_iter = iters[i]
else:
last_same_iter += 1
i += 1
if_stored_all = i > n_cfgs
info_dict = {'val': list(true_errs), dataset_type: list(accs_test), 'flops': list(flops_recomputed),
'cfgs': list(cfgs), 'flops_old': list(flops)}
json.dump(info_dict, open(info_dict_path, 'w'))
for f in futures:
f.result() # wait on everything
def store_cumulative_pareto_front_outputs(experiment_name, dataset_type, **kwargs):
utils.execute_func_for_all_runs_and_combine(experiment_name, store_cumulative_pareto_front_outputs_single_run,
dataset_type=dataset_type, **kwargs)
if __name__ == '__main__':
for d in ['val', 'test']:
store_cumulative_pareto_front_outputs('cifar100_r0_alphaofa_EXTRACT_alpha', d, max_iter=30, swa='20')
store_cumulative_pareto_front_outputs('cifar100_r0_alphaofa_EXTRACT_ofa12', d, max_iter=30, swa='20')
| 6,451 | 47.149254 | 139 |
py
|
ENCAS
|
ENCAS-main/after_search/store_outputs_timm.py
|
import copy
import utils
from collections import defaultdict
import pandas as pd
import timm
import json
import os
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from os.path import join
import numpy as np
from ofa.utils import AverageMeter, accuracy
from timm.data import create_dataset, create_loader, resolve_data_config
from tqdm import tqdm
import torch
from fvcore.nn import FlopCountAnalysis
from ptflops import get_model_complexity_info
path_timm_csv = os.path.join(utils.NAT_DATA_PATH, 'timm-results-imagenet.csv')
df_timm = pd.read_csv(path_timm_csv)
def compute_outputs_single_network(net_name, data_provider_kwargs, dataset_type, **kwargs):
model = timm.create_model(net_name, pretrained=True)
model.cuda()
model.eval()
df_row = df_timm[df_timm['model'] == net_name]
image_size = int(df_row['img_size'].values[0])
# here I use timm loader to get exactly the results reported in the repo
if 'val' in dataset_type:
split_name = 'imagenetv2_all'
elif 'test' in dataset_type:
split_name = 'val' # "validation" of ImageNet is used for test
dataset = create_dataset(
root=data_provider_kwargs['data'], name='', split=split_name,
download=False, load_bytes=False, class_map='')
args_for_data_config = {'model': 'beit_base_patch16_224', 'img_size': None,
'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '',
'num_classes': 1000, 'class_map': '', 'gp': None, 'pretrained': True,
'test_pool': False, 'no_prefetcher': False, 'pin_mem': False, 'channels_last': False,
'tf_preprocessing': False, 'use_ema': False, 'torchscript': False, 'legacy_jit': False,
'prefetcher': True}
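    # This dict mirrors the argument namespace of timm's validation script; since most fields are
    # left empty/None, resolve_data_config falls back to the pretrained model's own data config
    # (input size, interpolation, mean/std, crop percentage).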
data_config = resolve_data_config(args_for_data_config, model=model, use_test_size=True, verbose=True)
crop_pct = data_config['crop_pct']
loader = create_loader(dataset,
input_size=data_config['input_size'],
batch_size=data_provider_kwargs['vld_batch_size'],
use_prefetcher=True,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=data_provider_kwargs['n_workers'],
crop_pct=crop_pct,
pin_memory=True,
tf_preprocessing=False)
n_batches = len(loader)
# model = torch.nn.DataParallel(model)
metric_dict = defaultdict(lambda: AverageMeter())
outputs_to_return = []
with tqdm(total=n_batches, desc=dataset_type, ncols=130) as t, torch.no_grad():
for i, (images, labels, *_) in enumerate(loader):
images, labels = images.cuda(), labels.cuda()
with torch.cuda.amp.autocast():
output = model(images)
outputs_to_return.append(output.detach().cpu())
acc1 = accuracy(output, labels, topk=(1,))[0].item()
metric_dict['acc'].update(acc1, output.size(0))
t.set_postfix({**{key: metric_dict[key].avg for key in metric_dict},
'img_size': images.size(2)})
t.update(1)
outputs_to_return = torch.cat(outputs_to_return, dim=0)
    if True:  # use fvcore for FLOP counting; the else branch (ptflops) was used as a cross-check
flops = FlopCountAnalysis(model.cuda(), torch.randn(1, 3, image_size, image_size).cuda())
metric_dict['flops'] = flops.total() / 10 ** 6
else:
# used this to double-check the results - they are consistent between the libraries
flops = get_model_complexity_info(model.cuda(), (3, image_size, image_size),
print_per_layer_stat=False, as_strings=False, verbose=False)[0]
metric_dict['flops'] = flops / 10 ** 6
return outputs_to_return, metric_dict
def store_outputs_many_networks(net_names, data_provider_kwargs, dataset_type, dir_name, **kwargs):
save_path_base = join(utils.NAT_LOGS_PATH, dir_name)
Path(save_path_base).mkdir(exist_ok=True)
postfix = kwargs.get('postfix', '')
out_folder_path = join(save_path_base, 'pretrained')
Path(out_folder_path).mkdir(exist_ok=True) # need to create all the dirs in the hierarchy
out_folder_path = join(out_folder_path, '0')
Path(out_folder_path).mkdir(exist_ok=True)
out_folder_path = join(out_folder_path, f'output_distrs_{dataset_type}{postfix}')
Path(out_folder_path).mkdir(exist_ok=True)
info_dict_path = os.path.join(out_folder_path, f'info.json')
out_folder_logits_path = join(save_path_base, 'pretrained', '0', f'logit_gaps_{dataset_type}{postfix}')
Path(out_folder_logits_path).mkdir(exist_ok=True)
process_pool = ProcessPoolExecutor(max_workers=1)
futures = []
accs_all = []
flops_all = []
for i, net_name in enumerate(net_names):
print(f'{net_name=}')
if 'efficientnet' not in net_name: # large effnets use a lot of VRAM => use smaller batch
logits, metric_dict = compute_outputs_single_network(net_name, data_provider_kwargs, dataset_type)
else:
data_provider_kwargs_smaller_batch = copy.deepcopy(data_provider_kwargs)
data_provider_kwargs_smaller_batch['vld_batch_size'] = 20
logits, metric_dict = compute_outputs_single_network(net_name, data_provider_kwargs_smaller_batch, dataset_type)
        logits_float32 = logits.to(torch.float32)  # logits is already a tensor, so avoid the torch.tensor(tensor) copy warning
acc, flops = metric_dict['acc'].avg, metric_dict['flops']
accs_all.append(acc)
flops_all.append(flops)
two_max_values = logits_float32.topk(k=2, dim=-1).values
logit_gap = two_max_values[:, 0] - two_max_values[:, 1]
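        # The logit gap (difference between the two largest logits) is stored next to the softmax
        # outputs; it serves as the confidence measure for cascades built by the greedy algorithm.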
future = process_pool.submit(utils.save_gz, path=os.path.join(out_folder_logits_path, f'{i}.npy.gz'),
data=logit_gap.numpy().astype(np.float16))
futures.append(future)
outputs = torch.softmax(logits_float32, dim=-1)
future = process_pool.submit(utils.save_gz, path=os.path.join(out_folder_path, f'{i}.npy.gz'),
data=outputs.numpy().astype(np.float16))
futures.append(future)
info_dict = {dataset_type: accs_all, 'flops': flops_all, 'net_names': net_names}
json.dump(info_dict, open(info_dict_path, 'w'))
for f in futures:
f.result() # wait on everything
if __name__ == '__main__':
IMAGENET_PATH = '/projects/0/einf2071/data/imagenet/' #'/export/scratch2/aleksand/data/imagenet/'
# You can set the download location of the model checkpoints like this:
# torch.hub.set_dir('/export/scratch2/aleksand/torch_hub/')
# torch.hub.set_dir('/projects/0/einf2071/torch_hub/')
all_timm_models_without_regnetz = ['beit_large_patch16_512', 'beit_large_patch16_384', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', 'beit_large_patch16_224', 'swin_large_patch4_window12_384', 'vit_large_patch16_384', 'tf_efficientnet_b7_ns', 'beit_base_patch16_384', 'cait_m48_448', 'tf_efficientnet_b6_ns', 'swin_base_patch4_window12_384', 'tf_efficientnetv2_xl_in21ft1k', 'swin_large_patch4_window7_224', 'tf_efficientnetv2_l_in21ft1k', 'vit_large_r50_s32_384', 'dm_nfnet_f6', 'tf_efficientnet_b5_ns', 'cait_m36_384', 'vit_base_patch16_384', 'xcit_large_24_p8_384_dist', 'vit_large_patch16_224', 'xcit_medium_24_p8_384_dist', 'dm_nfnet_f5', 'xcit_large_24_p16_384_dist', 'dm_nfnet_f4', 'tf_efficientnetv2_m_in21ft1k', 'xcit_small_24_p8_384_dist', 'dm_nfnet_f3', 'tf_efficientnetv2_l', 'cait_s36_384', 'ig_resnext101_32x48d', 'xcit_medium_24_p16_384_dist', 'deit_base_distilled_patch16_384', 'xcit_large_24_p8_224_dist', 'tf_efficientnet_b8_ap', 'tf_efficientnet_b8', 'swin_base_patch4_window7_224', 'beit_base_patch16_224', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b7_ap', 'xcit_small_24_p16_384_dist', 'ig_resnext101_32x32d', 'xcit_small_12_p8_384_dist', 'xcit_medium_24_p8_224_dist', 'dm_nfnet_f2', 'tf_efficientnetv2_m', 'cait_s24_384', 'resnetrs420', 'ecaresnet269d', 'vit_base_r50_s16_384', 'resnetv2_152x4_bitm', 'tf_efficientnet_b7', 'xcit_large_24_p16_224_dist', 'xcit_small_24_p8_224_dist', 'efficientnetv2_rw_m', 'tf_efficientnet_b6_ap', 'eca_nfnet_l2', 'xcit_small_12_p16_384_dist', 'resnetrs350', 'dm_nfnet_f1', 'vit_base_patch16_224', 'resnest269e', 'resnetv2_152x2_bitm', 'vit_large_r50_s32_224', 'resnetrs270', 'resnetv2_101x3_bitm', 'resmlp_big_24_224_in22ft1k', 'xcit_large_24_p8_224', 'seresnet152d', 'tf_efficientnetv2_s_in21ft1k', 'xcit_medium_24_p16_224_dist', 'vit_base_patch16_224_miil', 'swsl_resnext101_32x8d', 'tf_efficientnet_b5_ap', 'xcit_small_12_p8_224_dist', 'crossvit_18_dagger_408', 'ig_resnext101_32x16d', 'pit_b_distilled_224', 'tf_efficientnet_b6', 'resnetrs200', 'cait_xs24_384', 'vit_small_r26_s32_384', 'tf_efficientnet_b3_ns', 'eca_nfnet_l1', 'resnetv2_50x3_bitm', 'resnet200d', 'tf_efficientnetv2_s', 'xcit_small_24_p16_224_dist', 'resnest200e', 'xcit_small_24_p8_224', 'resnetv2_152x2_bit_teacher_384', 'efficientnetv2_rw_s', 'crossvit_15_dagger_408', 'tf_efficientnet_b5', 'vit_small_patch16_384', 'xcit_tiny_24_p8_384_dist', 'xcit_medium_24_p8_224', 'resnetrs152', 'regnety_160', 'twins_svt_large', 'resnet152d', 'resmlp_big_24_distilled_224', 'jx_nest_base', 'cait_s24_224', 'efficientnet_b4', 'deit_base_distilled_patch16_224', 'dm_nfnet_f0', 'swsl_resnext101_32x16d', 'xcit_small_12_p16_224_dist', 'vit_base_patch32_384', 'xcit_small_12_p8_224', 'tf_efficientnet_b4_ap', 'swsl_resnext101_32x4d', 'swin_small_patch4_window7_224', 'twins_pcpvt_large', 'twins_svt_base', 'jx_nest_small', 'deit_base_patch16_384', 'tresnet_m', 'tresnet_xl_448', 'tf_efficientnet_b4', 'resnet101d', 'resnetv2_152x2_bit_teacher', 'xcit_large_24_p16_224', 'resnest101e', 'resnetv2_50x1_bit_distilled', 'pnasnet5large', 'nfnet_l0', 'regnety_032', 'twins_pcpvt_base', 'ig_resnext101_32x8d', 'nasnetalarge', 'xcit_medium_24_p16_224', 'eca_nfnet_l0', 'levit_384', 'xcit_small_24_p16_224', 'xcit_tiny_24_p8_224_dist', 'xcit_tiny_24_p16_384_dist', 'resnet61q', 'crossvit_18_dagger_240', 'gc_efficientnetv2_rw_t', 'pit_b_224', 'crossvit_18_240', 'xcit_tiny_12_p8_384_dist', 'tf_efficientnet_b2_ns', 'resnet51q', 'ecaresnet50t', 'efficientnetv2_rw_t', 'resnetv2_101x1_bitm', 'crossvit_15_dagger_240', 'coat_lite_small', 
'mixer_b16_224_miil', 'resnetrs101', 'convit_base', 'tresnet_l_448', 'efficientnet_b3', 'crossvit_base_240', 'cait_xxs36_384', 'ecaresnet101d', 'swsl_resnext50_32x4d', 'visformer_small', 'tresnet_xl', 'resnetv2_101', 'pit_s_distilled_224', 'deit_base_patch16_224', 'xcit_small_12_p16_224', 'tf_efficientnetv2_b3', 'xcit_tiny_24_p8_224', 'ssl_resnext101_32x16d', 'vit_small_r26_s32_224', 'tf_efficientnet_b3_ap', 'tresnet_m_448', 'twins_svt_small', 'tf_efficientnet_b3', 'rexnet_200', 'ssl_resnext101_32x8d', 'halonet50ts', 'tf_efficientnet_lite4', 'crossvit_15_240', 'halo2botnet50ts_256', 'tnt_s_patch16_224', 'vit_large_patch32_384', 'levit_256', 'tresnet_l', 'wide_resnet50_2', 'jx_nest_tiny', 'lamhalobotnet50ts_256', 'convit_small', 'swin_tiny_patch4_window7_224', 'vit_small_patch16_224', 'tf_efficientnet_b1_ns', 'convmixer_1536_20', 'gernet_l', 'legacy_senet154', 'efficientnet_el', 'coat_mini', 'seresnext50_32x4d', 'gluon_senet154', 'xcit_tiny_12_p8_224_dist', 'deit_small_distilled_patch16_224', 'lambda_resnet50ts', 'resmlp_36_distilled_224', 'swsl_resnet50', 'resnest50d_4s2x40d', 'twins_pcpvt_small', 'pit_s_224', 'haloregnetz_b', 'resmlp_big_24_224', 'crossvit_small_240', 'gluon_resnet152_v1s', 'resnest50d_1s4x24d', 'sehalonet33ts', 'resnest50d', 'cait_xxs24_384', 'xcit_tiny_12_p16_384_dist', 'gcresnet50t', 'ssl_resnext101_32x4d', 'gluon_seresnext101_32x4d', 'gluon_seresnext101_64x4d', 'efficientnet_b3_pruned', 'ecaresnet101d_pruned', 'regnety_320', 'resmlp_24_distilled_224', 'vit_base_patch32_224', 'gernet_m', 'nf_resnet50', 'gluon_resnext101_64x4d', 'ecaresnet50d', 'efficientnet_b2', 'gcresnext50ts', 'resnet50d', 'repvgg_b3', 'vit_small_patch32_384', 'gluon_resnet152_v1d', 'mixnet_xl', 'xcit_tiny_24_p16_224_dist', 'ecaresnetlight', 'inception_resnet_v2', 'resnetv2_50', 'gluon_resnet101_v1d', 'regnety_120', 'resnet50', 'seresnet33ts', 'resnetv2_50x1_bitm', 'gluon_resnext101_32x4d', 'rexnet_150', 'tf_efficientnet_b2_ap', 'ssl_resnext50_32x4d', 'efficientnet_el_pruned', 'gluon_resnet101_v1s', 'regnetx_320', 'tf_efficientnet_el', 'seresnet50', 'vit_base_patch16_sam_224', 'legacy_seresnext101_32x4d', 'repvgg_b3g4', 'tf_efficientnetv2_b2', 'dpn107', 'convmixer_768_32', 'inception_v4', 'skresnext50_32x4d', 'eca_resnet33ts', 'gcresnet33ts', 'tf_efficientnet_b2', 'cspresnext50', 'cspdarknet53', 'dpn92', 'ens_adv_inception_resnet_v2', 'gluon_seresnext50_32x4d', 'gluon_resnet152_v1c', 'efficientnet_b2_pruned', 'xception71', 'regnety_080', 'resnetrs50', 'deit_small_patch16_224', 'levit_192', 'ecaresnet26t', 'regnetx_160', 'dpn131', 'tf_efficientnet_lite3', 'resnext50_32x4d', 'resmlp_36_224', 'cait_xxs36_224', 'regnety_064', 'xcit_tiny_12_p8_224', 'ecaresnet50d_pruned', 'gluon_xception65', 'gluon_resnet152_v1b', 'resnext50d_32x4d', 'dpn98', 'gmlp_s16_224', 'regnetx_120', 'cspresnet50', 'xception65', 'gluon_resnet101_v1c', 'rexnet_130', 'tf_efficientnetv2_b1', 'hrnet_w64', 'xcit_tiny_24_p16_224', 'dla102x2', 'resmlp_24_224', 'repvgg_b2g4', 'gluon_resnext50_32x4d', 'tf_efficientnet_cc_b1_8e', 'hrnet_w48', 'resnext101_32x8d', 'ese_vovnet39b', 'gluon_resnet101_v1b', 'resnetblur50', 'nf_regnet_b1', 'pit_xs_distilled_224', 'tf_efficientnet_b1_ap', 'eca_botnext26ts_256', 'botnet26t_256', 'efficientnet_em', 'ssl_resnet50', 'regnety_040', 'regnetx_080', 'dpn68b', 'resnet33ts', 'res2net101_26w_4s', 'halonet26t', 'lambda_resnet26t', 'coat_lite_mini', 'legacy_seresnext50_32x4d', 'gluon_resnet50_v1d', 'regnetx_064', 'xception', 'resnet32ts', 'res2net50_26w_8s', 'mixnet_l', 'lambda_resnet26rpt_256', 'hrnet_w40', 
'hrnet_w44', 'wide_resnet101_2', 'eca_halonext26ts', 'tf_efficientnet_b1', 'efficientnet_b1', 'gluon_inception_v3', 'repvgg_b2', 'tf_mixnet_l', 'dla169', 'gluon_resnet50_v1s', 'legacy_seresnet152', 'tf_efficientnet_b0_ns', 'xcit_tiny_12_p16_224_dist', 'res2net50_26w_6s', 'xception41', 'dla102x', 'regnetx_040', 'resnest26d', 'levit_128', 'dla60_res2net', 'vit_tiny_patch16_384', 'hrnet_w32', 'dla60_res2next', 'coat_tiny', 'selecsls60b', 'legacy_seresnet101', 'repvgg_b1', 'cait_xxs24_224', 'tf_efficientnetv2_b0', 'tv_resnet152', 'bat_resnext26ts', 'efficientnet_b1_pruned', 'dla60x', 'res2next50', 'hrnet_w30', 'pit_xs_224', 'regnetx_032', 'tf_efficientnet_em', 'res2net50_14w_8s', 'hardcorenas_f', 'efficientnet_es', 'gmixer_24_224', 'dla102', 'gluon_resnet50_v1c', 'res2net50_26w_4s', 'selecsls60', 'seresnext26t_32x4d', 'resmlp_12_distilled_224', 'mobilenetv3_large_100_miil', 'tf_efficientnet_cc_b0_8e', 'resnet26t', 'regnety_016', 'tf_inception_v3', 'rexnet_100', 'seresnext26ts', 'gcresnext26ts', 'xcit_nano_12_p8_384_dist', 'hardcorenas_e', 'efficientnet_b0', 'legacy_seresnet50', 'tv_resnext50_32x4d', 'repvgg_b1g4', 'seresnext26d_32x4d', 'adv_inception_v3', 'gluon_resnet50_v1b', 'res2net50_48w_2s', 'coat_lite_tiny', 'tf_efficientnet_lite2', 'inception_v3', 'eca_resnext26ts', 'hardcorenas_d', 'tv_resnet101', 'densenet161', 'tf_efficientnet_cc_b0_4e', 'densenet201', 'mobilenetv2_120d', 'mixnet_m', 'selecsls42b', 'xcit_tiny_12_p16_224', 'resnet34d', 'tf_efficientnet_b0_ap', 'legacy_seresnext26_32x4d', 'hardcorenas_c', 'dla60', 'crossvit_9_dagger_240', 'tf_mixnet_m', 'regnetx_016', 'convmixer_1024_20_ks9_p14', 'skresnet34', 'gernet_s', 'tf_efficientnet_b0', 'ese_vovnet19b_dw', 'resnext26ts', 'hrnet_w18', 'resnet26d', 'tf_efficientnet_lite1', 'resmlp_12_224', 'mixer_b16_224', 'tf_efficientnet_es', 'densenetblur121d', 'levit_128s', 'hardcorenas_b', 'mobilenetv2_140', 'repvgg_a2', 'xcit_nano_12_p8_224_dist', 'regnety_008', 'dpn68', 'tv_resnet50', 'vit_small_patch32_224', 'mixnet_s', 'vit_tiny_r_s16_p8_384', 'hardcorenas_a', 'densenet169', 'mobilenetv3_large_100', 'tf_mixnet_s', 'mobilenetv3_rw', 'densenet121', 'tf_mobilenetv3_large_100', 'resnest14d', 'efficientnet_lite0', 'xcit_nano_12_p16_384_dist', 'vit_tiny_patch16_224', 'semnasnet_100', 'resnet26', 'regnety_006', 'repvgg_b0', 'fbnetc_100', 'resnet34', 'hrnet_w18_small_v2', 'regnetx_008', 'mobilenetv2_110d', 'efficientnet_es_pruned', 'tf_efficientnet_lite0', 'legacy_seresnet34', 'tv_densenet121', 'mnasnet_100', 'dla34', 'gluon_resnet34_v1b', 'pit_ti_distilled_224', 'deit_tiny_distilled_patch16_224', 'vgg19_bn', 'spnasnet_100', 'regnety_004', 'ghostnet_100', 'crossvit_9_240', 'xcit_nano_12_p8_224', 'regnetx_006', 'vit_base_patch32_sam_224', 'tf_mobilenetv3_large_075', 'vgg16_bn', 'crossvit_tiny_240', 'tv_resnet34', 'swsl_resnet18', 'convit_tiny', 'skresnet18', 'mobilenetv2_100', 'pit_ti_224', 'ssl_resnet18', 'regnetx_004', 'vgg19', 'hrnet_w18_small', 'xcit_nano_12_p16_224_dist', 'resnet18d', 'tf_mobilenetv3_large_minimal_100', 'deit_tiny_patch16_224', 'mixer_l16_224', 'vit_tiny_r_s16_p8_224', 'legacy_seresnet18', 'vgg16', 'vgg13_bn', 'gluon_resnet18_v1b', 'vgg11_bn', 'regnety_002', 'xcit_nano_12_p16_224', 'vgg13', 'resnet18', 'vgg11', 'regnetx_002', 'tf_mobilenetv3_small_100', 'dla60x_c', 'dla46x_c', 'tf_mobilenetv3_small_075', 'dla46_c', 'tf_mobilenetv3_small_minimal_100']
# it's convenient to download all the models in advance:
for net_name in reversed(all_timm_models_without_regnetz):
try:
model = timm.create_model(net_name, pretrained=True, num_classes=1000)
del model
except:
print(f'Failed {net_name}')
data_provider_kwargs = {'data': IMAGENET_PATH, 'dataset': 'imagenet', 'n_workers': 8, 'vld_batch_size': 128}
# Snellius:
store_outputs_many_networks(all_timm_models_without_regnetz, data_provider_kwargs, 'val', 'timm_all')
store_outputs_many_networks(all_timm_models_without_regnetz, data_provider_kwargs, 'test', 'timm_all')
| 18,030 | 109.619632 | 10,476 |
py
|
ENCAS
|
ENCAS-main/after_search/extract_supernet_from_joint.py
|
import glob
import json
import numpy as np
import os
from os.path import join
from pathlib import Path
from shutil import copy
import re
import yaml
import utils
import utils_pareto
from utils import NAT_LOGS_PATH
def extract(experiment_name, out_experiment_name, idx_snet, if_joint_pareto_only=False, **kwargs): # idx_snet == idx of supernet to extract
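    # Extract the subnetworks of a single supernetwork (index idx_snet) from a jointly-searched run:
    # copy and trim the msunas config, rewrite each iter_*.stats archive to keep only that supernet's
    # configs and metrics, and symlink the corresponding (and, optionally, SWA-averaged) weights.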
experiment_path = join(NAT_LOGS_PATH, experiment_name)
out_path = join(NAT_LOGS_PATH, out_experiment_name)
Path(out_path).mkdir(exist_ok=True)
for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
if not f.is_dir():
continue
name_cur = f.name
out_path_algo_cur = join(out_path, name_cur)
Path(out_path_algo_cur).mkdir(exist_ok=True)
for run_folder in os.scandir(f.path):
if not run_folder.is_dir():
continue
run_idx = int(run_folder.name)
run_path = join(experiment_path, name_cur, str(run_idx))
run_path_new = join(out_path_algo_cur, str(run_idx))
Path(run_path_new).mkdir(exist_ok=True)
# copy & modify config_msunas
msunas_config_new_path = join(run_path_new, 'config_msunas.yml')
copy(join(run_path, 'config_msunas.yml'), msunas_config_new_path)
msunas_config = yaml.safe_load(open(msunas_config_new_path, 'r'))
for key in ['ensemble_ss_names', 'supernet_path', 'alphabet']:
msunas_config[key] = [msunas_config[key][idx_snet]]
yaml.dump(msunas_config, open(msunas_config_new_path, 'w'))
# copy & modify iter_*.stats
stats_paths = glob.glob(os.path.join(run_path, "iter_*.stats"))
regex = re.compile(r'\d+')
iters = np.array([int(regex.findall(p)[-1]) for p in stats_paths])
idx = np.argsort(iters)
iters = iters[idx]
stats_paths = np.array(stats_paths)[idx].tolist()
if if_joint_pareto_only:
print(f'{iters[-1]=}')
cfgs_jointpareto, _, _, iters_jointpareto = utils_pareto.get_best_pareto_up_and_including_iter(run_path, iters[-1])
for it, p in enumerate(stats_paths):
print(p)
data = json.load(open(p, 'r'))
data_new = {}
for k, v in data.items():
if k != 'archive':
data_new[k] = v
else:
archive = v
new_archive = []
for (cfg_ensemble, top1_ens, flops_ens, top1s_and_flops_sep) in archive:
if if_joint_pareto_only:
cfgs_jointpareto_curiter = cfgs_jointpareto[iters_jointpareto == it]
if cfg_ensemble not in cfgs_jointpareto_curiter:
continue
cfg = cfg_ensemble[idx_snet]
top1s_sep = top1s_and_flops_sep[0]
flops_sep = top1s_and_flops_sep[1]
new_archive_member = [[cfg], top1s_sep[idx_snet], flops_sep[idx_snet]]
new_archive.append(new_archive_member)
data_new['archive'] = new_archive
# store data_new
path_new = p.replace(run_path, run_path_new)
with open(path_new, 'w') as handle:
json.dump(data_new, handle)
# create iter_* folders, softlink weights
# also softlink swa weights
swa = kwargs.get('swa', None)
for it in iters:
it_path = join(run_path, f'iter_{it}')
it_path_new = join(run_path_new, f'iter_{it}')
Path(it_path_new).mkdir(exist_ok=True)
supernet_name = os.path.basename(msunas_config['supernet_path'][0])
if not os.path.exists(os.path.join(it_path_new, supernet_name)):
os.symlink(os.path.join(it_path, supernet_name), os.path.join(it_path_new, supernet_name))
if swa is not None: # it will be stored in the last folder
supernet_name = utils.transform_supernet_name_swa(os.path.basename(msunas_config['supernet_path'][0]), swa)
if not os.path.exists(os.path.join(it_path_new, supernet_name)):
os.symlink(os.path.join(it_path, supernet_name), os.path.join(it_path_new, supernet_name))
def extract_all(experiment_name, out_name_suffixes, **kwargs):
for i, out_name_suffix in enumerate(out_name_suffixes):
extract(experiment_name, experiment_name + f'_EXTRACT_{out_name_suffix}', i, **kwargs)
| 4,759 | 45.213592 | 139 |
py
|
ENCAS
|
ENCAS-main/after_search/evaluate_stored_outputs.py
|
import copy
import json
import numpy as np
import os
import torch
import glob
import gzip
import yaml
from matplotlib import pyplot as plt
import utils
from utils import execute_func_for_all_runs_and_combine
labels_path_prefix = utils.NAT_DATA_PATH
def evaluate_stored_one_run(run_path, dataset_type, path_labels, **kwargs):
    labels = torch.tensor(np.load(os.path.join(labels_path_prefix, path_labels))).cuda()  # load the ground-truth labels once and keep them on the GPU
dataset_postfix = kwargs.get('dataset_postfix', '')
path_stored_outputs = os.path.join(run_path, f'output_distrs_{dataset_type}{dataset_postfix}')
max_iter = kwargs.get('max_iter', 15)
max_iter_path = os.path.join(run_path, f'iter_{max_iter}')
n_nets = len(glob.glob(os.path.join(path_stored_outputs, '*.npy.gz')))
info_path = glob.glob(os.path.join(path_stored_outputs, 'info.json'))[0]
info_path_new = os.path.join(max_iter_path, f'best_pareto_val_and_{dataset_type}{dataset_postfix}.json')
accs = []
for i_net in range(n_nets):
path = os.path.join(path_stored_outputs, f'{i_net}.npy.gz')
with gzip.GzipFile(path, 'r') as f:
outs = np.asarray(np.load(f))
outs = torch.tensor(outs).cuda()
preds = torch.argmax(outs, axis=-1)
acc = torch.sum(preds == labels) / len(labels) * 100
accs.append(acc.item())
print(accs)
info = json.load(open(info_path))
info[dataset_type + dataset_postfix] = accs
json.dump(info, open(info_path_new, 'w'))
return accs
def evaluate_stored_whole_experiment(experiment_name, dataset_type, path_labels, **kwargs):
execute_func_for_all_runs_and_combine(experiment_name, evaluate_stored_one_run, dataset_type=dataset_type,
path_labels=path_labels, **kwargs)
def evaluate_stored_one_run_cascade(run_path, dataset_type, path_labels, **kwargs):
labels = torch.tensor(np.load(os.path.join(labels_path_prefix, path_labels)))
cascade_info_name = kwargs.get('cascade_info_name', 'posthoc_ensemble.yml')
cascade_info_name_new = kwargs.get('cascade_info_name_new', 'posthoc_ensemble_from_stored.yml')
cascade_info_path = glob.glob(os.path.join(run_path, cascade_info_name))[0]
info = yaml.safe_load(open(cascade_info_path))
    cfgs, thresholds, original_indices, weight_paths, flops_all, ensemble_ss_names, search_space_names = \
        info['cfgs'], info.get('thresholds', None), info['original_indices'], info['weight_paths'], \
        info['flops_all'], info['ensemble_ss_names'], info['search_space_names']
if thresholds is None:
# for ensembles (from ENCAS-ensemble) there are no thresholds, which is equivalent to all thresholds being 1
# could have a separate method for ENCAS-ensemble, but this fix seems simpler and should lead to the same result
thresholds = [[1.0] * len(cfgs[0])] * len(cfgs)
postfix = info.get('dataset_postfix', '')
if_use_logit_gaps = info['algo'] == 'greedy'
cascade_info_path_new = cascade_info_path.replace(cascade_info_name, cascade_info_name_new)
dataset_type_for_path = dataset_type
labels = labels.cuda()
outs_cache = {}
accs = []
flops_new = []
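    # Re-evaluate each stored cascade from the saved per-network outputs: a sample leaves the cascade
    # as soon as its max softmax probability (or its logit gap, for cascades found by the greedy
    # algorithm) exceeds the threshold of the current stage; later networks only predict on the
    # remaining samples, and their FLOPs are counted proportionally to the fraction that reaches them.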
    for i_cascade, (cfgs_cascade, thresholds_cascade, orig_indices_cascade, weight_paths_cascade, ss_names_cascade) \
            in enumerate(zip(cfgs, thresholds, original_indices, weight_paths, search_space_names)):
outs = None
flops_cur = 0
n_nets_used_in_cascade = 0
idx_more_predictions_needed = None
for i_net, (cfg, orig_idx, weight_path, ss_name) in enumerate(zip(cfgs_cascade, orig_indices_cascade, weight_paths_cascade, ss_names_cascade)):
if cfg is None: #noop
continue
# 1. load
base_path = weight_path[:weight_path.find('/iter')] #need run path
path = os.path.join(base_path, f'output_distrs_{dataset_type_for_path}{postfix}', f'{orig_idx}.npy.gz')
if path not in outs_cache:
with gzip.GzipFile(path, 'r') as f:
outs_cur = np.asarray(np.load(f))
outs_cur = torch.tensor(outs_cur).cuda()
outs_cache[path] = outs_cur
outs_cur = torch.clone(outs_cache[path])
if if_use_logit_gaps:
path = os.path.join(base_path, f'logit_gaps_{dataset_type_for_path}{postfix}', f'{orig_idx}.npy.gz')
with gzip.GzipFile(path, 'r') as f:
logit_gaps_cur = np.asarray(np.load(f))
logit_gaps_cur = torch.tensor(logit_gaps_cur)
# 2. predict
if idx_more_predictions_needed is None:
idx_more_predictions_needed = torch.ones(outs_cur.shape[0], dtype=torch.bool)
outs = outs_cur
n_nets_used_in_cascade = 1
                flops_cur += flops_all[ensemble_ss_names.index(ss_name) + 1][orig_idx] # "+1" because the first entry is the no-op
if if_use_logit_gaps:
logit_gaps = logit_gaps_cur
else:
threshold = thresholds_cascade[i_net - 1]
if not if_use_logit_gaps:
idx_more_predictions_needed[torch.max(outs, dim=1).values > threshold] = False
else:
idx_more_predictions_needed[logit_gaps > threshold] = False
                outs_tmp = outs[idx_more_predictions_needed]  # outs_tmp is needed because chained indexing (x[idx1][idx2] = value) would not modify the original tensor
if not if_use_logit_gaps:
not_predicted_idx = torch.max(outs_tmp, dim=1).values <= threshold
else:
logit_gap_tmp = logit_gaps[idx_more_predictions_needed]
not_predicted_idx = logit_gap_tmp <= threshold
n_not_predicted = torch.sum(not_predicted_idx).item()
if n_not_predicted == 0:
break
if not if_use_logit_gaps:
n_nets_used_in_cascade += 1
coeff1 = (n_nets_used_in_cascade - 1) / n_nets_used_in_cascade # for the current predictions that may already be an average
coeff2 = 1 / n_nets_used_in_cascade # for the predictions of the new model
outs_tmp[not_predicted_idx] = coeff1 * outs_tmp[not_predicted_idx] \
+ coeff2 * outs_cur[idx_more_predictions_needed][not_predicted_idx]
outs[idx_more_predictions_needed] = outs_tmp
else:
# firstly, need to overwrite previous predictions (because they didn't really happen if the gap was too small)
outs_tmp[not_predicted_idx] = outs_cur[idx_more_predictions_needed][not_predicted_idx]
outs[idx_more_predictions_needed] = outs_tmp
# secondly, need to update the logit gap
logit_gap_tmp[not_predicted_idx] = logit_gaps_cur[idx_more_predictions_needed][not_predicted_idx]
# note that the gap for the previously predicted values will be wrong, but it doesn't matter
# because the idx for them has already been set to False
logit_gaps[idx_more_predictions_needed] = logit_gap_tmp
flops_cur += flops_all[ensemble_ss_names.index(ss_name) + 1][orig_idx] * (n_not_predicted / len(labels))
assert outs is not None
preds = torch.argmax(outs, axis=-1)
acc = torch.sum(preds == labels) / len(labels) * 100
accs.append(acc.item())
print(f'{i_cascade}: {accs[-1]}')
flops_new.append(flops_cur)
print(accs)
    info['val'] = info['true_errs']  # plot_results reads 'val' but expects the true errors, so mirror true_errs here
info['flops_old'] = info['flops']
info['flops'] = flops_new
info[dataset_type] = accs
yaml.safe_dump(info, open(cascade_info_path_new, 'w'), default_flow_style=None)
return accs
def evaluate_stored_whole_experiment_cascade(experiment_name, dataset_type, path_labels, **kwargs):
execute_func_for_all_runs_and_combine(experiment_name, evaluate_stored_one_run_cascade, dataset_type=dataset_type,
path_labels=path_labels, **kwargs)
def filter_one_run_cascade(run_path, cascade_info_name, cascade_info_name_new, **kwargs):
# filter indices based on val
# assume that in 'info' the pareto front for 'val' is stored
# note that even though test was computed for all the cascades for convenience, no selection is done on test.
cascade_info_path = glob.glob(os.path.join(run_path, cascade_info_name))[0]
info = yaml.safe_load(open(cascade_info_path))
cfgs = info['cfgs']
def create_idx(key_for_filt):
val_for_filt = info[key_for_filt]
if_round = True
if if_round:
val_for_filt_new = []
for v in val_for_filt:
if kwargs.get('subtract_from_100', True):
v = 100 - v
                v = round(v * 10) / 10  # round to one decimal place
val_for_filt_new.append(v)
val_for_filt = val_for_filt_new
cur_best = 0.
idx_new_pareto = np.zeros(len(cfgs), dtype=bool)
for i in range(len(cfgs)):
if val_for_filt[i] > cur_best:
idx_new_pareto[i] = True
cur_best = val_for_filt[i]
return idx_new_pareto
def filter_by_idx(l, idx):
return np.array(l)[idx].tolist()
def filter_info(key_list, idx):
info_new = copy.deepcopy(info)
for k in key_list:
if k in info_new:
info_new[k] = filter_by_idx(info[k], idx)
return info_new
val_key = kwargs.get('val_key', 'val')
idx_new_pareto = create_idx(val_key)
info_new = filter_info(['cfgs', 'flops', 'flops_old', 'test', 'thresholds', 'true_errs', 'val', 'weight_paths',
'search_space_names', 'original_indices'], idx_new_pareto)
plt.plot(info['flops'], info['test'], '-o')
plt.plot(info_new['flops'], info_new['test'], '-o')
plt.savefig(os.path.join(run_path, 'filtered.png'))
plt.show()
plt.close()
cascade_info_path_new = cascade_info_path.replace(cascade_info_name, cascade_info_name_new)
yaml.safe_dump(info_new, open(cascade_info_path_new, 'w'), default_flow_style=None)
def filter_whole_experiment_cascade(experiment_name, cascade_info_name, cascade_info_name_new, **kwargs):
execute_func_for_all_runs_and_combine(experiment_name, filter_one_run_cascade, cascade_info_name=cascade_info_name,
cascade_info_name_new=cascade_info_name_new, **kwargs)
| 10,795 | 50.409524 | 269 |
py
|
ENCAS
|
ENCAS-main/after_search/average_weights.py
|
import os
import torch
import utils
def swa(run_path, iters, supernet_name_in, supernet_name_out):
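    # Stochastic-weight-averaging-style checkpoint merge: load the checkpoints of the given iterations
    # and average every floating-point tensor element-wise; integer buffers (e.g. num_batches_tracked)
    # are taken from the last checkpoint.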
checkpoint_paths = [os.path.join(run_path, f'iter_{i}', supernet_name_in) for i in iters]
# read checkpoints
checkpoints = [torch.load(p, map_location='cpu') for p in checkpoint_paths]
state_dicts = [c['model_state_dict'] for c in checkpoints]
# for all keys, average
out_state_dict = {}
for k, v in state_dicts[0].items():
if v.data.dtype in [torch.int, torch.long]:
out_state_dict[k] = state_dicts[-1][k] #num batches tracked => makes sense to take the last value
continue
for state_dict in state_dicts:
if k in out_state_dict:
out_state_dict[k] += state_dict[k]
else:
out_state_dict[k] = state_dict[k]
out_state_dict[k] /= len(state_dicts)
# save the result
out_checkpoint = checkpoints[-1]
out_checkpoint['model_state_dict'] = out_state_dict
torch.save(out_checkpoint, os.path.join(run_path, f'iter_{iters[-1]}', supernet_name_out))
def swa_for_whole_experiment(experiment_name, iters, supernet_name_in, target_runs=None):
nsga_logs_path = utils.NAT_LOGS_PATH
experiment_path = os.path.join(nsga_logs_path, experiment_name)
algo_names = []
image_paths = []
for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
if not f.is_dir():
continue
name_cur = f.name
algo_names.append(name_cur)
for run_folder in os.scandir(f.path):
if not run_folder.is_dir():
continue
run_idx = int(run_folder.name)
if target_runs is not None and run_idx not in target_runs:
continue
run_path = os.path.join(experiment_path, name_cur, str(run_idx))
im_path = swa(run_path, iters, supernet_name_in, utils.transform_supernet_name_swa(supernet_name_in, len(iters)))
image_paths.append(im_path)
| 2,015 | 39.32 | 125 |
py
|
ENCAS
|
ENCAS-main/after_search/extract_store_eval.py
|
from plot_results.plotting_functions import compare_val_and_test
from after_search.evaluate_stored_outputs import evaluate_stored_whole_experiment
from after_search.extract_supernet_from_joint import extract_all
from after_search.store_outputs import store_cumulative_pareto_front_outputs
def extract_store_eval(dataset, exp_name, supernets, swa, **kwargs):
'''
A single function for ENCAS-joint that creates per-supernetwork trade-off fronts, evaluates them,
and stores the outputs.
'''
extract_all(exp_name, supernets, swa=swa)
max_iter = kwargs.get('max_iter', 30)
target_runs = kwargs.get('target_runs', None)
dataset_to_label_path = {'cifar100': 'labels_cifar100_test.npy', 'cifar10': 'labels_cifar10_test.npy',
'imagenet': 'labels_imagenet_test.npy'}
for i, out_name_suffix in enumerate(supernets):
out_name = exp_name + f'_EXTRACT_{out_name_suffix}'
store_cumulative_pareto_front_outputs(out_name, 'val', max_iter=max_iter, swa=swa, target_runs=target_runs)
store_cumulative_pareto_front_outputs(out_name, 'test', max_iter=max_iter, swa=swa, target_runs=target_runs)
evaluate_stored_whole_experiment(out_name, f'test_swa{swa}', dataset_to_label_path[dataset],
max_iter=max_iter, target_runs=target_runs)
compare_val_and_test(out_name, f'test_swa{swa}', max_iter=max_iter, target_runs=target_runs)
if __name__=='__main__':
extract_store_eval('cifar100', 'cifar100_r0_5nets', ['alpha', 'ofa12', 'ofa10', 'attn', 'proxyless'], 20, max_iter=30)
| 1,602 | 56.25 | 122 |
py
|
ENCAS
|
ENCAS-main/searcher_wrappers/base_wrapper.py
|
class BaseSearcherWrapper:
def __init__(self):
pass
def search(self, archive, predictor, iter_current):
pass
| 133 | 21.333333 | 55 |
py
|
ENCAS
|
ENCAS-main/searcher_wrappers/nsga3_wrapper.py
|
import os
import time
from pathlib import Path
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.ofa_mbv3_my import OFAMobileNetV3My
from networks.proxyless_my import OFAProxylessNASNetsMy
from utils import get_net_info, SupernetworkWrapper
from searcher_wrappers.base_wrapper import BaseSearcherWrapper
from pymoo.model.problem import Problem
import numpy as np
from pymoo.factory import get_crossover, get_mutation, get_reference_directions
from pymoo.optimize import minimize
from pymoo.algorithms.nsga3 import NSGA3
class Nsga3Wrapper(BaseSearcherWrapper):
def __init__(self, search_space, sec_obj, path_logs,
n_classes, supernet_paths, n_evals, if_add_archive_to_candidates,
**kwargs):
super().__init__()
self.search_space = search_space
self.n_obj = 2
self.problem = NatProblem(self.search_space, None, n_classes, sec_obj, supernet_paths,
self.n_obj, **kwargs)
self.pop_size = 100
self.n_gen = n_evals // self.pop_size
self.if_add_archive_to_candidates = if_add_archive_to_candidates
self.path_logs = path_logs
self.ref_dirs = get_reference_directions("riesz", self.n_obj, 100)
print(f'{self.ref_dirs.shape=}')
def search(self, archive, predictor, iter_current, **kwargs):
workdir = os.path.join(self.path_logs, f'iter_{iter_current}')
Path(workdir).mkdir(exist_ok=True)
F = np.column_stack(list(zip(*archive))[1:])
front = NonDominatedSorting().do(F, only_non_dominated_front=True)
archive_encoded = np.array([self.search_space.encode(x[0]) for x in archive])
nd_X = archive_encoded[front]
# initialize the candidate finding optimization problem
self.problem.predictor = predictor
method = NSGA3(
pop_size=self.pop_size, ref_dirs=self.ref_dirs, sampling=archive_encoded,
crossover=get_crossover("int_ux", prob=0.9),
mutation=get_mutation("int_pm", prob=0.1, eta=1.0),
eliminate_duplicates=True
)
res = minimize(self.problem, method, termination=('n_gen', self.n_gen), save_history=False, verbose=True)
genomes = res.pop.get('X')
objs = res.pop.get('F')
if self.if_add_archive_to_candidates:
archive_unzipped = list(zip(*archive))
configs_archive = archive_unzipped[0]
if 'rbf_ensemble_per_ensemble_member' not in predictor.name:
objs_archive = np.vstack(archive_unzipped[1:]).T # archive needs to contain all objective functions
else:
objs_archive = np.vstack(archive_unzipped[1:-1]).T # ignore objective function that is the tuples of ensemblee metrics
# we can do it because this objs_archive will be used exclusively for subset selection, and we probably don't wanna select on that
genomes_archive = [self.search_space.encode(c) for c in configs_archive]
# need to check for duplicates
for i in range(genomes.shape[0]):
if not any((genomes[i] == x).all() for x in genomes_archive):
genomes_archive.append(genomes[i])
objs_archive = np.vstack((objs_archive, objs[i]))
genomes = np.array(genomes_archive)
objs = np.array(objs_archive)
# useful for debugging:
np.save(os.path.join(workdir, 'genomes.npy'), genomes)
np.save(os.path.join(workdir, 'objs.npy'), objs)
return genomes, objs
class NatProblem(Problem):
'''
an optimization problem for pymoo
'''
def __init__(self, search_space, predictor, n_classes, sec_obj='flops', supernet_paths=None,
n_obj=2, alphabet=None, **kwargs):
super().__init__(n_var=len(alphabet), n_obj=n_obj, n_constr=0, type_var=np.int)
self.ss = search_space
self.predictor = predictor
self.xl = np.array(kwargs['alphabet_lb'])
self.xu = np.array(alphabet, dtype=np.float) - 1 # "-1" because pymoo wants inclusive range
self.sec_obj = sec_obj
self.lut = {'cpu': 'data/i7-8700K_lut.yaml'}
search_space_name = kwargs['search_space_name']
self.search_space_name = search_space_name
dataset = kwargs['dataset']
self.n_image_channels = kwargs['n_image_channels']
if search_space_name == 'reproduce_nat':
ev1_0 = SupernetworkWrapper(n_classes=n_classes, model_path=supernet_paths[0],
engine_class_to_use=OFAMobileNetV3My,
n_image_channels=self.n_image_channels, if_ignore_decoder=True, dataset=dataset,
search_space_name='ofa')
ev1_2 = SupernetworkWrapper(n_classes=n_classes, model_path=supernet_paths[1],
engine_class_to_use=OFAMobileNetV3My,
n_image_channels=self.n_image_channels, if_ignore_decoder=True, dataset=dataset,
search_space_name='ofa')
self.get_engine = lambda config: {
1.0: ev1_0,
1.2: ev1_2,
}[config['w']]
elif search_space_name == 'ensemble':
# assume supernet_paths is a list of paths, 1 per supernet
ensemble_ss_names = kwargs['ensemble_ss_names']
# since I don't use NSGA-3 for joint training & search, there should be only one supernet
assert len(ensemble_ss_names) == 1
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
classes_to_use = [ss_name_to_class[ss_name] for ss_name in ensemble_ss_names]
self.evaluators = [SupernetworkWrapper(n_classes=n_classes, model_path=supernet_path,
engine_class_to_use=encoder_class,
n_image_channels=self.n_image_channels, if_ignore_decoder=False,
dataset=dataset, search_space_name=ss_name)
for supernet_path, ss_name, encoder_class in
zip(supernet_paths, ensemble_ss_names, classes_to_use)]
def _evaluate(self, x, out, *args, **kwargs):
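        # Objective 0 is the top-1 error predicted by the accuracy surrogate; objective 1 is the
        # measured secondary objective (e.g. FLOPs) of the decoded subnet, summed over ensemble
        # members in the 'ensemble' search space.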
st = time.time()
f = np.full((x.shape[0], self.n_obj), np.nan)
top1_err = self.predictor.predict(x)[:, 0]
for i, (_x, err) in enumerate(zip(x, top1_err)):
config = self.ss.decode(_x)
if self.search_space_name == 'reproduce_nat':
subnet, _ = self.get_engine(config).sample({'ks': config['ks'], 'e': config['e'],
'd': config['d'], 'w':config['w']})
info = get_net_info(subnet, (self.n_image_channels, config['r'], config['r']),
measure_latency=self.sec_obj, print_info=False, clean=True, lut=self.lut)
f[i, 1] = info[self.sec_obj]
else:
sec_obj_sum = 0
for conf, evaluator in zip(config, self.evaluators):
subnet, _ = evaluator.sample({'ks': conf['ks'], 'e': conf['e'], 'd': conf['d'], 'w':conf['w']})
info = get_net_info(subnet, (self.n_image_channels, conf['r'], conf['r']),
measure_latency=self.sec_obj, print_info=False, clean=True, lut=self.lut)
sec_obj_sum += info[self.sec_obj]
f[i, 1] = sec_obj_sum
f[i, 0] = err
out["F"] = f
ed = time.time()
print(f'Fitness time = {ed - st}')
| 7,978 | 49.821656 | 146 |
py
|
ENCAS
|
ENCAS-main/searcher_wrappers/mo_gomea_wrapper.py
|
import os
import pickle
from pathlib import Path
import numpy as np
from searcher_wrappers.base_wrapper import BaseSearcherWrapper
from mo_gomea import MoGomeaCInterface
from utils import get_metric_complement
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
class MoGomeaWrapper(BaseSearcherWrapper):
def __init__(self, search_space, sec_obj, path_logs,
n_classes, supernet_paths, n_evals, if_add_archive_to_candidates,
**kwargs):
super().__init__()
self.n_objectives = 2
self.path_logs = path_logs
self.n_classes = n_classes
self.supernet_paths = supernet_paths
self.n_evals = n_evals
self.sec_obj = sec_obj
self.if_add_archive_to_candidates = if_add_archive_to_candidates
self.search_space = search_space
        self.alphabet_name = kwargs['alphabet_name'] # despite the singular name, this may hold several alphabet names
self.alphabet_size = len(kwargs['alphabet'])
# construct a dynamic alphabet for the cascade
self.alphabet = kwargs['alphabet']
self.alphabet_lb = kwargs['alphabet_lb']
self.alphabet_path = os.path.join(self.path_logs, 'dynamic_ensemble_alphabet.txt')
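        # The alphabet (cardinality of every gene) and its lower bounds are written to plain-text files
        # because they are consumed by the external MO-GOMEA executable (via MoGomeaCInterface) rather
        # than being passed in memory.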
with open(self.alphabet_path, mode='w', newline='') as f:
to_print = ''
for x in self.alphabet:
to_print += f'{x} '
to_print = to_print.rstrip()
f.write(to_print)
f.flush()
self.alphabet_lower_bound_path = os.path.join(self.path_logs, 'dynamic_ensemble_alphabet_lb.txt')
with open(self.alphabet_lower_bound_path, mode='w', newline='') as f:
to_print = ''
for x in self.alphabet_lb:
to_print += f'{x} '
to_print = to_print.rstrip()
f.write(to_print)
f.flush()
if 'init_with_nd_front_size' in kwargs:
self.init_with_nd_front_size = kwargs['init_with_nd_front_size']
else:
self.init_with_nd_front_size = 0
if 'gomea_exe_path' not in kwargs:
raise ValueError('Need to pass a path to the appropriate MO-GOMEA executable')
self.gomea_exe_path = kwargs['gomea_exe_path']
self.n_image_channels = kwargs['n_image_channels']
self.dataset = kwargs['dataset']
self.search_space_name = kwargs['search_space_name']
self.ensemble_ss_names = kwargs['ensemble_ss_names']
assert self.gomea_exe_path is not None
def search(self, archive, predictor, iter_current, **kwargs):
workdir_mo_gomea = os.path.join(self.path_logs, f'iter_{iter_current}')
Path(workdir_mo_gomea).mkdir(exist_ok=True)
archive_path = os.path.join(self.path_logs, f'iter_{iter_current - 1}.stats')
path_data_for_c_api = os.path.join(workdir_mo_gomea, 'save_for_c_api')
with open(path_data_for_c_api, 'wb') as file_data_for_c_api:
pickle.dump((predictor, self.n_classes, self.supernet_paths, archive_path, self.sec_obj, '',
self.alphabet_name, self.n_image_channels, self.dataset, self.search_space_name,
self.ensemble_ss_names, ''),
file_data_for_c_api)
path_init = None
if self.init_with_nd_front_size > 0:
F = np.column_stack(list(zip(*archive))[1:])
front = NonDominatedSorting().do(F, only_non_dominated_front=True)
nd_X = np.array([self.search_space.encode(x[0]) for x in archive], dtype=np.int)[front]
            n_to_use_for_init = self.init_with_nd_front_size  # earlier hardcoded values: 5, 16
if nd_X.shape[0] > n_to_use_for_init:
chosen = np.random.choice(nd_X.shape[0], n_to_use_for_init, replace=False)
else:
chosen = np.ones((nd_X.shape[0]), dtype=bool)
idx = np.zeros((nd_X.shape[0]), dtype=np.bool)
idx[chosen] = True
path_init = os.path.join(workdir_mo_gomea, 'init_nd_front')
with open(path_init, 'wb') as f:
np.savetxt(f, nd_X[idx], delimiter=' ', newline='\n', header='', footer='', comments='# ', fmt='%d')
# remove last empty line:
NEWLINE_SIZE_IN_BYTES = -1
f.seek(NEWLINE_SIZE_IN_BYTES, 2)
f.truncate()
mo_gomea = MoGomeaCInterface('NatFitness', workdir_mo_gomea, path_data_for_c_api, self.n_objectives,
n_genes=self.alphabet_size,
alphabet=self.alphabet_path,
alphabet_lower_bound_path=self.alphabet_lower_bound_path,
init_path=path_init, gomea_executable_path=self.gomea_exe_path)
genomes, objs = mo_gomea.search(n_evaluations=self.n_evals, seed=kwargs['seed'])
if self.sec_obj in ['flops', 'cpu', 'gpu']:
objs[:, 1] *= -1 # because in MO-Gomea I maximize "-flops"
objs[:, 0] = get_metric_complement(objs[:, 0]) # because in MO-Gomea I maximize objective, not minimize error
if self.if_add_archive_to_candidates:
archive_unzipped = list(zip(*archive))
configs_archive = archive_unzipped[0]
if 'rbf_ensemble_per_ensemble_member' not in predictor.name:
objs_archive = np.vstack(archive_unzipped[1:]).T # archive needs to contain all objective functions
else:
objs_archive = np.vstack(archive_unzipped[1:-1]).T # ignore objective function that is the tuples of ensemblee metrics
# we can do it because this objs_archive will be used exclusively for subset selection, and we probably don't wanna select on that
genomes_archive = [self.search_space.encode(c) for c in configs_archive]
# need to check for duplicates
for i in range(genomes.shape[0]):
if not any((genomes[i] == x).all() for x in genomes_archive):
genomes_archive.append(genomes[i])
objs_archive = np.vstack((objs_archive, objs[i]))
genomes = np.array(genomes_archive)
objs = objs_archive
# delete 'save_for_c_api' file of the PREVIOUS iteration (otherwise problems when process is interrupted
# after save_for_c_api has been deleted but before a new one is created)
if iter_current > 1:
data_prev = os.path.join(self.path_logs, f'iter_{iter_current - 1}', 'save_for_c_api')
try:
os.remove(data_prev)
except:
pass
return genomes, objs
| 6,682 | 47.781022 | 146 |
py
|
ENCAS
|
ENCAS-main/searcher_wrappers/random_search_wrapper.py
|
import os
from pathlib import Path
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.ofa_mbv3_my import OFAMobileNetV3My
from networks.proxyless_my import OFAProxylessNASNetsMy
from searcher_wrappers.base_wrapper import BaseSearcherWrapper
import numpy as np
from utils import CsvLogger, get_net_info, SupernetworkWrapper
class RandomSearchWrapper(BaseSearcherWrapper):
def __init__(self, search_space, sec_obj, path_logs,
n_classes, supernet_paths, n_evals, if_add_archive_to_candidates,
**kwargs):
super().__init__()
self.search_space = search_space
self.n_obj = 2
self.problem = RandomSearch(
self.search_space, None, n_classes, sec_obj,
supernet_paths, self.n_obj, n_evals, **kwargs)
self.if_add_archive_to_candidates = if_add_archive_to_candidates
self.path_logs = path_logs
def search(self, archive, predictor, iter_current, **kwargs):
workdir = os.path.join(self.path_logs, f'iter_{iter_current}')
Path(workdir).mkdir(exist_ok=True)
# initialize the candidate finding optimization problem
self.problem.predictor = predictor
self.problem.logger = CsvLogger(workdir, 'random.csv')
genomes, objs = self.problem.perform_random_search()
if self.if_add_archive_to_candidates:
archive_unzipped = list(zip(*archive))
configs_archive = archive_unzipped[0]
if 'rbf_ensemble_per_ensemble_member' not in predictor.name:
objs_archive = np.vstack(archive_unzipped[1:]).T # archive needs to contain all objective functions
else:
objs_archive = np.vstack(archive_unzipped[1:-1]).T # ignore objective function that is the tuples of ensemblee metrics
# we can do it because this objs_archive will be used exclusively for subset selection, and we probably don't wanna select on that
genomes_archive = [self.search_space.encode(c) for c in configs_archive]
# need to check for duplicates
for i in range(genomes.shape[0]):
if not any((genomes[i] == x).all() for x in genomes_archive):
genomes_archive.append(genomes[i])
objs_archive = np.vstack((objs_archive, objs[i]))
genomes = np.array(genomes_archive)
objs = np.array(objs_archive)
np.save(os.path.join(workdir, 'genomes.npy'), genomes)
np.save(os.path.join(workdir, 'objs.npy'), objs)
return genomes, objs
class RandomSearch:
def __init__(self, search_space, predictor, n_classes, sec_obj='flops', supernet_paths=None,
n_obj=2, n_evals=1, alphabet=None, **kwargs):
self.ss = search_space
self.predictor = predictor
        print('Note: using the lower bound from alphabet_lb')
        xl = np.array(kwargs['alphabet_lb'])
        xu = np.array(alphabet)  # no "- 1" here because np.random.choice has an exclusive upper bound
self.alphabet_lb_and_ub = list(zip(xl, xu))
self.sec_obj = sec_obj
self.lut = {'cpu': 'data/i7-8700K_lut.yaml'}
search_space_name = kwargs['search_space_name']
self.search_space_name = search_space_name
dataset = kwargs['dataset']
self.n_image_channels = kwargs['n_image_channels']
assert self.search_space_name == 'ensemble'
# assume supernet_paths is a list of paths, 1 per supernet
ensemble_ss_names = kwargs['ensemble_ss_names']
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
classes_to_use = [ss_name_to_class[ss_name] for ss_name in ensemble_ss_names]
self.evaluators = [SupernetworkWrapper(n_classes=n_classes, model_path=supernet_path,
engine_class_to_use=encoder_class,
n_image_channels=self.n_image_channels, if_ignore_decoder=False, dataset=dataset,
search_space_name=ss_name)
for supernet_path, ss_name, encoder_class in zip(supernet_paths, ensemble_ss_names, classes_to_use)]
self.logger = None
self.n_evals = n_evals
def _evaluate(self, solution):
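        # Decode the genome, instantiate each ensemble member to measure its secondary objective
        # (e.g. FLOPs), then query the surrogate with two encodings: the raw genome ('for_acc') and
        # the last two genes of each member's encoding concatenated with the measured FLOPs ('for_flops').
        # Returns the predicted top-1 error and the second objective value.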
if self.sec_obj in ['flops', 'cpu', 'gpu']:
config = self.ss.decode(solution)
sec_objs = []
for conf, evaluator in zip(config, self.evaluators):
subnet, _ = evaluator.sample({'ks': conf['ks'], 'e': conf['e'], 'd': conf['d'], 'w': conf['w']})
info = get_net_info(subnet, (self.n_image_channels, conf['r'], conf['r']),
measure_latency=self.sec_obj, print_info=False, clean=True, lut=self.lut)
sec_objs.append(info[self.sec_obj])
input_acc = np.array(solution)[np.newaxis, :]
solution_reencoded_sep = self.ss.encode(config, if_return_separate=True)
input_flops = np.concatenate(
[sol_sep[-2:] for sol_sep in solution_reencoded_sep] + [[int(f) for f in sec_objs]])[np.newaxis, :]
top1_err_and_other_obj = self.predictor.predict({'for_acc': input_acc, 'for_flops': input_flops})[0]
top1_err = top1_err_and_other_obj[0]
other_obj = top1_err_and_other_obj[1]
return top1_err, other_obj
def perform_random_search(self):
all_solutions = []
all_objectives = []
evals_performed = 0
evaluated_solutions = set()
while evals_performed < self.n_evals:
solution = np.array([np.random.choice(range(lb, ub), 1) for lb, ub in self.alphabet_lb_and_ub])
solution = solution[:, 0].tolist()
while tuple(solution) in evaluated_solutions:
solution = np.array([np.random.choice(range(lb, ub), 1) for lb, ub in self.alphabet_lb_and_ub])
solution = solution[:, 0].tolist()
top1_err, other_obj = self._evaluate(solution)
true_objs = (top1_err, other_obj)
true_objs_str = str(true_objs).replace(' ', '')
self.logger.log([evals_performed, 0, ','.join([str(s) for s in solution]), true_objs_str])
evals_performed += 1
# print(f'{evals_performed}: New solution! {true_objs=}')
if evals_performed % 1000 == 0:
print(f'{evals_performed=}')
all_solutions.append(solution)
all_objectives.append(list(true_objs))
evaluated_solutions.add(tuple(solution))
all_solutions = np.vstack(all_solutions)
all_objectives = np.array(all_objectives)
return all_solutions, all_objectives
| 7,158 | 48.034247 | 146 |
py
|
ENCAS
|
ENCAS-main/encas/encas_api.py
|
import glob
import pickle
import gzip
import numpy as np
import os
import torch
from utils import threshold_gene_to_value_moregranular as threshold_gene_to_value
class EncasAPI:
def __init__(self, filename):
self.use_cache = True
kwargs = pickle.load(open(filename, 'rb'))
self.if_allow_noop = kwargs['if_allow_noop']
self.subnet_to_flops = kwargs['subnet_to_flops']
self.search_goal = kwargs['search_goal'] # ensemble or cascade
labels_path = kwargs['labels_path']
self.labels = torch.tensor(np.load(labels_path)).cuda()
output_distr_paths = kwargs['output_distr_paths']
outputs_all = []
for p in output_distr_paths:
output_distr = []
n_files = len(glob.glob(os.path.join(p, "*.npy.gz")))
for j in range(n_files):
print(os.path.join(p, f'{j}.npy.gz'))
with gzip.GzipFile(os.path.join(p, f'{j}.npy.gz'), 'r') as f:
output_distr.append(np.asarray(np.load(f), dtype=np.float16)[None, ...])
outputs_all += output_distr
if self.if_allow_noop:
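            # Gene value 0 is reserved for a "no-op" model: its all-zero output distribution leaves the
            # ensemble average unchanged, and the fitness functions exclude it when counting real nets.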
output_distr_onenet_noop = np.zeros_like(output_distr[0]) # copy arbitrary one to get the shape
if len(output_distr_onenet_noop.shape) < 3:
output_distr_onenet_noop = output_distr_onenet_noop[None, ...]
outputs_all = [output_distr_onenet_noop] + outputs_all
if True: #pre-allocation & concatenation on CPU is helpful when there's not enough VRAM
preallocated_array = np.zeros((len(outputs_all), outputs_all[-1].shape[1], outputs_all[-1].shape[2]), dtype=np.float16)
np.concatenate((outputs_all), axis=0, out=preallocated_array)
self.subnet_to_output_distrs = torch.tensor(preallocated_array).cuda()
else:
outputs_all = [torch.tensor(o).cuda() for o in outputs_all]
self.subnet_to_output_distrs = torch.cat(outputs_all, dim=0)
print(f'{self.subnet_to_output_distrs.shape=}')
print(f'{len(self.subnet_to_flops)=}')
def _fitness_ensemble(self, solution):
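        # Ensemble fitness from cached outputs: average the stored softmax distributions of the selected
        # subnets (no-op genes contribute all-zero outputs and are excluded from the divisor) and return
        # (-error, -FLOPs), so that a maximizing searcher can be used.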
solution_size = len(solution)
nets_used = solution_size
if self.if_allow_noop:
n_noops = sum([1 if g == 0 else 0 for g in solution])
if n_noops == nets_used:
return (-100, -1e5)
nets_used -= n_noops
preds = torch.clone(self.subnet_to_output_distrs[solution[0]])
for j in range(1, solution_size):
preds_cur = self.subnet_to_output_distrs[solution[j]]
preds += preds_cur
preds /= nets_used
output = torch.argmax(preds, 1)
err = (torch.sum(self.labels != output).item() / len(self.labels)) * 100
flops = sum([self.subnet_to_flops[solution[j]] for j in range(solution_size)])
obj0_proper_form, obj1_proper_form = -err, -flops
return (obj0_proper_form, obj1_proper_form)
def _fitness_cascade(self, solution):
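        # Cascade fitness: the first half of the genome selects subnets (0 = no-op), the second half
        # selects confidence thresholds decoded via threshold_gene_to_value; e.g. for up to 3 nets the
        # genome is [n1, n2, n3, t1, t2]. A sample stops propagating once its max (averaged) probability
        # exceeds the current threshold, and each later subnet's FLOPs are charged only for the fraction
        # of samples that still need a prediction. Returns (-error, -expected FLOPs).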
max_n_nets = (len(solution) + 1) // 2
solution_nets, solution_thresholds = solution[:max_n_nets], solution[max_n_nets:]
n_nets_not_noops = max_n_nets
if self.if_allow_noop:
n_noops = sum([1 if g == 0 else 0 for g in solution_nets])
if n_noops == n_nets_not_noops:
return (-100, -1e5)
n_nets_not_noops -= n_noops
n_nets_used_in_cascade = 0
        preds = torch.clone(self.subnet_to_output_distrs[solution_nets[0]])  # clone, as in _fitness_ensemble, to avoid modifying the cached outputs
flops = self.subnet_to_flops[solution_nets[0]]
n_nets_used_in_cascade += int(solution_nets[0] != 0)
idx_more_predictions_needed = torch.ones(preds.shape[0], dtype=torch.bool)
for j in range(1, max_n_nets):
if solution_nets[j] == 0: # noop
continue
cur_threshold = threshold_gene_to_value[solution_thresholds[j - 1]]
idx_more_predictions_needed[torch.max(preds, dim=1).values > cur_threshold] = False
            preds_tmp = preds[idx_more_predictions_needed]  # preds_tmp is needed because chained indexing (x[idx1][idx2] = value) would not modify the original tensor
not_predicted_idx = torch.max(preds_tmp, dim=1).values <= cur_threshold
# not_predicted_idx = torch.max(preds, dim=1).values < cur_threshold
n_not_predicted = torch.sum(not_predicted_idx).item()
# print(f'{n_not_predicted=}')
if n_not_predicted == 0:
break
n_nets_used_in_cascade += 1 #it's guaranteed to not be a noop
preds_cur = self.subnet_to_output_distrs[solution_nets[j]]
if_average_outputs = True
if if_average_outputs:
coeff1 = (n_nets_used_in_cascade - 1) / n_nets_used_in_cascade # for the current predictions that may already be an average
coeff2 = 1 / n_nets_used_in_cascade # for the predictions of the new model
preds_tmp[not_predicted_idx] = coeff1 * preds_tmp[not_predicted_idx] \
+ coeff2 * preds_cur[idx_more_predictions_needed][not_predicted_idx]
preds[idx_more_predictions_needed] = preds_tmp
else:
preds_tmp[not_predicted_idx] = preds_cur[idx_more_predictions_needed][not_predicted_idx]
preds[idx_more_predictions_needed] = preds_tmp
flops += self.subnet_to_flops[solution_nets[j]] * (n_not_predicted / len(self.labels))
output = torch.argmax(preds, dim=1)
err = (torch.sum(self.labels != output).item() / len(self.labels)) * 100
obj0_proper_form, obj1_proper_form = -err, -flops
return (obj0_proper_form, obj1_proper_form)
def fitness(self, solution):
# st = time.time()
solution = [int(x) for x in solution]
if self.search_goal == 'ensemble':
res = self._fitness_ensemble(solution)
elif self.search_goal == 'cascade':
res = self._fitness_cascade(solution)
else:
raise NotImplementedError(f'Unknown {self.search_goal=}')
# ed = time.time()
# self.avg_time.update(ed - st)
# if self.avg_time.count % 500 == 0:
# print(f'Avg fitness time is {self.avg_time.avg} @ {self.avg_time.count}')
return res
| 6,361 | 42.278912 | 172 |
py
|
ENCAS
|
ENCAS-main/encas/greedy_search.py
|
# implementation of the algorithm from the paper http://proceedings.mlr.press/v80/streeter18a/streeter18a.pdf
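# Roughly: a cascade is grown greedily by adding "confident models" (a subnet together with a
# logit-gap threshold, chosen so that accuracy on the samples it claims stays within a given
# factor of a reference model's accuracy; see confident_model_set below).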
import time
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import torch
import utils
class GreedySearchWrapperEnsembleClassification:
def __init__(self, alphabet, subnet_to_output_distrs, subnet_to_flops, labels, if_allow_noop, ensemble_size, logit_gaps_all, **kwargs):
super().__init__()
# ConfidentModel is implemented as boolean idx atop model's predictions
torch.multiprocessing.set_start_method('spawn')
# need to set up logging in subprocesses, so:
self.log_file_path = kwargs['log_file_path']
self.subnet_to_output_distrs = torch.tensor(subnet_to_output_distrs)
self.subnet_to_output_distrs_argmaxed = torch.argmax(self.subnet_to_output_distrs, -1)
self.subnet_to_logit_gaps = torch.tensor(logit_gaps_all) + 1e-7  # gaps are stored as float16, so some are exactly zero, which breaks the 0.0 threshold; add a small epsilon
subnet_to_logit_gaps_fp32 = self.subnet_to_logit_gaps.to(torch.float32)
self.subnet_to_logit_gaps_unique_sorted = {}
for i in range(len(self.subnet_to_logit_gaps)):
tmp = torch.sort(torch.unique(subnet_to_logit_gaps_fp32[i]))[0]
self.subnet_to_logit_gaps_unique_sorted[i] = tmp
self.labels = torch.tensor(labels)
self.subnet_to_flops = subnet_to_flops
self.size_to_pad_to = ensemble_size # to pad to this size - doesn't influence the algorithm, only the ease of saving
# Note: several places in this code assume that the noop subnet is included (at index 0)
@staticmethod
def acc_constraint_holds(preds1, preds2, labels, multiplier_ref_acc):
def accuracy(preds, labels):
return torch.sum(labels == preds) / len(labels) * 100
acc1 = accuracy(preds1, labels)
acc2 = accuracy(preds2, labels)
if acc1 >= multiplier_ref_acc * acc2 and acc1 != 0:
return acc1.item()
return None
@staticmethod
def confident_model_set(validation_unpredicted_idx, idx_subnet_ref_acc, multiplier_ref_acc, already_used_subnets,
subnet_to_output_distrs_argmaxed, subnet_to_logit_gaps, subnet_to_logit_gaps_unique_sorted, labels):
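# For every subnet not yet in the cascade, find the smallest logit-gap threshold at which its
# accuracy on the still-unpredicted samples is at least multiplier_ref_acc times that of the
# reference subnet on the same samples; returns per-subnet predicted masks, thresholds and accuracies.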
subnet_to_predicted_idx = {}
subnet_to_threshold = {}
subnet_to_acc = {}
for i_model in range(subnet_to_output_distrs_argmaxed.shape[0]):
if i_model in already_used_subnets:
continue
logit_gaps_cur = subnet_to_logit_gaps[i_model] # (n_samples, 1)
logit_gaps_cur_unique_sorted = subnet_to_logit_gaps_unique_sorted[i_model]
# first check zero, for speed
t = 0.0
predicted_idx = logit_gaps_cur >= t
predicted_idx = predicted_idx * validation_unpredicted_idx # only interested in predictions on yet-unpredicted images
cur_subnet_to_output_distr_argmaxed = subnet_to_output_distrs_argmaxed[i_model]
ref_subnet_to_output_distr_argmaxed = subnet_to_output_distrs_argmaxed[idx_subnet_ref_acc]
acc = GreedySearchWrapperEnsembleClassification.acc_constraint_holds(cur_subnet_to_output_distr_argmaxed[predicted_idx],
ref_subnet_to_output_distr_argmaxed[predicted_idx],
labels[predicted_idx], multiplier_ref_acc)
if acc is None:
for ind in range(logit_gaps_cur_unique_sorted.shape[0]):
t = logit_gaps_cur_unique_sorted[ind]
predicted_idx = logit_gaps_cur >= t
predicted_idx = predicted_idx * validation_unpredicted_idx # only interested in predictions on yet-unpredicted images
acc = GreedySearchWrapperEnsembleClassification.acc_constraint_holds(cur_subnet_to_output_distr_argmaxed[predicted_idx],
ref_subnet_to_output_distr_argmaxed[predicted_idx],
labels[predicted_idx], multiplier_ref_acc)
if acc is not None:
if ind > 0:
t = 1e-7 + logit_gaps_cur_unique_sorted[ind - 1].item()
else:
t = t.item()
break
subnet_to_predicted_idx[i_model] = predicted_idx
subnet_to_threshold[i_model] = t
subnet_to_acc[i_model] = acc
return subnet_to_predicted_idx, subnet_to_threshold, subnet_to_acc
@staticmethod
def _search_for_model_and_multiplier(kwargs):
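# Builds one complete cascade for a fixed (reference subnet, accuracy multiplier) pair: at every
# step the subnet with the highest "newly covered samples per FLOP" ratio is appended, until every
# validation sample is predicted. Runs inside a worker process, hence the single-thread setting
# and the logging re-setup below.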
torch.set_num_threads(1)
self, multiplier_ref_acc, idx_subnet_ref_acc = kwargs['self'], kwargs['multiplier_ref_acc'], kwargs['idx_subnet_ref_acc']
utils.setup_logging(self.log_file_path)
st = time.time()
subnet_to_output_distrs = self.subnet_to_output_distrs
subnet_to_output_distrs_argmaxed = self.subnet_to_output_distrs_argmaxed
subnet_to_logit_gaps = self.subnet_to_logit_gaps
labels = self.labels
all_solutions = []
all_objectives = []
validation_unpredicted_idx = torch.ones_like(labels, dtype=bool)
cur_cascade = [0]  # noop placeholder: not needed by the algorithm, but it simplifies saving (stripped below)
cur_thresholds = []
cur_flops = 0
cur_predictions = torch.zeros_like(
subnet_to_output_distrs[idx_subnet_ref_acc]) # idx is not important, they all have the same shape
while torch.sum(validation_unpredicted_idx) > 0:
subnet_to_predicted_idx, subnet_to_threshold, subnet_to_acc = \
GreedySearchWrapperEnsembleClassification.confident_model_set(validation_unpredicted_idx,
idx_subnet_ref_acc,
multiplier_ref_acc, set(cur_cascade),
subnet_to_output_distrs_argmaxed,
subnet_to_logit_gaps, self.subnet_to_logit_gaps_unique_sorted, labels)
best_new_subnet_index = -1
best_r = 0
for i_model in subnet_to_predicted_idx.keys():
n_predicted_cur = torch.sum(subnet_to_predicted_idx[i_model])
if n_predicted_cur == 0:
continue
# filter to M_useful
if subnet_to_acc[i_model] is None:
continue
# the "confident_model_set", as described in the paper, already generates only models that satisfy
# the accuracy constraint, thus M_useful and M_accurate are the same
r = n_predicted_cur / self.subnet_to_flops[i_model]
if r > best_r:
best_r = r
best_new_subnet_index = i_model
cur_cascade.append(best_new_subnet_index)
cur_thresholds.append(subnet_to_threshold[best_new_subnet_index])
cur_flops += (torch.sum(validation_unpredicted_idx) / len(labels)) * self.subnet_to_flops[
best_new_subnet_index]
predicted_by_best_subnet_idx = subnet_to_predicted_idx[best_new_subnet_index]
cur_predictions[predicted_by_best_subnet_idx] = subnet_to_output_distrs[best_new_subnet_index][
predicted_by_best_subnet_idx]
validation_unpredicted_idx[predicted_by_best_subnet_idx] = False
cur_flops = cur_flops.item()
cur_cascade = cur_cascade[1:]  # drop the leading noop placeholder
if len(cur_cascade) < self.size_to_pad_to:
n_to_add = self.size_to_pad_to - len(cur_cascade)
cur_cascade += [0] * n_to_add
cur_thresholds += [0] * n_to_add
all_solutions.append(cur_cascade + cur_thresholds)
# compute true error for the constructed cascade
output = torch.argmax(cur_predictions, 1)
true_err = (torch.sum(labels != output) / len(labels) * 100).item()
all_objectives.append((true_err, cur_flops))
ed = time.time()
print(f'{multiplier_ref_acc=} {idx_subnet_ref_acc=} {cur_cascade=} {cur_thresholds=} {cur_flops=:.2f} time={ed-st}')
return all_solutions, all_objectives
def search(self, seed):
# Seed is not useful and not used because the algorithm is deterministic.
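# Enumerate all (reference subnet, accuracy multiplier) combinations and build one cascade per
# combination in parallel worker processes.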
st = time.time()
all_solutions = []
all_objectives = []
kwargs = []
for idx_subnet_ref_acc in range(1, len(self.subnet_to_output_distrs)):
for multiplier_ref_acc in [1 - i / 100 for i in range(0, 5 + 1)]:
kwargs.append({'self': self, 'multiplier_ref_acc': multiplier_ref_acc,
'idx_subnet_ref_acc': idx_subnet_ref_acc})
n_workers = 32
with ProcessPoolExecutor(max_workers=n_workers) as executor:
futures = [executor.submit(GreedySearchWrapperEnsembleClassification._search_for_model_and_multiplier, kws) for kws in kwargs]
for f in futures:
cur_solutions, cur_objectives = f.result() # the order is not important
all_solutions += cur_solutions
all_objectives += cur_objectives
all_solutions = np.vstack(all_solutions)
all_objectives = np.array(all_objectives)
print(all_solutions)
print(all_objectives)
ed = time.time()
print(f'GreedyCascade time = {ed - st}')
return all_solutions, all_objectives
| 9,745 | 50.294737 | 148 |
py
|
ENCAS
|
ENCAS-main/encas/post_hoc_search_run_many.py
|
import glob
import itertools
import os
import json
import gzip
import argparse
import utils_pareto
from encas.mo_gomea_search import MoGomeaWrapperEnsembleClassification
from encas.random_search import RandomSearchWrapperEnsembleClassification
from greedy_search import GreedySearchWrapperEnsembleClassification
# os.environ['OMP_NUM_THREADS'] = '32'
from os.path import join
from pathlib import Path
import shutil
import yaml
from matplotlib import pyplot as plt
import utils
from after_search.evaluate_stored_outputs import evaluate_stored_whole_experiment_cascade, filter_whole_experiment_cascade
from plot_results.plotting_functions import compare_val_and_test
from utils import set_seed
from utils_pareto import get_best_pareto_up_and_including_iter, get_everything_from_iter
from utils import threshold_gene_to_value_moregranular as threshold_gene_to_value
import numpy as np
def create_problem_data(experiment_paths, max_iters, dataset_type, if_allow_noop, ensemble_ss_names,
funs_to_get_subnets_names, if_load_output_distr, dataset_postfix,
if_return_logit_gaps=False, if_timm=False):
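# Collects, for every input NAT experiment, the subnet configs, FLOPs, iteration indices and
# (optionally) the cached output distributions / logit gaps, and concatenates them into one flat
# search space; when if_allow_noop is set, a zero-cost noop entry is prepended at index 0.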
cfgs_all, flops_all, iters_all, outputs_all, ss_names_all, experiment_paths_all, logit_gaps_all, original_indices_all = [], [], [], [], [], [], [], []
alphabet = 0
assert len(experiment_paths) == len(max_iters)
if funs_to_get_subnets_names is None:
funs_to_get_subnets_names = ['cum_pareto'] * len(max_iters)
elif type(funs_to_get_subnets_names) is not list:
funs_to_get_subnets_names = [funs_to_get_subnets_names] * len(max_iters)
name2fun = {'cum_pareto': get_best_pareto_up_and_including_iter, 'last_iter': get_everything_from_iter,
'all': utils_pareto.get_everything_up_and_including_iter}
for i, (experiment_path, max_iter, fun_get_subnets_name) in enumerate(zip(experiment_paths, max_iters, funs_to_get_subnets_names)):
if not if_timm:
cfgs, _, flops, iters = name2fun[fun_get_subnets_name](experiment_path, max_iter)
else:
path_info = os.path.join(utils.NAT_LOGS_PATH, experiment_path, f'output_distrs_{dataset_type}{dataset_postfix}', 'info.json')
loaded = json.load(open(path_info))
cfgs = np.array([[x] for x in loaded['net_names']])
flops = np.array(loaded['flops'])
iters = np.array([0] * len(cfgs))
subsample = lambda l: l  # identity; an alternative would be [l[0], l[-1]]
# Note that the subsampling won't work for mo-gomea (at least) because the output_distr is not subsampled, so indexes will refer to wrong outputs
cfgs, flops, iters = subsample(cfgs), subsample(flops), subsample(iters)
output_distr_path = os.path.join(utils.NAT_LOGS_PATH, experiment_path, f'output_distrs_{dataset_type}{dataset_postfix}')
logit_gaps_path = os.path.join(utils.NAT_LOGS_PATH, experiment_path, f'logit_gaps_{dataset_type}{dataset_postfix}')
if if_load_output_distr:
assert os.path.isdir(output_distr_path)
output_distr = []
n_files = len(glob.glob(os.path.join(output_distr_path, "*.npy.gz")))
for j in range(n_files):
print(j)
with gzip.GzipFile(os.path.join(output_distr_path, f'{j}.npy.gz'), 'r') as f:
output_distr.append(np.asarray(np.load(f), dtype=np.float16)[None, ...])
outputs_all += output_distr
if if_return_logit_gaps:
logit_gaps = []
for j in range(n_files):
print(j)
with gzip.GzipFile(os.path.join(logit_gaps_path, f'{j}.npy.gz'), 'r') as f:
logit_gaps.append(np.asarray(np.load(f), dtype=np.float16)[None, ...])
logit_gaps_all += logit_gaps
original_indices_all += list(range(len(output_distr)))
else:
outputs_all.append(output_distr_path)
n_files = len(glob.glob(os.path.join(output_distr_path, '*.npy.gz')))
original_indices_all += list(range(n_files))
# if if_return_logit_gaps:
# logit_gaps_path = os.path.join('/export/scratch3/aleksand/nsganetv2/logs', experiment_path,
# f'logit_gaps_cum_pareto_{dataset_type}_logitgaps.npy')
# logit_gaps = np.load(logit_gaps_path)
# logit_gaps_all.append(logit_gaps)
cfgs_all += cfgs.tolist()
flops_all.append(flops.tolist())
iters_all += iters.tolist()
alphabet += len(cfgs)
ss_names_all += [ensemble_ss_names[i]] * len(cfgs)
experiment_paths_all += [experiment_path] * len(cfgs)
if if_allow_noop:
cfgs_all = [[None]] + cfgs_all
flops_all = [[0]] + flops_all
iters_all = [iters_all[-1]] + iters_all
alphabet += 1
ss_names_all = ['noop'] + ss_names_all
experiment_paths_all = ['noop'] + experiment_paths_all
original_indices_all = [None] + original_indices_all
if if_load_output_distr:
output_distr_onenet_noop = np.zeros_like(output_distr[0]) # copy arbitrary one to get the shape
if len(output_distr_onenet_noop.shape) < 3:
output_distr_onenet_noop = output_distr_onenet_noop[None, ...]
# output_distr = np.concatenate((output_distr_onenet_noop, output_distr), axis=0)
outputs_all = [output_distr_onenet_noop] + outputs_all
if if_return_logit_gaps:
logit_gaps_onenet_noop = np.zeros_like(logit_gaps_all[0][0]) # copy arbitrary one to get the shape
if len(logit_gaps_onenet_noop.shape) < 2:
logit_gaps_onenet_noop = logit_gaps_onenet_noop[None, ...]
logit_gaps_all = [logit_gaps_onenet_noop] + logit_gaps_all
if if_load_output_distr:
preallocated_array = np.zeros((len(outputs_all), outputs_all[0].shape[1], outputs_all[0].shape[2]), dtype=np.float16)
np.concatenate(outputs_all, axis=0, out=preallocated_array)
outputs_all = preallocated_array
if if_return_logit_gaps:
preallocated_array = np.zeros((len(logit_gaps_all), logit_gaps_all[0].shape[1]), dtype=np.float16)
np.concatenate(logit_gaps_all, axis=0, out=preallocated_array)
logit_gaps_all = preallocated_array
return cfgs_all, flops_all, iters_all, alphabet, outputs_all, ss_names_all, experiment_paths_all, logit_gaps_all, original_indices_all
def create_msunas_config():
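# Copies the NAT config of the first input experiment into the current run directory and patches
# in the ensemble search-space names and supernet paths.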
msunas_config_path_starter = os.path.join(nat_logs_path, experiment_paths[0], 'config_msunas.yml')
msunas_config_path = join(run_path, 'config_msunas.yml')
shutil.copy(msunas_config_path_starter, msunas_config_path)
msunas_config = yaml.load(open(msunas_config_path, 'r'), yaml.SafeLoader)
msunas_config['ensemble_ss_names'] = ensemble_ss_names
msunas_config['supernet_path'] = supernet_paths
yaml.dump(msunas_config, open(msunas_config_path, 'w'))
return msunas_config
def create_pareto(true_errs_out, flops_out, cfgs_out, weight_paths_out, ss_names_out, thresholds_values_out, original_indices_out):
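# Sorts the candidate cascades by FLOPs, keeps only the Pareto-efficient (error, FLOPs) points,
# and reorders all companion arrays consistently.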
idx_sort_flops = np.argsort(flops_out)
true_errs_out = np.array(true_errs_out)[idx_sort_flops]
flops_out = np.array(flops_out)[idx_sort_flops]
all_objs = list(zip(true_errs_out, flops_out))
all_objs = np.array(all_objs)
pareto_best_cur_idx = utils_pareto.is_pareto_efficient(all_objs)
true_errs_out = true_errs_out[pareto_best_cur_idx]
flops_out = flops_out[pareto_best_cur_idx]
def sort(l):
return np.array(l)[idx_sort_flops][pareto_best_cur_idx]
cfgs_out = sort(cfgs_out)
weight_paths_out = sort(weight_paths_out)
ss_names_out = sort(ss_names_out)
thresholds_values_out = sort(thresholds_values_out)
original_indices_out = sort(original_indices_out)
return true_errs_out, flops_out, cfgs_out, weight_paths_out, ss_names_out, thresholds_values_out, original_indices_out
if __name__ == '__main__':
plt.rcParams.update({'font.size': 10})
plt.rcParams['axes.grid'] = True
p = argparse.ArgumentParser()
p.add_argument(f'--config', default='configs_encas/cifar100_DEBUG.yml', type=str)
p.add_argument(f'--target_run', default=None, type=str)
parsed_args = vars(p.parse_args())
encas_config_path = parsed_args['config']
cfg = yaml.safe_load(open(os.path.join(utils.NAT_PATH, encas_config_path)))
nat_logs_path, nat_data_path = utils.NAT_LOGS_PATH, utils.NAT_DATA_PATH
funs_to_get_subnets_names = cfg['funs_to_get_subnets_names']
dataset_name, dataset_postfix, label_postfix = cfg['dataset'], cfg['dataset_postfix'], cfg['label_postfix']
SEED = cfg['random_seed']
n_evals = cfg['n_evals']
gomea_exe_path = cfg.get('gomea_exe_path', None)
experiment_names = cfg['input_experiment_names']
ensemble_size, if_allow_noop = cfg['ensemble_size'], cfg['if_allow_noop']
max_iters, algo, search_goal = cfg['max_iters'], cfg['algo'], cfg['search_goal']
dataset_type = cfg['dataset_type']
target_runs = cfg.get('target_runs', None)
if parsed_args['target_run'] is not None:
target_runs = [int(parsed_args['target_run'])]
ensemble_ss_names, join_or_sep = cfg['input_search_spaces'], 'join' if 'extract' in experiment_names[0].lower() else 'sep'
out_name_template = cfg['out_name_template']
cfg['join_or_sep'] = join_or_sep
cfg['n_inputs'] = len(experiment_names)
out_name = out_name_template.format(**cfg)
cfg['out_name'] = out_name
if algo == 'greedy':
ensemble_size = 30 # to pad to this size - doesn't influence the algorithm, only the ease of saving
# (re)defined only after out_name has been formatted, so the padded size does not affect the name
Path(join(nat_logs_path, out_name)).mkdir(exist_ok=True)
Path(join(nat_logs_path, out_name, algo)).mkdir(exist_ok=True)
yaml.safe_dump(cfg, open(join(nat_logs_path, out_name, algo, 'config_encas.yml'), 'w'), default_flow_style=None)
if_timm = 'timm' in out_name
if not if_timm:
supernet_paths = [join(nat_data_path, utils.ss_name_to_supernet_path[ss]) for ss in ensemble_ss_names]
exp_name_to_get_algo_names = experiment_names[0]
searcher_class = {'mo-gomea': MoGomeaWrapperEnsembleClassification,
'random': RandomSearchWrapperEnsembleClassification,
'greedy': GreedySearchWrapperEnsembleClassification}[algo]
if_load_data = algo == 'greedy'
labels_path = join(nat_data_path, f'labels_{dataset_name}_{dataset_type}{label_postfix}.npy')
labels = np.load(labels_path) if if_load_data else labels_path
for i_algo_folder, f in enumerate(reversed(sorted(os.scandir(join(nat_logs_path, exp_name_to_get_algo_names)), key=lambda e: e.name))):
if not f.is_dir():
continue
nat_algo_name = f.name
assert i_algo_folder == 0
# nat_algo_name is whatever method was used in NAT (e.g. "search_algo:mo-gomea!subset_selector:reference"),
# algo is the algorithm used here (mo-gomea, random, greedy)
for run_folder in os.scandir(f.path):
if not run_folder.is_dir():
continue
run_idx = run_folder.name
if target_runs is not None and int(run_idx) not in target_runs:
print(f'Skipping run {run_idx}')
continue
run_path = join(nat_logs_path, out_name, algo, run_idx)
Path(run_path).mkdir(exist_ok=True)
log_file_path = os.path.join(run_path, '_log.txt')
utils.setup_logging(log_file_path)
experiment_paths = [join(exp, nat_algo_name, run_idx) for exp in experiment_names]
if not if_timm:
msunas_config = create_msunas_config()
cfgs_all, flops_all, iters_all, alphabet, output_distr, ss_names_all, experiment_paths_all, logit_gaps_all, original_indices_all = \
create_problem_data(experiment_paths, max_iters, dataset_type, if_allow_noop, ensemble_ss_names,
funs_to_get_subnets_names, if_load_data, dataset_postfix,
if_return_logit_gaps=algo == 'greedy', if_timm=if_timm)
cur_seed = SEED + int(run_idx)
set_seed(cur_seed)
flops_all_flattened = list(itertools.chain.from_iterable(flops_all))
genomes, objs = searcher_class(alphabet, output_distr, flops_all_flattened, labels, if_allow_noop,
ensemble_size, run_path=run_path, search_goal=search_goal,
logit_gaps_all=logit_gaps_all, n_evals=n_evals, log_file_path=log_file_path,
gomea_exe_path=gomea_exe_path).search(cur_seed)
print(f'{genomes.shape=}')
plt.plot(objs[:, 1], utils.get_metric_complement(objs[:, 0]), '.', markersize=1)
cfgs_out, true_errs_out, flops_out, weight_paths_out, ss_names_out, thresholds_values_out, original_indices_out = [], [], [], [], [], [], []
for i_genome, genome in enumerate(genomes):
cur_cfg, cur_true_errs, cur_flops, cur_weight_paths, cur_ss_names, thresholds_values, cur_original_indices = [], 0, 0, [], [], [], []
genome_nets, genome_thresholds = genome[:ensemble_size], genome[ensemble_size:] # thresholds will be empty if not cascade
for gene in genome_nets:
gene = int(gene) # in greedy search the genome is float because half of it are thresholds values
cur_cfg.append(cfgs_all[gene][0]) # "[0]" because it's an array of 1 element
cur_weight_paths.append(join(nat_logs_path, experiment_paths_all[gene], f'iter_{iters_all[gene]}'))
cur_ss_names.append(ss_names_all[gene])
cur_original_indices.append(original_indices_all[gene])
if search_goal == 'cascade':
if algo != 'greedy':
thresholds_values = [threshold_gene_to_value[x] for x in genome_thresholds]
else:
# for greedy search the thresholds are not gene-encoded; they are raw logit-gap values
thresholds_values = genome_thresholds
cur_true_errs = objs[i_genome, 0]
cur_flops = objs[i_genome, 1]
cfgs_out.append(cur_cfg)
true_errs_out.append(cur_true_errs)
flops_out.append(cur_flops)
weight_paths_out.append(cur_weight_paths)
ss_names_out.append(cur_ss_names)
thresholds_values_out.append(thresholds_values)
original_indices_out.append(cur_original_indices)
true_errs_out, flops_out, cfgs_out, weight_paths_out, ss_names_out, \
thresholds_values_out, original_indices_out = create_pareto(true_errs_out, flops_out, cfgs_out,
weight_paths_out, ss_names_out,
thresholds_values_out, original_indices_out)
plt.plot(flops_out, utils.get_metric_complement(true_errs_out), '-o')
plt.savefig(join(run_path, 'out.png'), bbox_inches='tight', pad_inches=0) ; plt.show() ; plt.close()
dict_to_dump = {'true_errs': true_errs_out.tolist(), 'flops': flops_out.tolist(),
'cfgs': cfgs_out.tolist(), 'weight_paths': weight_paths_out.tolist(),
'search_space_names': ss_names_out.tolist(), 'algo': algo, 'labels_path': labels_path,
'dataset_name': dataset_name, 'dataset_type': dataset_type,
'original_indices': original_indices_out.tolist(), 'flops_all': flops_all,
'ensemble_ss_names': ensemble_ss_names, 'dataset_postfix': dataset_postfix}
if search_goal == 'cascade':
dict_to_dump['thresholds'] = thresholds_values_out.tolist()
yaml.safe_dump(dict_to_dump, open(join(run_path, 'posthoc_ensemble.yml'), 'w'), default_flow_style=None)
evaluate_stored_whole_experiment_cascade(out_name, 'test', f'labels_{dataset_name}_test.npy', target_algos=[algo], target_runs=target_runs)
filter_whole_experiment_cascade(out_name, target_algos=[algo], target_runs=target_runs,
cascade_info_name='posthoc_ensemble_from_stored.yml',
cascade_info_name_new='posthoc_ensemble_from_stored_filtered.yml')
compare_val_and_test(out_name, 'test', if_from_stored=True, target_algos=[algo], target_runs=target_runs)
| 16,698 | 52.351438 | 154 |
py
|
ENCAS
|
ENCAS-main/encas/mo_gomea_search.py
|
import dill as pickle
import os
import numpy as np
from mo_gomea import MoGomeaCInterface
from utils import threshold_gene_to_value_moregranular as threshold_gene_to_value
def write_np_to_text_file_for_mo_gomea(path, arr):
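# Writes the array as space-separated integers, one row per line, then truncates the trailing
# newline so the file does not end with an empty line.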
with open(path, 'wb') as f:
np.savetxt(f, arr, delimiter=' ', newline='\n', header='', footer='', comments='# ', fmt='%d')
# remove last empty line:
NEWLINE_SIZE_IN_BYTES = -1
f.seek(NEWLINE_SIZE_IN_BYTES, 2)
f.truncate()
class MoGomeaWrapperEnsembleClassification:
def __init__(self, alphabet, subnet_to_output_distrs, subnet_to_flops, labels, if_allow_noop, ensemble_size, **kwargs):
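# Wraps the external MO-GOMEA executable. For cascade search, the per-gene alphabet (number of
# subnets for net genes, number of threshold levels for threshold genes) is written to a text
# file, and the problem data is pickled so the 'EncasFitness' callback can load it from disk.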
super().__init__()
self.n_evals = kwargs['n_evals']
# gomea_exe_path = '/export/scratch3/aleksand/MO_GOMEA/exes/MO_GOMEA_default_ndinit_lb_lessoutput_intsolution_dontcountcache_usepythonpath'
# gomea_exe_path = '/home/chebykin/MO_GOMEA/exes/MO_GOMEA_default_ndinit_lb_lessoutput_intsolution_dontcountcache_usepythonpath'
gomea_exe_path = kwargs['gomea_exe_path']
workdir_mo_gomea = kwargs['run_path']
n_genes = ensemble_size
if kwargs['search_goal'] == 'cascade':
#dump alphabet
alphabet = np.array([alphabet] * ensemble_size + [len(threshold_gene_to_value)] * (ensemble_size - 1))
path_alphabet = os.path.join(workdir_mo_gomea, 'alphabet.txt')
write_np_to_text_file_for_mo_gomea(path_alphabet, alphabet)  # each entry is the number of values a gene can take, i.e. the non-inclusive upper bound MO-GOMEA expects
n_genes = ensemble_size + (ensemble_size - 2) + 1 # net indices, threshold diff, baseline threshold
# dump additional data
path_data_dump = os.path.join(workdir_mo_gomea, 'data_dump_for_gomea')
with open(path_data_dump, 'wb') as file_data_dump:
pickle.dump({'if_allow_noop': if_allow_noop, 'subnet_to_flops': subnet_to_flops,
'labels_path': labels, 'output_distr_paths': subnet_to_output_distrs,
'search_goal': kwargs['search_goal']}, file_data_dump)
self.mo_gomea = MoGomeaCInterface('EncasFitness', workdir_mo_gomea, path_data_dump, 2,
n_genes=n_genes,
alphabet=str(alphabet) if kwargs['search_goal'] != 'cascade' else path_alphabet,
alphabet_lower_bound_path='0',
gomea_executable_path=gomea_exe_path)
def search(self, seed):
genomes, objs = self.mo_gomea.search(self.n_evals, seed)
# MO-GOMEA maximizes "-error" and "-flops", so flip the signs back
objs *= -1
return genomes, objs
| 2,725 | 47.678571 | 147 |
py
|
ENCAS
|
ENCAS-main/encas/random_search.py
|
import dill as pickle
import os
import numpy as np
from encas.encas_api import EncasAPI
from utils import threshold_gene_to_value_moregranular as threshold_gene_to_value, CsvLogger
class RandomSearchWrapperEnsembleClassification:
def __init__(self, alphabet, subnet_to_output_distrs, subnet_to_flops, labels, if_allow_noop, ensemble_size, **kwargs):
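# Pickles the same problem-data format as the MO-GOMEA wrapper, but evaluates genomes in-process
# through EncasAPI instead of an external executable.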
super().__init__()
self.n_evals = kwargs['n_evals']
workdir = kwargs['run_path']
if kwargs['search_goal'] == 'cascade':
self.alphabet = np.array([alphabet] * ensemble_size + [len(threshold_gene_to_value)] * (ensemble_size - 1))
# dump additional data
path_data_dump = os.path.join(workdir, 'data_dump_for_gomea')
with open(path_data_dump, 'wb') as file_data_dump:
pickle.dump({'if_allow_noop': if_allow_noop, 'subnet_to_flops': subnet_to_flops,
'labels_path': labels, 'output_distr_paths': subnet_to_output_distrs, 'search_goal': kwargs['search_goal']}, file_data_dump)
self.fitness_api = EncasAPI(path_data_dump)
self.logger = CsvLogger(workdir, 'random.csv')
def search(self, seed, **kwargs):
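# Uniform random search over the cascade alphabet: resample until a not-yet-evaluated genome is
# found, evaluate it, and log every evaluation to random.csv.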
all_solutions = []
all_objectives = []
evals_performed = 0
evaluated_solutions = set()
while evals_performed < self.n_evals:
solution = np.array([np.random.choice(omega_i, 1) for omega_i in self.alphabet])
solution = solution[:, 0].tolist()
while tuple(solution) in evaluated_solutions:
solution = np.array([np.random.choice(omega_i, 1) for omega_i in self.alphabet])
solution = solution[:, 0].tolist()
top1_err, other_obj = self.fitness_api.fitness(solution)
top1_err, other_obj = -top1_err, -other_obj # because in the API I maximize "-obj"
true_objs = (top1_err, other_obj)
# [cur_solution_idx, elapsed_time, x, fitness_str]
true_objs_str = str(true_objs).replace(' ', '')
self.logger.log([evals_performed, 0, ','.join([str(s) for s in solution]), true_objs_str])
evals_performed += 1
# print(f'{evals_performed}: New solution! {true_objs=}')
if evals_performed % 1000 == 0:
print(f'{evals_performed=}')
all_solutions.append(solution)
all_objectives.append(list(true_objs))
evaluated_solutions.add(tuple(solution))
all_solutions = np.vstack(all_solutions)
all_objectives = np.array(all_objectives)
print(all_solutions)
print(all_objectives)
return all_solutions, all_objectives
| 2,675 | 43.6 | 149 |
py
|
ENCAS
|
ENCAS-main/subset_selectors/base_subset_selector.py
|
class BaseSubsetSelector:
def __init__(self):
pass
def select(self, archive, objs_cur):
pass
| 117 | 18.666667 | 40 |
py
|