repo (string, 2–99) | file (string, 13–225) | code (string, 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (1 class)
---|---|---|---|---|---|---|
MERL-LB
|
MERL-LB-main/sp_train_nn_dqn.py
|
import os
import random
import numpy as np
import torch
from collections import namedtuple, deque
from itertools import count
from config.dqn import *
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from envs.datacenter_env.env import DatacenterEnv
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from torch.utils.tensorboard import SummaryWriter
Transition = namedtuple(
"Transition",
(
"state",
"action_mask",
"action",
"next_state",
"next_action_mask",
"reward",
"done",
),
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
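# FIFO replay buffer holding Transition tuples for experience replay.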
class ReplayMemory(object):
def __init__(self, capacity):
self.memory = deque([], maxlen=capacity)
def push(self, *args):
self.memory.append(Transition(*args))
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
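# Per-machine scoring network: an MLP that maps each machine's feature vector (default dim 126)
# to a scalar value; the trailing squeeze turns B*n*1 into B*n.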
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
fc = []
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
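# Linearly decaying epsilon schedule for epsilon-greedy exploration.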
class EpsScheduler:
    def __init__(self, eps_start, eps_end, decay_steps) -> None:
        self.eps_start = eps_start
        self.eps_end = eps_end
        self.curr = eps_start
        self.step = (eps_start - eps_end) / decay_steps
    def update(self):
        # linear decay, clamped so epsilon never drops below eps_end
        self.curr = max(self.curr - self.step, self.eps_end)
    @property
    def eps(self):
        return self.curr
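# Usage sketch (values are illustrative, not the project defaults):
#   eps = EpsScheduler(0.9, 0.05, 1000)   # decay from 0.9 to 0.05 over 1000 episodes
#   explore = random.random() < eps.eps
#   eps.update()                          # call once per episode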
class DoubleDQN:
def __init__(self, args) -> None:
self.args = args
self.learn_step_counter = 0
self.action_index = 0
self.steps_done = 0
self._build_net()
self.eps = EpsScheduler(args.eps_start, args.eps_end, args.num_episodes)
def _build_net(self):
self.policy_net = Actor().to(device)
self.target_net = Actor().to(device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=1e-3)
self.memory = ReplayMemory(5000)
def choose_action(self, obs, absolute=False):
self.steps_done += 1
state, action_mask = self.obs_format(obs)
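        # epsilon-greedy over feasible machines: infeasible actions are pushed to -1e9 before the argmax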
if not absolute and random.random() < self.eps.eps:
random_prob = torch.rand((1, self.args.machine_num)).to(device)
random_prob[action_mask == False] += -1e9
action = torch.argmax(random_prob, dim=-1).cpu().item()
else:
predict = self.policy_net(state)
predict[action_mask == False] += -1e9
action = torch.argmax(predict, dim=1).cpu().item()
return action
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
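        # build one feature vector per machine: the job features are tiled across machines and
        # concatenated with each machine's occupancy window, remaining run time, and the
        # fleet-wide mean/std summary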
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state.to(device), action_mask.to(device)
def remember(self, obs, action, next_obs, reward, done):
state, action_mask = self.obs_format(obs)
if next_obs is None:
            # fall back to the current state to avoid a None error (TODO: could this cause a bug?)
next_state, next_action_mask = state, action_mask
else:
next_state, next_action_mask = self.obs_format(next_obs)
action = torch.tensor(np.array([[action]]), dtype=torch.int64).to(device)
reward = torch.tensor(np.array([reward]), dtype=torch.float).to(device)
done = torch.tensor(np.array([done]), dtype=torch.bool).to(device)
self.memory.push(
state,
action_mask,
action,
next_state,
next_action_mask,
reward,
done,
)
def update_target_net(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
def learn(self):
if len(self.memory) < self.args.batch_size:
return
transitions = self.memory.sample(self.args.batch_size)
batch = Transition(*zip(*transitions))
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)  # n*2
        # normalize each reward objective
        reward_batch = (reward_batch - torch.mean(reward_batch, dim=0)) / (
            torch.std(reward_batch, dim=0) + 1e-7
        )
        # use the mean of the two objectives as the scalar reward
        reward_batch = torch.mean(reward_batch, dim=-1)
        # single objective (std or runtime) would be:
        # reward_batch = reward_batch[:, 0]
        non_final_mask = torch.cat(batch.done) == False
        non_final_next_states = torch.cat(batch.next_state)[non_final_mask]
        non_final_next_action_mask = torch.cat(batch.next_action_mask)[non_final_mask]
# for each batch state according to policy_net
policy_predict = self.policy_net(state_batch)
state_action_values = policy_predict.gather(1, action_batch)
# state value or 0 in case the state was final.
next_state_values = torch.zeros(self.args.batch_size, device=device)
# action mask
target_predict = self.target_net(non_final_next_states) # B*10
target_predict[non_final_next_action_mask == False] = -torch.inf
next_state_values[non_final_mask] = target_predict.max(1)[0].detach()
# Compute the expected Q values
expected_state_action_values = (next_state_values * self.args.gamma) + reward_batch
# Compute Huber loss
criterion = nn.SmoothL1Loss()
loss = criterion(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the model
self.optimizer.zero_grad()
loss.backward()
# for param in self.policy_net.parameters():
# param.grad.data.clamp_(-1, 1)
self.optimizer.step()
def save(self, save_path):
torch.save(self.target_net.state_dict(), save_path + "_target_net.pth")
torch.save(self.policy_net.state_dict(), save_path + "_policy_net.pth")
if __name__ == "__main__":
args = parse_args()
args.method = "dqn"
args.tag = "run_02"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
model_save_dir = os.path.join(save_dir, "model")
os.makedirs(model_save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
env = DatacenterEnv(args)
    dqn = DoubleDQN(args)
score_list = []
fitness_list = []
EP = []
for i_episode in range(args.num_episodes):
print("i_episode: ", i_episode)
# Initialize the environment and state
seq_index = i_episode % args.job_seq_num
env.seq_index = seq_index
obs = env.reset()
score = np.zeros(2)
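        # two-objective episode return (duration and balance terms)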
for t in count():
# Select and perform an action
action = dqn.choose_action(obs)
next_obs, reward, done, info = env.step(action)
score += reward
if done:
print("done")
# Store the transition in memory
dqn.remember(obs, action, next_obs, reward, done)
# Move to the next state
obs = next_obs
# Perform one step of the optimization (on the policy network)
dqn.learn()
if done:
dqn.eps.update()
print("eps: ", dqn.eps.eps)
break
score_list.append(score)
        # collect fitness
        # compute the load-balance standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the runtime
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([runtime_fitness, std_fitness])
        # log the fitness
writer.add_scalar("current/duration_score", fitness[0], i_episode)
writer.add_scalar("current/balance_score", fitness[1], i_episode)
print("train fitness", fitness)
fitness_list.append(fitness)
fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
print("train mean fitness", fitness_mean)
        # maintain the best non-dominated front (EP)
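        # EP holds the non-dominated (Pareto) episodes: a new mean fitness is added only if no
        # stored point dominates it (d_n == 0), and any stored points it dominates are removed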
d_n = 0
remove_list = []
for item in EP:
_, item_fitness = item
if np.all(fitness_mean < item_fitness):
remove_list.append(item)
if np.all(fitness_mean > item_fitness):
d_n += 1
if d_n != 0:
break
if d_n == 0:
for item in remove_list:
EP.remove(item)
EP.append((i_episode, fitness_mean))
        # plot the front
EP_fitness = np.array([i[1] for i in EP])
x = EP_fitness[:, 1]
y = EP_fitness[:, 0]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, i_episode)
plt.close()
        # log the mean fitness
writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
        # save the model
model_save_path = os.path.join(
model_save_dir,
f"e{i_episode}_s{seq_index}_d{fitness_mean[0]:.4f}_b{fitness_mean[1]:.4f}",
)
dqn.save(model_save_path)
if i_episode % args.target_update == 0:
dqn.update_target_net()
print("Complete")
| 12,168 | 31.97832 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_rr_sigma.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
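# Round-Robin baseline: cycle through the machines and pick the next one whose action mask marks it feasible.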
class RR:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
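# Roll out one job sequence with the RR agent, dump occupancy/finish-time records and plots,
# and return the balance (std) fitness, runtime fitness, and number of jobs.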
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = RR(args.machine_num)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "rr"
args.tag = "user_sigam_test"
args.actual = False
    user_sigma_list = np.linspace(0, 7.5 * 60 // 3, 10, dtype=np.int32)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
    for user_sigma in user_sigma_list:
print(f"Test user sigma {user_sigma}")
save_dir = os.path.join(
root_dir,
f"user_sigma_{user_sigma}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.user_sigma = user_sigma
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # multi-process rollout over job sequences
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_sigma, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        # summary statistics for this user sigma
        result.append((user_sigma, *mean_fitness, *std_fitness))
        print(job_num_list)
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_sigma",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
    df.to_csv(os.path.join(root_dir, f"{args.method}_user_sigma_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_sigma",
"balance_fitness",
"duration_fitness",
],
)
    df2.to_csv(os.path.join(root_dir, f"{args.method}_user_sigma_exp2.csv"))
| 6,391 | 30.643564 | 93 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_ga.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
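# Weighted-sum GA trainer: each individual encodes the Actor weights as a flat gene vector, fitness is
# the (runtime, balance-std) pair from rolling the policy out in DatacenterEnv, and selection uses a
# weighted sum of the normalized objectives.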
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
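    # Load a flat weight vector into the linear layers in order: each layer's weight matrix
    # (row-major) followed by its bias, matching param_num computed in __init__.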
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
class Individual:
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
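# Worker entry point: rebuild the individual from its genes, roll it out on one job sequence,
# and return (id, fitness) so the parent process can match results to individuals.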
def run_individual_in_env(id, args, genes, seq_index):
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
if args.ga_fitness_type == "std":
        # compute the load-balance standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.sum(machines_occupancy_mean_std)
fitness = -std_fitness
elif args.ga_fitness_type == "runtime":
        # compute the runtime
machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)  # divide by 60 to keep the value small
fitness = -runtime_fitness
elif args.ga_fitness_type == "double":
        # compute the load-balance standard deviation
# machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the runtime
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([-runtime_fitness, -std_fitness])
return id, fitness
# def eval_individual_in_env(args, genes, seq_index):
# args.seed = 5
# env = DatacenterEnv(args)
# env.seq_index = seq_index
# env.reset()
# individual = Individual(genes)
# individual.update()
# obs = env.reset()
# done = False
# action_list = []
# reward_list = []
# while not done:
# action = individual.agent.choose_action(obs)
# obs, reward, done, _ = env.step(action)
# action_list.append(action)
# reward_list.append(reward)
# if args.ga_fitness_type == "std":
#         # compute the load-balance standard deviation
# machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
# machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
# machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
# std_fitness = np.sum(machines_occupancy_mean_std)
# fitness = -std_fitness
# elif args.ga_fitness_type == "runtime":
#         # compute the runtime
# machines_finish_time_record = np.array(env.machines_finish_time_record)
#         runtime_fitness = np.sum(machines_finish_time_record / 60)  # keep the value small
# fitness = -runtime_fitness
# elif args.ga_fitness_type == "double":
#         # compute the load-balance standard deviation
# machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
# machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
# machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
# std_fitness = np.mean(machines_occupancy_mean_std)
#         # compute the runtime
# machines_finish_time_record = np.array(env.machines_finish_time_record)
#         runtime_fitness = np.mean(machines_finish_time_record)  # keep the value small
# fitness = np.array([-runtime_fitness, -std_fitness])
# print("eval", fitness)
# individual.eval_fitness = fitness
# return individual
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
        seed = self.args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
def elitism_selection(self):
        # min-max normalize each fitness objective
fitness_list = []
for individual in self.population:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
)
        # weighted sum of the normalized objectives, then sort
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        population_sorted_index = np.argsort(norm_fitness_list)  # ascending order, keep the last p_size
population_sorted_index = population_sorted_index[-self.p_size :]
elitism_population = [self.population[index] for index in population_sorted_index]
        # count how many elites changed since the last generation
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
return elite_change_num
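    # Fitness-proportionate (roulette wheel) selection over the normalized elite fitness values.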
def roulette_wheel_selection(self, size) -> List[Individual]:
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
p1, p2 = self.roulette_wheel_selection(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
    def save_population(self, population: List[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
def evolve(self):
        # # plain sequential loop (for debugging)
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
        # multiprocessing
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(pool_num)
mutil_process = []
for id, individual in enumerate(self.population):
            # run the individual in the environment to obtain its fitness
if self.args.job_seq_num == 1 and individual.train_fitness is not None:
continue
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # collect results from the worker processes
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
        # save the whole population
self.save_population(self.population, "all")
        # elitism selection
elite_change_num = self.elitism_selection()
        # save the elites
elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate children
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
# def eval(self):
    #     # multiprocessing
# population_mp = []
# population_num = self.args.ga_parent_size + self.args.ga_children_size
# pool = Pool(min(cpu_count(), population_num))
# for individual in self.population:
    #         # run the individual in the environment to obtain its fitness
# finish_individual = pool.apply_async(
# eval_individual_in_env,
# args=(
# self.args,
# individual.job_genes,
# self.seq_index,
# ),
# )
# population_mp.append(finish_individual)
# pool.close()
# pool.join()
if __name__ == "__main__":
args = parse_args()
args.method = "wsga"
args.job_seq_num = 1
args.tag = "run06"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
ga = GA(args)
ga.setup_seed()
if args.ga_choice == "generate":
ga.generate_ancestor()
else:
ga.inherit_ancestor()
fitness_list = []
mean_best_fitness = [-np.inf] * args.ga_fitness_num
while True:
print("=" * 100)
print(f"evolve generation {ga.generation}")
elite_change_num, elite_fitness_list = ga.evolve()
# log to tensorbord
writer.add_scalar("Elite change num", elite_change_num, ga.generation)
elite_fitness_list = np.array(elite_fitness_list)
elite_fitness_list = -elite_fitness_list[:, -2:]
# elite_fitness_list = -elite_fitness_list[:, -2:] * [[1, args.res_capacity**2]]
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, ga.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Balance fitness max", max_elite_fitness[1], ga.generation)
writer.add_scalar("Duration fitness max", max_elite_fitness[0], ga.generation)
writer.add_scalar("Balance fitness min", min_elite_fitness[1], ga.generation)
writer.add_scalar("Duration fitness min", min_elite_fitness[0], ga.generation)
| 18,403 | 34.460501 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_deepjs_no_mask.py
|
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
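# REINFORCE-style policy-gradient trainer (DeepJS variant without action masking): rollouts are
# collected in parallel, per-step advantages are computed against a mean baseline over trajectories,
# and the policy is updated with -log_prob * advantage.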
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def predict(self, input, action_mask=None, absolute=True):
predict = self(input)
# if action_mask is not None:
# predict[action_mask == False] += -1e8
if absolute:
action = torch.argmax(predict, dim=1).cpu().item()
else:
action_probs = torch.softmax(predict, dim=-1)
action_probs = action_probs.detach().cpu().numpy()
action_probs = action_probs[0]
action_list = list(range(len(action_probs)))
action = np.random.choice(action_list, p=action_probs)
# action_dist = Categorical(action_probs)
# action = action_dist.sample().cpu().item()
return action
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def choose_action(self, obs, absolute=True):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask, absolute)
# action = self.job_actor.predict(job_input)
return action
class JobShopDataset(Dataset):
def __init__(self, obs_data, action_data, advantage_data) -> None:
self.obs_data = [i for item in obs_data for i in item]
self.action_data = [i for item in action_data for i in item]
self.advantage_data = [i for item in advantage_data for i in item]
def __getitem__(self, index):
obs = self.obs_data[index]
action = self.action_data[index]
advantage = self.advantage_data[index]
state, action_mask = self.obs_format(obs)
return state, action_mask, action, advantage
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state, action_mask
def __len__(self):
return len(self.action_data)
class InputDrive:
def __init__(self, args) -> None:
self.args = args
self.seq_index = 0
self.seq_num = args.job_seq_num
self.agent = Agent()
self.prob = 0.8
self.prob_step = 2 / self.args.epoch
def set_seed(self, seed):
random.seed(seed)
np.random.seed(seed)
        torch.manual_seed(seed)  # seed the CPU RNG
        torch.cuda.manual_seed(seed)  # seed the current GPU
        torch.cuda.manual_seed_all(seed)  # seed all GPUs
def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
        # initialize the environment
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
        # initialize the agent
agent = Agent()
agent.load_state_dict(model_state_dict)
        # set the random seed
self.set_seed(seed)
        # collect a trajectory
obs = env.reset()
done = False
trajectory = []
agent.eval()
with torch.no_grad():
while not done:
action = agent.choose_action(obs, absolute=False)
next_obs, reward, done, _ = env.step(action)
trajectory.append([obs, action, reward, next_obs, done])
obs = next_obs
        # collect fitness
        # compute the load-balance standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the runtime
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([-runtime_fitness, -std_fitness])
return trajectory, fitness, env.curr_time
    # compute the discounted cumulative reward
def get_discount_reward(self, trajectory, reward_index):
        # gather the per-step rewards
reward = []
for item in trajectory:
reward.append(item[reward_index])
        # standardize the rewards
norm_reward_batch = (reward - np.mean(reward, axis=0)) / (np.std(reward, axis=0))
        # min-max normalization (alternative):
# norm_reward_batch = (reward - np.min(reward, axis=0)) / (
# np.max(reward, axis=0) - np.min(reward, axis=0)
# )
        # equal weights for both objectives
mean_reward = np.sum(norm_reward_batch, axis=-1)
# mean_reward = norm_reward_batch[:, 0]
# mean_reward = np.sum(reward, axis=-1)
        # compute the discounted cumulative reward by backwards recursion
        trajectory_len = len(trajectory)
        discount_reward = np.zeros(trajectory_len)
        discount_reward[-1] = mean_reward[-1]
        for index in reversed(range(trajectory_len - 1)):
            discount_reward[index] = mean_reward[index] + self.args.gamma * discount_reward[index + 1]
        return discount_reward
    # collect experience
def get_experience(self, seq_index):
        # collect experience in parallel with a process pool
pool = Pool(min(cpu_count(), self.args.experience_num))
all_record = []
for seed in range(self.args.experience_num):
record = pool.apply_async(
self.get_one_experience,
args=(
self.args,
seed,
self.agent.state_dict(),
seq_index,
self.prob,
),
)
all_record.append(record)
pool.close()
pool.join()
all_trajectory = []
all_fitness = []
end_time_list = []
for record in all_record:
trajectory, fitness, end_time = record.get()
all_trajectory.append(trajectory)
all_fitness.append(fitness)
end_time_list.append(end_time)
return all_trajectory, all_fitness, end_time_list
    # compute the baseline and advantages
def get_advantage(self, all_trajectory):
        # compute the discounted cumulative reward of every trajectory
all_reward = []
all_reward_flat = []
max_reward_len = 0
for trajectory in all_trajectory:
max_reward_len = max(max_reward_len, len(trajectory))
reward = []
for item in trajectory:
reward.append(item[2])
all_reward_flat.append(item[2])
all_reward.append(reward)
all_reward_flat = np.array(all_reward_flat)
reward_mean = np.mean(all_reward_flat, axis=0)
reward_std = np.std(all_reward_flat, axis=0)
all_discount_reward = []
for reward in all_reward:
norm_reward = (reward - reward_mean) / (reward_std + 1e-7)
mean_reward = np.mean(norm_reward, axis=-1)
# mean_reward = np.sum(norm_reward * [[0.2, 0.8]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0.8, 0.2]], axis=-1)
# mean_reward = np.sum(norm_reward * [[1, 0]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0, 1]], axis=-1)
# mean_reward = np.sum(np.array(reward) * np.array([[1 / 600, 1 / 50]]), axis=-1)
            reward_len = len(reward)
            discount_reward = np.zeros(reward_len)
            # backwards recursion: accumulate the discounted future reward at every step
            discount_reward[-1] = mean_reward[-1]
            for index in reversed(range(reward_len - 1)):
                discount_reward[index] = (
                    mean_reward[index] + self.args.gamma * discount_reward[index + 1]
                )
all_discount_reward.append(discount_reward)
# padding
all_padded_discount_reward = [
np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
for discount_reward in all_discount_reward
]
        # baseline: per-step mean of the padded discounted rewards
baseline = np.mean(all_padded_discount_reward, axis=0)
        # advantage = discounted reward - baseline
all_advantage = [
discount_reward - baseline[: len(discount_reward)]
for discount_reward in all_discount_reward
]
return all_advantage
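    # Training loop: for every epoch and job sequence, collect parallel rollouts, log fitness and the
    # non-dominated front, then run one policy-gradient update over the collected batch.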
def train(self):
optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
best_fitness = [np.array([np.inf, np.inf])] * self.args.job_seq_num
i_episode = 0
EP = []
fitness_list = []
for epoch in range(self.args.epoch):
for seq_index in range(self.args.job_seq_num):
                # collect experience
all_trajectory, all_fitness, end_time_list = self.get_experience(seq_index)
all_obs = []
all_action = []
for trajectory in all_trajectory:
_obs = []
_action = []
for item in trajectory:
_obs.append(item[0])
_action.append(item[1])
all_obs.append(_obs)
all_action.append(_action)
                # aggregate the rollout results
mean_fitness = -np.mean(all_fitness, axis=0)
print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
print("mean_fitness: ", mean_fitness)
# writer.add_scalar(
# "current/ws_score",
# mean_fitness[0] / 600 + mean_fitness[1] / 50,
# i_episode,
# )
fitness_list.append(mean_fitness)
writer.add_scalar("Train/End time max", max(end_time_list), i_episode)
writer.add_scalar("Train/End time min", min(end_time_list), i_episode)
writer.add_scalar("Train/End time mean", np.mean(end_time_list), i_episode)
                # log the fitness
writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
                # log the mean fitness
fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
                # maintain the best non-dominated front (EP)
d_n = 0
remove_list = []
for item in EP:
_, item_fitness = item
if np.all(fitness_mean < item_fitness):
remove_list.append(item)
if np.all(fitness_mean > item_fitness):
d_n += 1
if d_n != 0:
break
if d_n == 0:
for item in remove_list:
EP.remove(item)
EP.append((i_episode, fitness_mean))
                # plot the front
EP_fitness = np.array([i[1] for i in EP])
x = EP_fitness[:, 1]
y = EP_fitness[:, 0]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, i_episode)
plt.close()
                # save the model
model_name = (
f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
)
model_save_path = os.path.join(model_save_dir, model_name)
torch.save(self.agent.job_actor.state_dict(), model_save_path)
                # compute advantages
all_advantage = self.get_advantage(all_trajectory)
                # train the model
                # build the dataloader
dataset = JobShopDataset(
obs_data=all_obs,
action_data=all_action,
advantage_data=all_advantage,
)
dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
                # clear gradients
optimizer.zero_grad()
self.agent.train()
                # accumulate gradients over the mini-batches
for batch in dataloader:
state, action_mask, action, advantage = batch
action_predict = self.agent.job_actor(state)
                    # in-place assignment here would break gradient flow
                    # TODO: how can the action mask be used?
# action_predict[action_mask == False] += -1e9
action_predict = torch.squeeze(action_predict, dim=1)
action_probs = torch.softmax(action_predict, dim=-1)
action_dist = Categorical(action_probs)
action_logprobs = action_dist.log_prob(action)
"""
优化目标是loss越小越好
advantage大于0说明该动作好要增大该动作的概率 即减小 -action_logprobs * advantage
"""
loss = -action_logprobs * advantage
                    # one backward pass per mini-batch (gradients accumulate)
loss.mean().backward()
                # apply the accumulated gradients
optimizer.step()
i_episode += 1
                # update the probability of stochastic action sampling
self.prob = max(self.prob - self.prob_step, self.prob)
if __name__ == "__main__":
args = parse_args()
args.method = "ns_deepjs"
args.tag = "run04_no_mask_no_absolute"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
model_save_dir = os.path.join(save_dir, "models")
os.makedirs(model_save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
inputdrive = InputDrive(args)
inputdrive.train()
| 18,402 | 35.586481 | 98 |
py
|
MERL-LB
|
MERL-LB-main/utils.py
|
import matplotlib.pyplot as plt
import numpy as np
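# Plot each column of `input` as a separate line (up to max_line lines) and save the figure to save_name.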
def plot_mutil_lines_chart(
input,
legend_list=None,
max_line=np.inf,
save_name="test.png",
xlabel="",
ylabel="",
title=""
):
data = np.array(input)
data = data.T
plt.figure(figsize=(12, 6))
for y in data:
if max_line > 0:
x = range(y.size)
plt.plot(x, y)
max_line -= 1
if legend_list is not None:
plt.legend(legend_list)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(save_name)
plt.close()
# plt.show()
def plot_sum_lines_chart(data):
data = np.array(data)
y = np.sum(data, axis=-1)
plt.figure(figsize=(12, 6))
x = range(y.size)
plt.plot(x, y)
plt.show()
def plot_mean_lines_chart(data):
data = np.array(data)
y = np.mean(data, axis=-1)
plt.figure(figsize=(12, 6))
x = range(y.size)
plt.plot(x, y)
plt.show()
| 960 | 19.020833 | 32 |
py
|
MERL-LB
|
MERL-LB-main/mp_test.py
|
import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
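# Heuristic baselines: RR (round robin over feasible machines), RD (random feasible machine),
# LG (machine whose remaining run time is closest to the job's run time), LC (machine with the fewest jobs).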
class RR:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
class RD:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action_prob = np.random.random(self.machine_num)
action_prob = (action_prob + action_mask) / 2
action = np.argmax(action_prob)
return action
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
class Actor(nn.Module):
def __init__(self, absolute=True, dim_list=[126, 32, 1]):
super().__init__()
self.absolute = absolute
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
if not self.absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self, absolute=True):
super(Agent, self).__init__()
self.job_actor = Actor(absolute=absolute)
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
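# Build the agent selected by args.method; learned policies load their actor weights from args.checkpoint_path.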
def get_agent(args):
method = args.method
if method == "rr":
agent = RR(args.machine_num)
elif method == "rd":
agent = RD(args.machine_num)
elif method == "lg":
agent = LG()
elif method == "lc":
agent = LC()
elif method in ["nsga", "wsga", "deepjs", "igd"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
elif method in ["ppo"]:
agent = Agent()
# agent = Agent(absolute=False)
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
return agent
def set_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # seed the CPU RNG
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.cuda.manual_seed_all(seed)  # seed all GPUs
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = get_agent(args)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "ppo"
# args.checkpoint_path = "output/train/deepjs/run02/models/e10001_s1_d497.6165_b14.0890"
# args.checkpoint_path = "output/train/deepjs/run03/models/e3700_s0_d274.3077_b199.8079"
# args.checkpoint_path = "output/train/deepjs/run01/models/e19000_s0_d386.8642_b19.4361"
# args.checkpoint_path = "output/train/deepjs/run01/models/e19850_s0_d275.4718_b194.5685"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/24_-326.97737_-13.71405.pth"
# args.checkpoint_path = "/root/workspace/project/version3/output/train/ppo/run_0/model/e10001_s1_d407.9307_b16.3444_actor.pth"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/20_-336.39251_-12.79905.pth"
# args.checkpoint_path = "output/train/ppo/run_0/model/e16679_s9_d376.1445_b18.8828_actor.pth"
# args.checkpoint_path = (
# "output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
# )
# args.max_time = 30 * 60
# args.job_seq_num = 5
args.tag = "best_run01"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
model_save_path = os.path.join(save_dir, "models")
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
os.makedirs(model_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
set_seed()
    # multi-process rollout over job sequences
mutil_process = []
# pool = Pool(cpu_count())
pool = Pool(10)
for i in range(args.job_seq_num):
one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
mutil_process.append(one_process)
pool.close()
pool.join()
    # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
    np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
)
result1 = [(*mean_fitness, *std_fitness)]
df = pd.DataFrame(
result1,
columns=[
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
    df.to_csv(os.path.join(save_dir, "mean_std.csv"))
df2 = pd.DataFrame(
fitness_record,
columns=[
"balance_fitness",
"duration_fitness",
],
)
    df2.to_csv(os.path.join(save_dir, "all_data.csv"))
print("done")
| 12,458 | 32.312834 | 131 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lg_load.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = LG()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "lg"
args.tag = "user_load_test"
args.actual = False
job_num_list = range(2, 10)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for max_job_num in job_num_list:
user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
print(f"Test user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"user_load_rate_{user_load_rate:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
# multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp2.csv"))
| 6,402 | 30.234146 | 93 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lg_sigma.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = LG()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
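# Hedged sketch (hypothetical helper, not from the original repo) of the two fitness values
# computed in test_one_path above: the balance fitness is the std of absolute resource usage
# across machines, averaged over resources and time, and the duration fitness is the mean of
# the remaining-time records. res_capacity and the toy records below are illustrative only.
def _fitness_metrics_demo():
    res_capacity = 100.0
    # occupancy-rate record with shape (time, machines, resources)
    occupancy_rate = np.array([[[0.2, 0.4], [0.6, 0.4]], [[0.3, 0.5], [0.3, 0.5]]])
    finish_time = np.array([[10.0, 30.0], [5.0, 15.0]])  # shape (time, machines)
    std_over_machines = np.std(occupancy_rate * res_capacity, axis=1)  # (time, resources)
    balance_fitness = np.mean(np.mean(std_over_machines, axis=1))
    duration_fitness = np.mean(finish_time)
    return balance_fitness, duration_fitness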
if __name__ == "__main__":
args = parse_args()
args.method = "lg"
args.tag = "user_sigam_test"
args.actual = False
user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 10, dtype=np.int32)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for user_sigma in user_sigam_list:
print(f"Test user sigma {user_sigma}")
save_dir = os.path.join(
root_dir,
f"user_sigma_{user_sigma}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.user_sigma = user_sigma
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
# multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_sigma, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_sigma",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_sigma",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp2.csv"))
| 6,175 | 30.671795 | 93 |
py
|
MERL-LB
|
MERL-LB-main/sp_train_nn_ppo.py
|
import os
import random
import numpy as np
import torch
from collections import namedtuple, deque
from itertools import count
from config.ppo import *
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from envs.datacenter_env.env import DatacenterEnv
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Transition = namedtuple(
"Transition",
(
"state",
"action_mask",
"action",
"action_logprobs",
"reward",
"done",
),
)
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.reset()
def push(self, *args):
"""Save a transition"""
self.memory.append(Transition(*args))
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def reset(self):
self.memory = deque([], maxlen=self.capacity)
def __len__(self):
return len(self.memory)
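# A minimal usage sketch (assumption: not part of the original training code) of the Transition
# namedtuple and ReplayMemory defined above: transitions are pushed one by one and later
# transposed with Transition(*zip(*...)), which is what PPO.learn() below does with the buffer.
# The stored values here are placeholders; the real code stores tensors.
def _replay_memory_demo():
    memory = ReplayMemory(capacity=8)
    for step in range(4):
        memory.push(
            f"state_{step}",
            f"mask_{step}",
            step,  # action
            0.0,  # action_logprobs
            (1.0, -1.0),  # two-objective reward
            step == 3,  # done
        )
    batch = Transition(*zip(*memory.memory))  # tuple-of-sequences view of the buffer
    return batch.action, batch.done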
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
fc = []
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
class Critic(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
fc = []
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
x = torch.sum(x, dim=-1)
return x
class PPO:
def __init__(
self,
args,
) -> None:
self.args = args
self.learn_step_counter = 0
self.action_logprobs = None # cache of the most recent action log-prob
self._build_net()
def _build_net(self):
self.actor = Actor().to(device)
self.critic = Critic().to(device)
self.memory = ReplayMemory(5000)
self.optimizer = torch.optim.Adam(
[
{"params": self.actor.parameters(), "lr": args.ppo_actor_lr},
{"params": self.critic.parameters(), "lr": args.ppo_critic_lr},
]
)
self.critic_loss = nn.MSELoss()
def choose_action(self, obs, absolute=False):
state, action_mask = self.obs_format(obs)
predict = self.actor(state)
predict[action_mask == False] += -torch.inf
if not absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state.to(device), action_mask.to(device)
def remember(self, obs, action, reward, done):
state, action_mask = self.obs_format(obs)
action_logprobs = self.action_logprobs
action = torch.tensor(action, dtype=torch.int32)
self.memory.push(
state,
action_mask,
action,
action_logprobs,
reward,
done,
)
def learn(self):
if len(self.memory) < self.args.ppo_update_timestep:
return
transitions = self.memory.memory
batch = Transition(*zip(*transitions))
state_batch = torch.cat(batch.state, dim=0).to(device)
action_batch = torch.vstack(batch.action).to(device)
action_mask_batch = torch.cat(batch.action_mask, dim=0).to(device)
action_logprobs_batch = torch.vstack(batch.action_logprobs).to(device)
reward_batch = np.array(batch.reward)
done_batch = np.array(batch.done)
# standardize the rewards
reward_batch = (reward_batch - np.mean(reward_batch, axis=0)) / (
np.std(reward_batch, axis=0) + 1e-7
)
# reward scaling
# reward_batch = reward_batch * np.array([[0.001, 1]])
# # min-max normalization
# norm_reward_batch = (reward_batch - np.min(reward_batch, axis=0)) / (
# np.max(reward_batch, axis=0) - np.min(reward_batch, axis=0)
# )
# mean_reward_batch = np.mean(norm_reward_batch, axis=-1)
# no normalization or standardization
# mean_reward_batch = np.sum(reward_batch, axis=-1)
# mean_reward_batch = reward_batch[:, 0]
# Monte Carlo estimate of returns
# cumulate_rewards = []
# discounted_reward = 0
# for reward, is_terminal in zip(
# reversed(mean_reward_batch), reversed(done_batch)
# ):
# if is_terminal:
# discounted_reward = 0
# discounted_reward = reward + (self.args.ppo_gamma * discounted_reward)
# cumulate_rewards.insert(0, discounted_reward)
# cumulate_rewards = torch.tensor(cumulate_rewards, dtype=torch.float32).to(
# device
# )
# standardization
# cumulate_rewards = (cumulate_rewards - cumulate_rewards.mean()) / (
# cumulate_rewards.std() + 1e-7
# )
cumulate_rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(reward_batch), reversed(done_batch)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.args.ppo_gamma * discounted_reward)
cumulate_rewards.insert(0, discounted_reward)
cumulate_rewards = torch.tensor(cumulate_rewards, dtype=torch.float32).to(device)
# standardize
cumulate_rewards = (cumulate_rewards - cumulate_rewards.mean(dim=0)) / (
cumulate_rewards.std(dim=0) + 1e-7
)
# combine the rewards of the two objectives
cumulate_rewards = cumulate_rewards * torch.tensor([[0.5, 0.5]]).to(device)
cumulate_rewards = torch.sum(cumulate_rewards, dim=-1)
# cumulate_rewards = cumulate_rewards[:, 0]
# Optimize policy for K epochs
for epoch in range(self.args.ppo_epochs):
new_action_predict = self.actor(state_batch)
new_action_predict[action_mask_batch == False] += -torch.inf
new_action_probs = torch.softmax(new_action_predict, dim=-1)
new_action_dist = Categorical(new_action_probs)
new_action_entropy = new_action_dist.entropy()
new_action_logprobs = new_action_dist.log_prob(action_batch.reshape(-1))
state_values = self.critic(state_batch)
advantages = cumulate_rewards - state_values.detach()
ratios = torch.exp(new_action_logprobs - action_logprobs_batch.reshape(-1))
surr1 = ratios * advantages
surr2 = (
torch.clamp(ratios, 1 - self.args.ppo_eps_clip, 1 + self.args.ppo_eps_clip)
* advantages
)
# loss = -advantages
loss = (
-torch.min(surr1, surr2)
+ 0.5 * self.critic_loss(state_values, cumulate_rewards)
- 0.01 * new_action_entropy
)
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.learn_step_counter += 1
# TODO Copy new weights into old policy
# self.policy_old.load_state_dict(self.policy.state_dict()
# clear the buffer
self.memory.reset()
def save(self, save_path):
torch.save(self.actor.state_dict(), save_path + "_actor.pth")
torch.save(self.critic.state_dict(), save_path + "_critic.pth")
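# Hedged toy example (not part of the original trainer; the tensors are invented): the clipped
# surrogate objective used in PPO.learn() above, evaluated on hand-made values. Ratios outside
# [1 - eps, 1 + eps] are clipped, so one update cannot move the policy too far from the
# behavior policy that generated the data.
def _ppo_clip_demo(eps_clip=0.2):
    new_logp = torch.tensor([-0.1, -2.0, -0.5])
    old_logp = torch.tensor([-0.9, -1.0, -0.5])
    advantages = torch.tensor([1.0, -1.0, 0.5])
    ratios = torch.exp(new_logp - old_logp)
    surr1 = ratios * advantages
    surr2 = torch.clamp(ratios, 1 - eps_clip, 1 + eps_clip) * advantages
    return (-torch.min(surr1, surr2)).mean()  # scalar policy loss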
if __name__ == "__main__":
args = parse_args()
args.method = "ppo"
args.tag = "run_0"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
model_save_dir = os.path.join(save_dir, "model")
os.makedirs(model_save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
env = DatacenterEnv(args)
ppo = PPO(args)
score_list = []
fitness_list = []
EP = []
for i_episode in range(args.num_episodes):
print("i_episode: ", i_episode)
# Initialize the environment and state
seq_index = i_episode % args.job_seq_num
env.seq_index = seq_index
obs = env.reset()
score = np.zeros(2)
for t in count():
# Select and perform an action
action = ppo.choose_action(obs)
next_obs, reward, done, info = env.step(action)
score += reward
if done:
print("done")
# Store the transition in memory
ppo.remember(obs, action, reward, done)
# Move to the next state
obs = next_obs
# Perform one step of the optimization (on the policy network)
ppo.learn()
if done:
ppo.memory.reset() # is it necessary to clear the buffer here?
break
score_list.append(score)
# collect fitness
# compute the load-balance (std) fitness
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
# compute the run-time fitness
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([runtime_fitness, std_fitness])
# log fitness
writer.add_scalar("current/duration_score", fitness[0], i_episode)
writer.add_scalar("current/balance_score", fitness[1], i_episode)
print("train fitness", fitness)
fitness_list.append(fitness)
fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
print("train mean fitness", fitness_mean)
# maintain the best non-dominated front (EP)
d_n = 0
remove_list = []
for item in EP:
_, item_fitness = item
if np.all(fitness_mean < item_fitness):
remove_list.append(item)
if np.all(fitness_mean > item_fitness):
d_n += 1
if d_n != 0:
break
if d_n == 0:
for item in remove_list:
EP.remove(item)
EP.append((i_episode, fitness_mean))
# plot the front
EP_fitness = np.array([i[1] for i in EP])
x = EP_fitness[:, 1]
y = EP_fitness[:, 0]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, i_episode)
plt.close()
# log mean fitness
writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
# save the model
model_save_path = os.path.join(
model_save_dir,
f"e{i_episode}_s{seq_index}_d{fitness_mean[0]:.4f}_b{fitness_mean[1]:.4f}",
)
ppo.save(model_save_path)
print("Complete")
| 14,034 | 32.023529 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_nn_sigma.py
|
import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
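# Illustrative sketch (names and sizes are hypothetical, not from the original repo): the
# evolutionary training encodes a policy as a flat genome, and Actor.update above unpacks such
# a vector back into layer weights and biases. A tiny Actor round-trips a random genome:
def _actor_genome_demo():
    actor = Actor(dim_list=[4, 3, 1])
    genome = np.random.uniform(-1, 1, actor.param_num)  # 4*3 + 3 + 3*1 + 1 = 19 parameters
    actor.update(genome)  # copy the flat vector into the fc weights and biases
    dummy_input = torch.zeros((1, 2, 4))  # B * n * input_dim
    return actor(dummy_input).shape  # -> torch.Size([1, 2])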
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
# checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
agent = Agent()
# state_dict = torch.load("24_f_-342.436_-0.029.pth")
# agent.load_state_dict(state_dict)
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
machines_job_num_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
machines_job_num_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "nsga"
args.tag = "nsga_run05_g20000_12"
args.actual = True
# args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
# args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
args.checkpoint_path = "output/train/nsga/run05/elite/g20000_0/0_-455.58486_-12.92719.pth"
# user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 10, dtype=np.int32)
user_sigam_list = [0]
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for user_sigma in user_sigam_list:
print(f"Test user sigma {user_sigma}")
save_dir = os.path.join(
root_dir,
f"user_sigma_{user_sigma}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.user_sigma = user_sigma
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
# multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_sigma, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_sigma",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_sigma",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp2.csv"))
| 10,899 | 33.169279 | 131 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lc_sigma.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = LC()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "lc"
args.tag = "user_sigam_test"
args.actual = False
user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 10, dtype=np.int32)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for user_sigma in user_sigam_list:
print(f"Test user sigma {user_sigma}")
save_dir = os.path.join(
root_dir,
f"user_sigma_{user_sigma}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.user_sigma = user_sigma
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
# multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_sigma, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_sigma",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_sigma",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp2.csv"))
| 6,110 | 30.5 | 93 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_deepjs_no_mask_ppo.py
|
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def predict(self, input, action_mask=None, absolute=True, return_log_prob=False):
predict = self(input)
# if action_mask is not None:
# predict[action_mask == False] += -1e8
if absolute:
action = torch.argmax(predict, dim=1).cpu().item()
else:
action_probs = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_probs)
action = action_dist.sample()
action_log_prob = action_dist.log_prob(action)
action = action.cpu().item()
action_log_prob = action_log_prob.cpu().item()
if return_log_prob:
return action, action_log_prob
else:
return action
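# A small hedged sketch (not part of the original file; the logits are toy numbers): what
# Actor.predict(absolute=False) above does with its scores -- softmax into a categorical
# distribution, sample an action, and keep its log-probability for the PPO-style ratio later.
def _categorical_sampling_demo():
    logits = torch.tensor([[2.0, 0.5, -1.0]])  # one job, three candidate machines
    probs = torch.softmax(logits, dim=-1)
    dist = Categorical(probs)
    action = dist.sample()
    log_prob = dist.log_prob(action)
    return action.item(), log_prob.item()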
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def choose_action(self, obs, absolute=True, return_log_prob=False):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action, log_prob = self.job_actor.predict(job_input, action_mask, absolute, return_log_prob)
# action = self.job_actor.predict(job_input)
if return_log_prob:
return action, log_prob
else:
return action
class JobShopDataset(Dataset):
def __init__(self, obs_data, action_data, action_prob_data, advantage_data) -> None:
self.obs_data = [i for item in obs_data for i in item]
self.action_data = [i for item in action_data for i in item]
self.action_prob_data = [i for item in action_prob_data for i in item]
self.advantage_data = [i for item in advantage_data for i in item]
def __getitem__(self, index):
obs = self.obs_data[index]
action = self.action_data[index]
action_prob = self.action_prob_data[index]
advantage = self.advantage_data[index]
state, action_mask = self.obs_format(obs)
return state, action_mask, action_prob, action, advantage
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state, action_mask
def __len__(self):
return len(self.action_data)
class InputDrive:
def __init__(self, args) -> None:
self.args = args
self.seq_index = 0
self.seq_num = args.job_seq_num
self.agent = Agent()
self.prob = 0.8
self.prob_step = 2 / self.args.epoch
def set_seed(self, seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # set the random seed for the CPU
torch.cuda.manual_seed(seed) # set the random seed for the current GPU
torch.cuda.manual_seed_all(seed) # set the random seed for all GPUs
def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
# initialize the environment
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
# initialize the agent
agent = Agent()
agent.load_state_dict(model_state_dict)
# set the random seed
self.set_seed(seed)
# collect a trajectory
obs = env.reset()
done = False
trajectory = []
agent.eval()
with torch.no_grad():
while not done:
action, log_prob = agent.choose_action(obs, absolute=False, return_log_prob=True)
next_obs, reward, done, _ = env.step(action)
trajectory.append([obs, action, reward, next_obs, done, log_prob])
obs = next_obs
# collect fitness
# compute the load-balance (std) fitness
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
# compute the run-time fitness
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([-runtime_fitness, -std_fitness])
return trajectory, fitness, env.curr_time
# compute the discounted cumulative reward
def get_discount_reward(self, trajectory, reward_index):
# gather the rewards
reward = []
for item in trajectory:
reward.append(item[reward_index])
# standardize the rewards
norm_reward_batch = (reward - np.mean(reward, axis=0)) / (np.std(reward, axis=0))
# min-max normalization
# norm_reward_batch = (reward - np.min(reward, axis=0)) / (
# np.max(reward, axis=0) - np.min(reward, axis=0)
# )
# both objectives weighted equally
mean_reward = np.sum(norm_reward_batch, axis=-1)
# mean_reward = norm_reward_batch[:, 0]
# mean_reward = np.sum(reward, axis=-1)
# compute the discounted cumulative reward
trajectory_len = len(trajectory)
discount_reward = np.zeros(trajectory_len)
for index in reversed(range(trajectory_len - 1)):
discount_reward[index] = mean_reward[index] + self.args.gamma * mean_reward[index + 1]
return discount_reward
# collect experience
def get_experience(self, seq_index):
# collect experience with multiple processes
pool = Pool(min(cpu_count(), self.args.experience_num))
all_record = []
for seed in range(self.args.experience_num):
record = pool.apply_async(
self.get_one_experience,
args=(
self.args,
seed,
self.agent.state_dict(),
seq_index,
self.prob,
),
)
all_record.append(record)
pool.close()
pool.join()
all_trajectory = []
all_fitness = []
end_time_list = []
for record in all_record:
trajectory, fitness, end_time = record.get()
all_trajectory.append(trajectory)
all_fitness.append(fitness)
end_time_list.append(end_time)
return all_trajectory, all_fitness, end_time_list
# compute the baseline
def get_advantage(self, all_trajectory):
# compute cumulative rewards
all_reward = []
all_reward_flat = []
max_reward_len = 0
for trajectory in all_trajectory:
max_reward_len = max(max_reward_len, len(trajectory))
reward = []
for item in trajectory:
reward.append(item[2])
all_reward_flat.append(item[2])
all_reward.append(reward)
all_reward_flat = np.array(all_reward_flat)
reward_mean = np.mean(all_reward_flat, axis=0)
reward_std = np.std(all_reward_flat, axis=0)
all_discount_reward = []
for reward in all_reward:
norm_reward = (reward - reward_mean) / (reward_std + 1e-7)
mean_reward = np.mean(norm_reward, axis=-1)
# mean_reward = np.sum(norm_reward * [[0.2, 0.8]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0.8, 0.2]], axis=-1)
# mean_reward = np.sum(norm_reward * [[1, 0]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0, 1]], axis=-1)
# mean_reward = np.sum(np.array(reward) * np.array([[1 / 600, 1 / 50]]), axis=-1)
reward_len = len(reward)
discount_reward = np.zeros(reward_len)
for index in reversed(range(reward_len - 1)):
discount_reward[index] = (
mean_reward[index] + self.args.gamma * mean_reward[index + 1]
)
all_discount_reward.append(discount_reward)
# padding
all_padded_discount_reward = [
np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
for discount_reward in all_discount_reward
]
# compute the baseline (per-timestep mean over trajectories)
baseline = np.mean(all_padded_discount_reward, axis=0)
# compute the advantage
all_advantage = [
discount_reward - baseline[: len(discount_reward)]
for discount_reward in all_discount_reward
]
return all_advantage
def train(self):
optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
best_fitness = [np.array([np.inf, np.inf])] * self.args.job_seq_num
i_episode = 0
EP = []
fitness_list = []
for epoch in range(self.args.epoch):
for seq_index in range(self.args.job_seq_num):
# collect experience
all_trajectory, all_fitness, end_time_list = self.get_experience(seq_index)
all_obs = []
all_action = []
all_action_prob = []
for trajectory in all_trajectory:
_obs = []
_action = []
_action_prob = []
for item in trajectory:
_obs.append(item[0])
_action.append(item[1])
_action_prob.append(item[-1])
all_obs.append(_obs)
all_action.append(_action)
all_action_prob.append(_action_prob)
# aggregate the results
mean_fitness = -np.mean(all_fitness, axis=0)
print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
print("mean_fitness: ", mean_fitness)
# writer.add_scalar(
# "current/ws_score",
# mean_fitness[0] / 600 + mean_fitness[1] / 50,
# i_episode,
# )
fitness_list.append(mean_fitness)
writer.add_scalar("Train/End time max", max(end_time_list), i_episode)
writer.add_scalar("Train/End time min", min(end_time_list), i_episode)
writer.add_scalar("Train/End time mean", np.mean(end_time_list), i_episode)
# log fitness
writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
# log the mean fitness
fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
# maintain the best non-dominated front (EP)
d_n = 0
remove_list = []
for item in EP:
_, item_fitness = item
if np.all(fitness_mean < item_fitness):
remove_list.append(item)
if np.all(fitness_mean > item_fitness):
d_n += 1
if d_n != 0:
break
if d_n == 0:
for item in remove_list:
EP.remove(item)
EP.append((i_episode, fitness_mean))
# plot the front
EP_fitness = np.array([i[1] for i in EP])
x = EP_fitness[:, 1]
y = EP_fitness[:, 0]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, i_episode)
plt.close()
# save the model
model_name = (
f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
)
model_save_path = os.path.join(model_save_dir, model_name)
torch.save(self.agent.job_actor.state_dict(), model_save_path)
# compute advantages
all_advantage = self.get_advantage(all_trajectory)
# train the model
# build the dataloader
dataset = JobShopDataset(
obs_data=all_obs,
action_data=all_action,
action_prob_data=all_action_prob,
advantage_data=all_advantage,
)
dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
# clear gradients
optimizer.zero_grad()
self.agent.train()
# accumulate gradients over mini-batches
for batch in dataloader:
state, action_mask, old_action_probs, action, advantage = batch
# forward pass with the current actor
action_predict = self.agent.job_actor(state)
# direct in-place assignment would break gradient backpropagation
# TODO how to make use of the action mask?
# action_predict[action_mask == False] += -1e9
action_predict = torch.squeeze(action_predict, dim=1)
action_probs = torch.softmax(action_predict, dim=-1)
action_dist = Categorical(action_probs)
action_entropy = action_dist.entropy()
action_logprobs = action_dist.log_prob(action)
"""
优化目标是loss越小越好
advantage大于0说明该动作好要增大该动作的概率 即减小 -action_logprobs * advantage
"""
# previous objective: plain advantage-weighted log-prob
# loss = -action_logprobs * advantage
ratios = torch.exp(action_logprobs - old_action_probs)
surr1 = ratios * advantage
surr2 = torch.clamp(ratios, 1 - 0.2, 1 + 0.2) * advantage
loss = -torch.min(surr1, surr2) - 0.01 * action_entropy
# one backward pass
loss.mean().backward()
# apply the gradient update
optimizer.step()
i_episode += 1
# anneal the exploration probability
self.prob = max(self.prob - self.prob_step, 0.0)
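# Hypothetical helper (not called anywhere in the script): a minimal numpy sketch of the
# baseline/advantage idea used in InputDrive.get_advantage above -- several trajectories of
# (already discounted) returns are padded to equal length, the per-timestep mean across
# trajectories serves as the baseline, and the advantage is return minus baseline.
def _advantage_sketch_demo():
    returns = [np.array([3.0, 2.0, 1.0]), np.array([2.0, 1.0])]  # toy discounted returns
    max_len = max(len(r) for r in returns)
    padded = np.stack([np.concatenate([r, np.zeros(max_len - len(r))]) for r in returns])
    baseline = padded.mean(axis=0)  # per-timestep mean over trajectories
    return [r - baseline[: len(r)] for r in returns]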
if __name__ == "__main__":
args = parse_args()
args.method = "ns_deepjs"
args.tag = "run03_no_mask_ppo"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
model_save_dir = os.path.join(save_dir, "models")
os.makedirs(model_save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
inputdrive = InputDrive(args)
inputdrive.train()
| 19,410 | 36.185824 | 100 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_moead.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.moead import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
return action
def show(self):
self.job_actor.show()
class Individual:
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
def run_individual_in_env(id1, id2, args, genes, seq_index):
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
# compute the load-balance (std) fitness
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
# compute the run-time fitness
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record) # avoid overly large values
fitness = np.array([runtime_fitness, std_fitness])
return id1, id2, fitness
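# Hypothetical illustration (not part of the original algorithm code; the weights and fitness
# values are made-up numbers): the Tchebycheff-style scalarization used in MOEAD.evolve() below.
# Each subproblem scores a fitness vector by max(w * f) and keeps the candidate with the
# smallest score.
def _tchebycheff_demo():
    weights = np.array([0.1, 0.9])  # one subproblem's weight vector
    candidates = np.array([[350.0, 60.0], [500.0, 15.0], [400.0, 30.0]])  # (duration, balance)
    scores = np.max(candidates * weights, axis=-1)
    return int(np.argmin(scores))  # -> 2: the best trade-off for this balance-leaning weight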
class MOEAD:
def __init__(self, args) -> None:
self.args = args
self.EP: List[Individual] = [] # external population: the best non-dominated front
self.EP_N_ID = [] # indices on the best front
self.N = args.moead_n # number of weight vectors (subproblems)
self.M = args.moead_m # number of objectives
self.T = args.moead_t # number of neighbors
self.B = [] # neighbor indices, computed from weight-vector similarity
self.Z = [0, 0] # ideal point; both objectives are minimized, so the ideal point is 0
self.population: List[Individual] = [] # population
self.generation = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
# initialization
self.set_weight()
self.get_neighbor()
self.generate_ancestor()
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def set_weight(self):
# generate evenly spaced weight vectors
self.W = np.zeros((self.N, self.M))
W = np.linspace(0, 1, self.N)
self.W[:, 0] = W
self.W[:, 1] = 1 - W
def get_neighbor(self):
# compute the T nearest neighbors of each weight vector
for i in range(self.N):
bi = self.W[i]
distance = np.sum((self.W - bi) ** 2, axis=1)
neighbor = np.argsort(distance)
self.B.append(neighbor[1 : self.T + 1])
def generate_ancestor(self):
# initial population
for _ in range(self.N):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
if np.random.random() < self.args.mutate_rate * 2:
mutation_array = np.random.random(c_genes.shape) < self.args.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.args.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# generate offspring
def generate_children(self, p1: Individual, p2: Individual):
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
return c1, c2
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
# evolve one generation
def evolve(self):
all_evaluate_list: list[list[Individual]] = []
for pi in range(self.N):
Bi = self.B[pi] # neighbor set
# randomly pick neighbors for crossover and mutation
k = random.randint(0, len(Bi) - 1)
l = random.randint(0, len(Bi) - 1)
ki = Bi[k]
li = Bi[l]
xp = self.population[pi]
xk = self.population[ki]
xl = self.population[li]
c1, c2 = self.generate_children(xp, xk)
c3, c4 = self.generate_children(xk, xl)
evaluate_list = [xp, xk, xl, c1, c2, c3, c4]
all_evaluate_list.append(evaluate_list)
# evaluate these candidate models
pool = Pool(cpu_count())
mutil_process = []
for id1 in range(self.N):
for id2, individual in enumerate(all_evaluate_list[id1]):
# skip individuals already evaluated to speed up training
if individual.train_fitness is not None:
continue
one_process = pool.apply_async(
run_individual_in_env,
args=(
id1,
id2,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
# collect results from the worker processes
for one_process in mutil_process:
id1, id2, fitness = one_process.get()
all_evaluate_list[id1][id2].train_fitness = fitness
# update the population according to the results
elite_change_num = 0
for pi in range(self.N):
evaluate_list = all_evaluate_list[pi]
fitness_list = []
for individual in evaluate_list:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
tchebycheff_list = fitness_list * self.W[pi]
# use the maximum as the Tchebycheff value for comparison
tchebycheff_list = np.max(tchebycheff_list, axis=-1).reshape(-1)
best_i1 = np.argmin(tchebycheff_list[:3])
best_i2 = np.argmin(tchebycheff_list)
best_i = best_i2
# with some probability compare on a single objective to avoid local optima
mi = random.randint(0, self.M - 1)
if random.random() < 0.5:
if (
evaluate_list[best_i1].train_fitness[mi]
< evaluate_list[best_i2].train_fitness[mi]
):
best_i = best_i1
best_individual = evaluate_list[best_i]
# # skip the update if no better solution was found
# if best_i == 0:
# continue
# self.population[pi] = best_individual
# update the neighbors
for nj in self.B[pi]:
nei_individual = self.population[nj]
nei_tchebycheff = np.max(np.array(nei_individual.train_fitness) * self.W[pi])
cur_tchebycheff = np.max(np.array(best_individual.train_fitness) * self.W[pi])
if cur_tchebycheff < nei_tchebycheff:
self.population[nj] = best_individual
elite_change_num += 1
# update the EP
if abs(tchebycheff_list[best_i2] - tchebycheff_list[0]) > 1:
remove_list = []
n = 0
for individual in self.EP:
if np.all(best_individual.train_fitness < individual.train_fitness):
remove_list.append(individual)
elif np.all(best_individual.train_fitness > individual.train_fitness):
n += 1
if n != 0:
break
if n == 0:
for individual in remove_list:
self.EP.remove(individual)
self.EP.append(best_individual)
        # save the elite front and the population
self.save_population(self.EP, "elite")
self.save_population(self.population, "population")
self.generation += 1
self.seq_index = (self.seq_index + 1) % self.seq_num
elite_fitness_list = []
for individual in self.EP:
elite_fitness_list.append(individual.train_fitness)
population_fitness_list = []
for individual in self.population:
population_fitness_list.append(individual.train_fitness)
return elite_change_num, elite_fitness_list, population_fitness_list
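# Illustrative sketch, not used by the training loop above: how the Tchebycheff
# aggregation inside `evolve` ranks the candidates of one subproblem. The weight
# vector and fitness rows are made-up numbers, treated as costs to minimise
# (smaller is better), which is the convention the comparisons above rely on.
def _tchebycheff_rank_demo():
    weights = np.array([0.7, 0.3])  # one row of self.W
    fitness_list = np.array(
        [
            [350.0, 20.0],  # current solution xp: [duration, balance]
            [340.0, 25.0],  # candidate c1
            [360.0, 18.0],  # candidate c2
        ]
    )
    # same aggregation as evolve(): weight each objective, then keep the maximum
    tchebycheff = np.max(fitness_list * weights, axis=-1)
    return int(np.argmin(tchebycheff))  # the smallest aggregated value wins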
if __name__ == "__main__":
args = parse_args()
args.method = "moead"
args.job_seq_num = 1
args.tag = "run02"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
moead = MOEAD(args)
moead.setup_seed()
fitness_list = []
while True:
print("=" * 100)
print(f"evolve generation {moead.generation}")
elite_change_num, elite_fitness_list, population_fitness_list = moead.evolve()
        # log to tensorboard
writer.add_scalar("Elite change num", elite_change_num, moead.generation)
elite_fitness_list = np.array(elite_fitness_list)
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Elite Target Distribution")
plt.legend()
writer.add_figure("Elite Target Distribution", figure, moead.generation)
plt.close()
population_fitness_list = np.array(population_fitness_list)
y = population_fitness_list[:, 0]
x = population_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Population Target Distribution")
plt.legend()
writer.add_figure("Population Target Distribution", figure, moead.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Balance fitness max", max_elite_fitness[1], moead.generation)
writer.add_scalar("Duration fitness max", max_elite_fitness[0], moead.generation)
writer.add_scalar("Balance fitness min", min_elite_fitness[1], moead.generation)
writer.add_scalar("Duration fitness min", min_elite_fitness[0], moead.generation)
| 16,252 | 33.877682 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_deepjs.py
|
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def predict(self, input, action_mask=None, absolute=True):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
if absolute:
action = torch.argmax(predict, dim=1).cpu().item()
else:
action_probs = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_probs)
action = action_dist.sample().cpu().item()
return action
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def choose_action(self, obs, absolute=True):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask, absolute)
# action = self.job_actor.predict(job_input)
return action
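# Illustrative sketch, not called anywhere in this file: how the 126-dim
# per-machine feature built in `choose_action` breaks down. The horizon and
# resource counts are assumptions (t=10 time steps, r=4 resource types); with
# them the pieces sum to the Actor's default input size of 126.
def _job_input_dim_demo(t=10, r=4):
    job_dim = r + 1  # job resource request rates plus its run time
    machine_dim = t * r + 1  # one machine's occupancy window plus its remaining time
    fleet_dim = 2 * t * r  # fleet-wide occupancy mean and std, repeated per machine
    return job_dim + machine_dim + fleet_dim  # 126 under the assumed sizes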
class JobShopDataset(Dataset):
def __init__(self, obs_data, action_data, advantage_data) -> None:
self.obs_data = [i for item in obs_data for i in item]
self.action_data = [i for item in action_data for i in item]
self.advantage_data = [i for item in advantage_data for i in item]
def __getitem__(self, index):
obs = self.obs_data[index]
action = self.action_data[index]
advantage = self.advantage_data[index]
state, action_mask = self.obs_format(obs)
return state, action_mask, action, advantage
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state, action_mask
def __len__(self):
return len(self.action_data)
class InputDrive:
def __init__(self, args) -> None:
self.args = args
self.seq_index = 0
self.seq_num = args.job_seq_num
self.agent = Agent()
self.prob = 0.8
self.prob_step = 2 / self.args.epoch
def set_seed(self, seed):
random.seed(seed)
np.random.seed(seed)
        torch.manual_seed(seed)  # seed the CPU RNG
        torch.cuda.manual_seed(seed)  # seed the current GPU
        torch.cuda.manual_seed_all(seed)  # seed all GPUs
def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
        # initialize the environment
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
        # initialize the agent
agent = Agent()
agent.load_state_dict(model_state_dict)
        # set the random seed
self.set_seed(seed)
        # collect a trajectory
obs = env.reset()
done = False
trajectory = []
agent.eval()
with torch.no_grad():
while not done:
action = agent.choose_action(obs, absolute=False)
next_obs, reward, done, _ = env.step(action)
trajectory.append([obs, action, reward, next_obs, done])
obs = next_obs
        # collect fitness
        # compute the occupancy standard deviation (balance objective)
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the running duration (duration objective)
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record)
fitness = np.array([-runtime_fitness, -std_fitness])
return trajectory, fitness
    # compute the discounted cumulative reward
def get_discount_reward(self, trajectory, reward_index):
        # gather the per-step rewards
reward = []
for item in trajectory:
reward.append(item[reward_index])
        # reward standardization (disabled)
# norm_reward_batch = (reward - np.mean(reward, axis=0)) / (np.std(reward, axis=0))
        # min-max normalization (disabled)
# norm_reward_batch = (reward - np.min(reward, axis=0)) / (
# np.max(reward, axis=0) - np.min(reward, axis=0)
# )
        # both objectives share the same weight
mean_reward = np.sum(
np.clip(reward, a_min=[-500, -200], a_max=[0, 0]) / [-500, -200], axis=-1
)
# mean_reward = norm_reward_batch[:, 0]
# mean_reward = np.sum(reward, axis=-1)
        # compute the discounted cumulative reward
trajectory_len = len(trajectory)
discount_reward = np.zeros(trajectory_len)
for index in reversed(range(trajectory_len - 1)):
discount_reward[index] = mean_reward[index] + self.args.gamma * mean_reward[index + 1]
return discount_reward
    # collect experience
def get_experience(self, seq_index):
        # collect experience with multiple worker processes
pool = Pool(min(cpu_count(), self.args.experience_num))
all_record = []
for seed in range(self.args.experience_num):
record = pool.apply_async(
self.get_one_experience,
args=(
self.args,
seed,
self.agent.state_dict(),
seq_index,
self.prob,
),
)
all_record.append(record)
pool.close()
pool.join()
all_trajectory = []
all_fitness = []
for record in all_record:
trajectory, fitness = record.get()
all_trajectory.append(trajectory)
all_fitness.append(fitness)
return all_trajectory, all_fitness
    # compute the baseline
def get_advantage(self, all_trajectory):
        # compute the cumulative rewards
all_reward = []
all_reward_flat = []
max_reward_len = 0
for trajectory in all_trajectory:
max_reward_len = max(max_reward_len, len(trajectory))
reward = []
for item in trajectory:
reward.append(item[2])
all_reward_flat.append(item[2])
all_reward.append(reward)
all_reward_flat = np.array(all_reward_flat)
reward_mean = np.mean(all_reward_flat, axis=0)
reward_std = np.std(all_reward_flat, axis=0)
all_discount_reward = []
for reward in all_reward:
# norm_reward = (reward - reward_mean) / (reward_std + 1e-7)
# mean_reward = np.mean(norm_reward, axis=-1)
# mean_reward = np.sum(norm_reward * [[0.2, 0.8]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0.8, 0.2]], axis=-1)
# mean_reward = np.sum(norm_reward * [[1, 0]], axis=-1)
# mean_reward = np.sum(norm_reward * [[0, 1]], axis=-1)
# mean_reward = np.sum(np.array(reward) * np.array([[1 / 600, 1 / 50]]), axis=-1)
mean_reward = np.sum(
(np.clip(reward, a_min=[-500, -200], a_max=[0, 0]) - [-500, -200]) / [500, 200],
axis=-1,
)
reward_len = len(reward)
discount_reward = np.zeros(reward_len)
for index in reversed(range(reward_len - 1)):
discount_reward[index] = (
mean_reward[index] + self.args.gamma * mean_reward[index + 1]
)
all_discount_reward.append(discount_reward)
# padding
all_padded_discount_reward = [
np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
for discount_reward in all_discount_reward
]
        # compute the baseline
baseline = np.mean(all_padded_discount_reward, axis=0)
        # compute the advantage
all_advantage = [
discount_reward - baseline[: len(discount_reward)]
for discount_reward in all_discount_reward
]
return all_advantage
def train(self):
optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
best_fitness = [np.array([np.inf, np.inf])] * self.args.job_seq_num
i_episode = 0
EP = []
fitness_list = []
for epoch in range(self.args.epoch):
for seq_index in range(self.args.job_seq_num):
                # collect experience
all_trajectory, all_fitness = self.get_experience(seq_index)
all_obs = []
all_action = []
for trajectory in all_trajectory:
_obs = []
_action = []
for item in trajectory:
_obs.append(item[0])
_action.append(item[1])
all_obs.append(_obs)
all_action.append(_action)
                # aggregate the results
mean_fitness = -np.mean(all_fitness, axis=0)
print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
print("mean_fitness: ", mean_fitness)
# writer.add_scalar(
# "current/ws_score",
# mean_fitness[0] / 600 + mean_fitness[1] / 50,
# i_episode,
# )
fitness_list.append(mean_fitness)
                # record fitness
writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
                # record mean fitness
fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
                # maintain the best non-dominated front
d_n = 0
remove_list = []
for item in EP:
_, item_fitness = item
if np.all(fitness_mean < item_fitness):
remove_list.append(item)
if np.all(fitness_mean > item_fitness):
d_n += 1
if d_n != 0:
break
if d_n == 0:
for item in remove_list:
EP.remove(item)
EP.append((i_episode, fitness_mean))
                # plot the front
EP_fitness = np.array([i[1] for i in EP])
x = EP_fitness[:, 1]
y = EP_fitness[:, 0]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, i_episode)
plt.close()
                # save the model
model_name = (
f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
)
model_save_path = os.path.join(model_save_dir, model_name)
torch.save(self.agent.job_actor.state_dict(), model_save_path)
                # compute the advantage
all_advantage = self.get_advantage(all_trajectory)
                # train the model
                # build the dataloader
dataset = JobShopDataset(
obs_data=all_obs,
action_data=all_action,
advantage_data=all_advantage,
)
dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
                # clear the gradients
optimizer.zero_grad()
self.agent.train()
                # accumulate gradients across batches
for batch in dataloader:
state, action_mask, action, advantage = batch
action_predict = self.agent.job_actor(state)
                    # assigning the masked result directly would block gradient backprop
                    # TODO: how to make better use of the mask?
action_predict[action_mask == False] += -1e9
action_predict = torch.squeeze(action_predict, dim=1)
action_probs = torch.softmax(action_predict, dim=-1)
action_dist = Categorical(action_probs)
action_logprobs = action_dist.log_prob(action)
"""
优化目标是loss越小越好
advantage大于0说明该动作好要增大该动作的概率 即减小 -action_logprobs * advantage
"""
loss = -action_logprobs * advantage
                    # one backward pass (gradients accumulate)
loss.mean().backward()
                # apply the accumulated gradients
optimizer.step()
i_episode += 1
            # update the random-sampling probability
self.prob = max(self.prob - self.prob_step, self.prob)
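# Illustrative sketch, separate from the training loop above: the advantage-
# weighted policy-gradient loss on dummy logits for three machines. The numbers
# are made up; masked machines receive a large negative logit, as in `train`,
# so their probability is effectively zero.
def _policy_gradient_loss_demo():
    logits = torch.tensor([[1.0, 0.5, -0.2], [0.1, 0.3, 0.9]])
    action_mask = torch.tensor([[True, True, False], [True, True, True]])
    action = torch.tensor([0, 2])
    advantage = torch.tensor([0.5, -0.3])
    logits = logits.masked_fill(~action_mask, -1e9)
    dist = Categorical(torch.softmax(logits, dim=-1))
    # a positive advantage pushes the chosen action's log-probability up
    return -(dist.log_prob(action) * advantage).mean()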
if __name__ == "__main__":
args = parse_args()
args.method = "ws_deepjs"
args.tag = "run01"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
model_save_dir = os.path.join(save_dir, "models")
os.makedirs(model_save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
inputdrive = InputDrive(args)
inputdrive.train()
| 17,996 | 34.994 | 98 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_nn.py
|
import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
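# Illustrative sketch, not used by the test below: how many genome entries
# `Actor.update` expects for the default dim_list, and a round trip through it
# with random values. The sizes follow the layer shapes; the weights are noise.
def _genome_size_demo():
    dims = [126, 32, 1]
    param_num = sum(dims[i] * dims[i + 1] + dims[i + 1] for i in range(len(dims) - 1))
    actor = Actor(dims)
    actor.update(np.random.uniform(-1, 1, param_num))
    return param_num  # 4097 for [126, 32, 1]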
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
# checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
agent = Agent()
state_dict = torch.load(checkpoint_path)
agent.load_state_dict(state_dict)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
machines_job_num_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
machines_job_num_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
# args.max_job_num = 2
# args.machine_num = 20
args.save_path = "output/test/GA/reward_mean/run01_m10/final_population_false/g_3921_f_-341.265_-0.036/24_f_-342.436_-0.029"
args.save_path = "/root/workspace/project/version3/output/train/ppo/run_0/model/e10000_s0_d408.3441_b16.2197_actor.pth"
# args.save_path = "output/test/ga/reward_mean/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024/test_m_10"
save_dir = args.save_path
model_save_path = os.path.join(save_dir, "models")
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
os.makedirs(model_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
    # multi-process
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
mutil_process.append(one_process)
pool.close()
pool.join()
    # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
    np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
)
print("done")
| 9,255 | 34.328244 | 134 |
py
|
MERL-LB
|
MERL-LB-main/try.py
|
from multiprocessing.pool import Pool
from time import sleep
def worker2(id):
print(f"worker2-id{id}")
def worker1(id):
    print(f"worker1-id{id}")
    # note: `pool` is created in the parent process and is not usable from
    # inside a worker, so these nested apply_async calls do not actually run;
    # tasks should be submitted from the main process only.
    for id in range(5):
        pool.apply_async(worker2, args=(id,))
# def task(message):
# # report a message
# print(f"Task executing: {message}", flush=True)
# sleep(1)
# print(f"Task done: {message}", flush=True)
print("concurrent:")
pool = Pool()
for id in range(5):
pool.apply_async(worker1, args=(id,))
# pool.apply_async(task, args=("Hello world",))
pool.close()
pool.join()
# # SuperFastPython.com
# # example of issuing a task with apply_async() to the process pool with arguments
# from multiprocessing.pool import Pool
# # task executed in a worker process
# def task(message):
# # report a message
# print(f"Task executing: {message}", flush=True)
# # block for a moment
# sleep(1)
# # report a message
# print(f"Task done: {message}", flush=True)
# # protect the entry point
# if __name__ == "__main__":
# # create and configure the process pool
# pool = Pool()
# # issue tasks to the process pool
# pool.apply_async(task, args=("Hello world",))
# # close the process pool
# pool.close()
# # wait for all tasks to finish
# pool.join()
| 1,294 | 21.719298 | 83 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_nsga2_neighbor.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
return action
def show(self):
self.job_actor.show()
class Individual:
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
if args.ga_fitness_type == "std":
        # compute the occupancy standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.sum(machines_occupancy_mean_std)
fitness = -std_fitness
elif args.ga_fitness_type == "runtime":
        # compute the running duration
machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)  # avoid overly large values
fitness = -runtime_fitness
elif args.ga_fitness_type == "double":
        # compute the occupancy standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the running duration
machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)  # avoid overly large values
fitness = np.array([-runtime_fitness, -std_fitness])
return id, fitness
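# Illustrative sketch, not called by the GA below: the two-objective ("double")
# fitness from `run_individual_in_env` computed on a tiny made-up record. The
# shapes mirror the env records (steps x machines x resources for occupancy);
# the numbers and the capacity value are placeholders.
def _double_fitness_demo():
    res_capacity = 1.0  # stand-in for args.res_capacity
    occupancy_rate_record = np.random.rand(6, 3, 4)  # 6 steps, 3 machines, 4 resources
    finish_time_record = np.random.rand(6, 3) * 100.0
    occupancy_std = np.std(occupancy_rate_record * res_capacity, axis=1)
    std_fitness = np.mean(np.mean(occupancy_std, axis=1))
    runtime_fitness = np.mean(finish_time_record)
    return np.array([-runtime_fitness, -std_fitness])  # negated, so larger is better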
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
    # # normalize
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
    # # weighted-sum ranking
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
    # population_sorted_index = np.argsort(norm_fitness_list) # ascending, keep the largest at the end
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
def elitism_selection(self):
        # normalize the fitness values
fitness_list = []
for individual in self.population:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
)
        # the non-dominated sort treats smaller as better, so negate into positive costs
fm_fitness_list = -np.array(fitness_list).T
        # fast non-dominated sort
front_list = self.fast_non_dominated_sort(fm_fitness_list)
        # crowding distance computation
crowded_distance_list = []
for front in front_list:
front_values = fm_fitness_list[:, front]
crowded_distance = self.crowded_distance(front_values)
crowded_distance_list.append(crowded_distance)
        # elitism selection
elitism_index = []
save_best_front = False
for front, crowded_distance in zip(front_list, crowded_distance_list):
            # save the first (best) front
if not save_best_front:
best_front_population = []
for index in front:
best_front_population.append(self.population[index])
self.best_front_population = best_front_population
save_best_front = True
            # sort by crowding distance
front = np.array(front)
            sorted_index = np.argsort(crowded_distance)  # ascending order
            sorted_front = front[sorted_index[::-1]]  # descending order, prefer larger crowding distance
            # select the elites
            # TODO: should the number selected per front be configurable?
for index in sorted_front:
if len(elitism_index) < self.p_size:
elitism_index.append(index)
else:
break
        # weighted-sum ranking with weights [0.5, 0.5]
norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
elitism_population = [self.population[index] for index in elitism_index]
        # count how many elites changed
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.fitness_list = fitness_list
self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
return elite_change_num
    # roulette-wheel selection
def roulette_wheel_selection(self, size) -> List[Individual]:
        # larger values are more likely to be picked
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
    # random selection
def random_select_parent(self, size):
        # randomly sample parents from the elite population
selection = random.sample(self.elitism_population, size)
return selection
    # generate offspring
    # improved offspring generation strategy
def generate_children(self):
children_population = []
        # sort the elites by each objective
fitness_list = []
for item in self.elitism_population:
fitness_list.append(item.train_fitness)
fitness_list = np.array(fitness_list)
fitness0_index = np.argsort(fitness_list[:, 0])
fitness1_index = np.argsort(fitness_list[:, 1])
a = np.arange(0, len(fitness_list))
neighbor = []
for i in range(len(fitness_list)):
distance = abs(i - a)
nei = np.argsort(distance)
neighbor.append(nei[1 : 5 + 1])
self.exploration_rate = 1
if self.generation < 100:
self.exploration_rate = 0.5 + 0.5 * (1 - self.generation / 100)
elif self.generation < 2000:
self.exploration_rate = 0.1 + 0.4 * (1 - self.generation / 2000)
else:
self.exploration_rate = 0.1
        # ~1000 generations: exploration phase
        # ~5000 generations: contraction phase
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
if random.random() < self.exploration_rate:
p1, p2 = self.random_select_parent(2)
else:
rd_index1 = random.randint(0, len(fitness0_index) - 1)
rd_index2 = neighbor[rd_index1][random.randint(0, 5 - 1)]
if random.random() < 0.5:
p1 = self.elitism_population[fitness0_index[rd_index1]]
p2 = self.elitism_population[fitness0_index[rd_index2]]
else:
p1 = self.elitism_population[fitness1_index[rd_index1]]
p2 = self.elitism_population[fitness1_index[rd_index2]]
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
    # evolve one generation
def evolve(self):
        # plain sequential loop for testing
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
        # multi-process evaluation
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(pool_num)
mutil_process = []
for id, individual in enumerate(self.population):
            # run the individual in the environment to obtain its fitness
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # collect the results from the worker processes
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
        # save all results
self.save_population(self.population, "all")
        # elitism selection
elite_change_num = self.elitism_selection()
        # save the elites
elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate offspring
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
    # sort by values
def sort_by_values(self, values):
        # ascending sort
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
    # crowding distance computation
def crowded_distance(self, values):
distances = []
        sorted_index_list = self.sort_by_values(values)  # ascending sort
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
    # fast non-dominated sort
    def fast_non_dominated_sort(self, values):
        # smaller values are better
        values11 = values[0]  # solution values of objective 1
        S = [[] for _ in range(0, len(values11))]  # for each individual, the set of solutions it dominates
        front = [[]]  # the fronts of the population, one list per rank
        n = [0 for _ in range(0, len(values11))]  # for each individual, the number of solutions dominating it
        rank = [np.inf for _ in range(0, len(values11))]  # the rank (front index) of each individual
        # for every individual, count its dominators and collect the solutions it dominates
        # objective values: smaller is better
        for p in range(0, len(values11)):
            S[p] = []  # solutions dominated by p, i.e. worse than p
            n[p] = 0  # number of solutions dominating p, initialized to 0
            for q in range(0, len(values11)):  # iterate over every individual q
                less = 0  # objectives where q is smaller than p
                equal = 0  # objectives where q equals p
                greater = 0  # objectives where q is larger than p
                for k in range(len(values)):  # iterate over every objective function
                    if values[k][p] > values[k][q]:  # on objective k, q is smaller than p
                        less = less + 1  # q is better than p here
                    if values[k][p] == values[k][q]:  # on objective k, p equals q
                        equal = equal + 1
                    if values[k][p] < values[k][q]:  # on objective k, q is larger than p
                        greater = greater + 1  # q is worse than p here
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p, so p's domination counter grows
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q, store q's index
            # individuals with n[p] == 0 are Pareto optimal
            if n[p] == 0:
                rank[p] = 0  # individual p gets rank 0, the best front
                if p not in front[0]:
                    # append p to front 0 if it is not already there
                    front[0].append(p)  # indices of the Pareto-optimal solutions
        # split the remaining solutions into successive fronts
        i = 0
        while front[i] != []:  # while the current front is not empty
            Q = []
            for p in front[i]:  # iterate over the individuals p of the current front
                for q in S[p]:  # iterate over every solution q dominated by p
                    n[q] = n[q] - 1  # remove p from q's domination count
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)  # indices belonging to front i + 1
            i = i + 1  # move on to the next front
            front.append(Q)
        del front[len(front) - 1]  # drop the empty [] appended on the last iteration
        return front  # list of fronts, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]]
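# Illustrative sketch, not used during training: the non-dominated sort above on
# a tiny cost matrix. Rows are objectives, columns are individuals, smaller is
# better, matching the convention inside `fast_non_dominated_sort`. `None`
# stands in for `self`, which the method never reads.
def _front_sort_demo():
    costs = np.array(
        [
            [1.0, 2.0, 3.0, 2.5],  # objective 1 per individual
            [4.0, 3.0, 1.0, 3.5],  # objective 2 per individual
        ]
    )
    return GA.fast_non_dominated_sort(None, costs)  # [[0, 1, 2], [3]]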
if __name__ == "__main__":
args = parse_args()
args.method = "nsga_nei"
args.job_seq_num = 1
args.tag = "run01"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
ga = GA(args)
ga.setup_seed()
if args.ga_choice == "generate":
ga.generate_ancestor()
else:
ga.inherit_ancestor()
fitness_list = []
mean_best_fitness = [-np.inf] * args.ga_fitness_num
while True:
print("=" * 100)
print(f"evolve generation {ga.generation}")
elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorboard
writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
elite_fitness_list = np.array(elite_fitness_list)
elite_fitness_list = -elite_fitness_list[:, -2:]
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Train/Target distribution", figure, ga.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
writer.add_scalar("Train/Exploration rate", ga.exploration_rate, ga.generation)
| 23,003 | 34.665116 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lc.py
|
import os
import numpy as np
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = LC()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "lc"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
model_save_path = os.path.join(save_dir, "models")
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
os.makedirs(model_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
    # multi-process
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
mutil_process.append(one_process)
pool.close()
pool.join()
    # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
    np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
)
print("done")
| 4,649 | 30.849315 | 100 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_server_num.py
|
import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = torch.device("cpu")
class RR:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
class RD:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action_prob = np.random.random(self.machine_num)
action_prob = (action_prob + action_mask) / 2
action = np.argmax(action_prob)
return action
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
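# Illustrative sketch, not part of the benchmark run below: the four heuristic
# baselines applied to one hand-built observation with 4 machines. Only the
# fields each policy actually reads carry meaningful values; the rest are zeros.
def _heuristic_baselines_demo():
    machine_num = 4
    obs = (
        np.zeros(4),  # job_res_req_rate (unused by these baselines)
        3.0,  # job_run_time
        np.zeros((machine_num, 10, 4)),  # machines_all_occupancy_rate (unused)
        np.array([5.0, 1.0, 2.0, 9.0]),  # machines_run_time
        np.array([3, 0, 1, 2]),  # number of jobs on each machine
        np.array([True, False, True, True]),  # action_mask
    )
    return {
        "rr": RR(machine_num).select_action(obs),  # next unmasked machine
        "rd": RD(machine_num).select_action(obs),  # random unmasked machine
        "lg": LG().select_action(obs),  # remaining time closest to the job's run time
        "lc": LC().select_action(obs),  # fewest jobs among unmasked machines
    }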
class Actor(nn.Module):
def __init__(self, absolute=True, dim_list=[126, 32, 1]):
super().__init__()
self.absolute = absolute
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
if not self.absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self, absolute=True):
super(Agent, self).__init__()
self.job_actor = Actor(absolute=absolute)
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def get_agent(args):
method = args.method
if method == "rr":
agent = RR(args.machine_num)
elif method == "rd":
agent = RD(args.machine_num)
elif method == "lg":
agent = LG()
elif method == "lc":
agent = LC()
elif method in ["nsga", "wsga", "deepjs", "igd", "nei_nsga"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
agent.job_actor.eval()
elif method in ["ppo"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
agent.job_actor.eval()
return agent
def set_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # seed the CPU RNG
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.cuda.manual_seed_all(seed)  # seed all GPUs
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = get_agent(args)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
with torch.no_grad():
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
del agent
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "igd"
args.tag = "server_num_test02"
args.actual = True
# args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
# args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
# args.checkpoint_path = (
# "output/train/ns_deepjs/run02_no_mask/models/e10000_s0_d401.1772_b15.8262"
# )
args.checkpoint_path = (
"output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
)
# args.checkpoint_path = "output/train/nei_nsga/g30000_0/12_-218.78153_-174.13751.pth"
# job_num_list = range(2, 10)
server_num_list = [5, 10, 20, 30, 40, 50]
job_num_list = [int(5 * i / 10) for i in server_num_list]
# user_sigam_list = [0]
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for server_num, max_job_num in zip(server_num_list, job_num_list):
args.machine_num = server_num
        # rough expected cluster load: mean job arrivals x mean resource request
        # x mean job length (each taken as half its maximum) over total capacity
        user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
if user_load_rate > 1.1:
break
print(f"Test server_num {server_num} user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"server_num_{server_num}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # multi-process
mutil_process = []
# pool = Pool(10)
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"mean_std.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 13,588 | 31.823671 | 97 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_rr_load.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class RR:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
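# A small illustrative sketch (the helper below is not part of the original file): with
# 4 machines and machine 1 masked out, round-robin advances past the previously chosen
# index and skips infeasible machines. Only the action-mask slot of the observation
# tuple is filled in here.
def _rr_selection_example():
    rr = RR(machine_num=4)
    fake_obs = (None, None, None, None, None, np.array([True, False, True, True]))
    return rr.select_action(fake_obs)  # -> 2: index 1 is masked, so it is skipped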
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = RR(args.machine_num)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "rr"
args.tag = "user_load_test"
args.actual = False
job_num_list = range(2, 10)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for max_job_num in job_num_list:
user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
print(f"Test user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"user_load_rate_{user_load_rate:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp2.csv"))
| 6,618 | 30.221698 | 93 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lc_load.py
|
import os
import numpy as np
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
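# A small illustrative sketch (the helper below is not part of the original file):
# least-connections picks the feasible machine with the fewest running jobs; masked
# machines are pushed to a huge job count so argmin never selects them.
def _lc_selection_example():
    lc = LC()
    jobs_num = np.array([3.0, 1.0, 2.0, 0.0])
    action_mask = np.array([True, True, True, False])
    fake_obs = (None, None, None, None, jobs_num, action_mask)
    return lc.select_action(fake_obs)  # -> 1: machine 3 is masked, machine 1 has the fewest jobs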
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = LC()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "lc"
args.tag = "user_load_test"
args.actual = False
job_num_list = range(2, 10)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for max_job_num in job_num_list:
user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
print(f"Test user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"user_load_rate_{user_load_rate:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # multi-process evaluation
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp2.csv"))
| 6,337 | 30.068627 | 93 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_nsga2_no_mask.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
# action = self.job_actor.predict(job_input, action_mask)
action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
class Individual:
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
    if args.ga_fitness_type == "std":
        # compute the load-balance (std) fitness
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.sum(machines_occupancy_mean_std)
        fitness = -std_fitness
    elif args.ga_fitness_type == "runtime":
        # compute the runtime fitness
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)  # divide by 60 to keep the value small
        fitness = -runtime_fitness
    elif args.ga_fitness_type == "double":
        # compute the load-balance (std) fitness
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the runtime fitness
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)  # mean over time keeps the value small
        fitness = np.array([-runtime_fitness, -std_fitness])
return id, fitness
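# A toy sketch of the "double" fitness above (the helper below is not part of the
# original file): given an occupancy-rate history of shape (T, machines, resources) and
# a remaining-time history of shape (T, machines), the balance objective is the mean
# over time of the per-resource std across machines (scaled back to absolute units),
# and the duration objective is the mean remaining time; both are negated so that a
# larger fitness is better.
def _toy_double_fitness(occupancy_rate_record, finish_time_record, res_capacity):
    occupancy_std = np.std(occupancy_rate_record * res_capacity, axis=1)  # (T, resources)
    std_fitness = np.mean(np.mean(occupancy_std, axis=1))
    runtime_fitness = np.mean(finish_time_record)
    return np.array([-runtime_fitness, -std_fitness])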
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
    #     # normalise the fitness values
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
    #     # weighted-sum ranking
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
    #     population_sorted_index = np.argsort(norm_fitness_list)  # ascending sort, keep the last (best) entries
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
    def elitism_selection(self):
        # normalise the fitness values
        fitness_list = []
        for individual in self.population:
            fitness_list.append(individual.train_fitness)
        fitness_list = np.array(fitness_list)
        norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
            np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
        )
        # fast non-dominated sorting prefers smaller values, so negate to positive costs
        fm_fitness_list = -np.array(fitness_list).T
        # fast non-dominated sorting
        front_list = self.fast_non_dominated_sort(fm_fitness_list)
        # crowding distance calculation
        crowded_distance_list = []
        for front in front_list:
            front_values = fm_fitness_list[:, front]
            crowded_distance = self.crowded_distance(front_values)
            crowded_distance_list.append(crowded_distance)
        # elitism selection
        elitism_index = []
        save_best_front = False
        for front, crowded_distance in zip(front_list, crowded_distance_list):
            # keep the first (best) Pareto front
            if not save_best_front:
                best_front_population = []
                for index in front:
                    best_front_population.append(self.population[index])
                self.best_front_population = best_front_population
                save_best_front = True
            # sort this front by crowding distance
            front = np.array(front)
            sorted_index = np.argsort(crowded_distance)  # ascending order
            sorted_front = front[sorted_index[::-1]]  # descending order: prefer larger crowding distance
            # pick the elites
            # TODO: should the number selected be configurable?
            for index in sorted_front:
                if len(elitism_index) < self.p_size:
                    elitism_index.append(index)
                else:
                    break
        # weighted-sum ranking with weights such as [0.5, 0.5]
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        elitism_population = [self.population[index] for index in elitism_index]
        # count how many elites changed
        elite_change_num = len(elitism_population)
        for elite in elitism_population:
            if elite in self.elitism_population:
                elite_change_num -= 1
        self.elitism_population = elitism_population
        self.fitness_list = fitness_list
        self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
        self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
        return elite_change_num
    # roulette-wheel selection of parents
    def roulette_wheel_selection(self, size) -> List[Individual]:
        # individuals with larger normalised fitness are more likely to be picked
        selection = []
        wheel = sum(self.elitism_norm_fitness_list)
        for _ in range(size):
            pick = np.random.uniform(0, wheel)
            current = 0
            for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
                current += individual_fitness
                if current > pick:
                    selection.append(self.elitism_population[i])
                    break
        return selection
    # random selection
    def random_select_parent(self, size):
        # randomly pick parents from the elite population
        selection = random.sample(self.elitism_population, size)
        return selection
    # generate offspring
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
p1, p2 = self.random_select_parent(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
    # evolve one generation
    def evolve(self):
        # plain serial loop, kept for debugging
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
        # multi-process evaluation
        population_num = self.args.ga_parent_size + self.args.ga_children_size
        pool_num = min(cpu_count(), population_num)
        print(f"use {pool_num} cpu cores")
pool = Pool(10)
mutil_process = []
for id, individual in enumerate(self.population):
            # run the individual in the environment to obtain its fitness
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # collect results from the worker processes
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
        # save the whole population
self.save_population(self.population, "all")
        # elitism selection
elite_change_num = self.elitism_selection()
        # save the elites
elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate offspring
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
    # sort each objective's values
    def sort_by_values(self, values):
        # ascending sort
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
    # crowding distance calculation
def crowded_distance(self, values):
distances = []
        sorted_index_list = self.sort_by_values(values)  # ascending order
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
    # fast non-dominated sorting
    def fast_non_dominated_sort(self, values):
        # smaller objective values are better
        values11 = values[0]  # values of the first objective
        S = [[] for _ in range(0, len(values11))]  # for each individual, the set of solutions it dominates
        front = [[]]  # fronts of the population, one list per rank
        n = [0 for _ in range(0, len(values11))]  # domination count: how many solutions dominate each individual
        rank = [np.inf for _ in range(0, len(values11))]  # rank (front index) of each individual
        # for every individual, compute its domination count and the set of solutions it dominates
        # objective values are minimised
        for p in range(0, len(values11)):
            S[p] = []  # solutions dominated by p, i.e. worse than p
            n[p] = 0  # number of solutions dominating p, initialised to 0
            for q in range(0, len(values11)):  # compare p against every individual q
                less = 0  # objectives on which q's value is smaller than p's
                equal = 0  # objectives on which q's value equals p's
                greater = 0  # objectives on which q's value is greater than p's
                for k in range(len(values)):  # iterate over every objective
                    if values[k][p] > values[k][q]:  # on objective k, q is smaller than p
                        less = less + 1  # q is better than p here
                    if values[k][p] == values[k][q]:  # on objective k, p equals q
                        equal = equal + 1
                    if values[k][p] < values[k][q]:  # on objective k, q is greater than p
                        greater = greater + 1  # q is worse than p here
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p, so increase p's domination count
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q, so record q's index
            # Pareto-optimal individuals are those with n[p] == 0
            if n[p] == 0:
                rank[p] = 0  # individual p gets rank 0, the best front
                if p not in front[0]:
                    # append p to front 0 if it is not already there
                    front[0].append(p)  # store the Pareto-optimal index
        # split the remaining solutions into successive fronts
        i = 0
        while front[i] != []:  # while the current front is not empty
            Q = []
            for p in front[i]:  # iterate over the individuals p of the current front
                for q in S[p]:  # iterate over every solution q dominated by p
                    n[q] = n[q] - 1  # remove p's domination of q
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)  # q belongs to front i + 1
            i = i + 1  # move on to the next front
            front.append(Q)
        del front[len(front) - 1]  # drop the empty list appended on the last iteration
        return front  # list of index lists per front, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]]
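# A minimal illustration of the NSGA-II machinery used in GA.elitism_selection (the
# helper below is not part of the original file, and `args` is assumed to come from
# config.ga's parse_args): four individuals scored on two minimisation objectives.
# Individuals 0 and 1 are mutually non-dominated, 2 is dominated by them, and 3 is
# dominated by everyone, so the fronts come out as [[0, 1], [2], [3]].
def _toy_nsga_sort_example(args):
    ga = GA(args)
    values = np.array([[1.0, 2.0, 3.0, 4.0],  # objective 1 per individual
                       [4.0, 3.0, 5.0, 6.0]])  # objective 2 per individual
    fronts = ga.fast_non_dominated_sort(values)  # -> [[0, 1], [2], [3]]
    distances = [ga.crowded_distance(values[:, front]) for front in fronts]
    return fronts, distances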
if __name__ == "__main__":
args = parse_args()
args.method = "nsga_no_mask"
args.job_seq_num = 1
args.tag = "run01"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
ga = GA(args)
ga.setup_seed()
if args.ga_choice == "generate":
ga.generate_ancestor()
else:
ga.inherit_ancestor()
fitness_list = []
mean_best_fitness = [-np.inf] * args.ga_fitness_num
while True:
print("=" * 100)
print(f"evolve generation {ga.generation}")
elite_change_num, elite_fitness_list = ga.evolve()
# log to tensorbord
writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
elite_fitness_list = np.array(elite_fitness_list)
elite_fitness_list = -elite_fitness_list[:, -2:]
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
# plt.scatter(x, y, lable="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Train/Target distribution", figure, ga.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,474 | 34.378913 | 96 |
py
|
MERL-LB
|
MERL-LB-main/envs/datacenter_env/machine.py
|
import numpy as np
from typing import List
from envs.datacenter_env.job import Job
class Machine:
SLEEP = 0
RUN = 1
def __init__(self, args, machine_id) -> None:
self.args = args
self.id = machine_id
self.state = Machine.SLEEP
self.sleep_delay = 0
# resource time-series occupancy
self.available_res = np.ones((args.timeline_size, args.res_num)) * args.res_capacity
self.finished_job: List[Job] = []
self.running_jobs: List[Job] = []
self.finish_time_log = []
self.state_log = []
self.curr_time = -1
# colormap for graphical representation
self.colormap = np.arange(1 / float(args.job_color_num), 1, 1 / float(args.job_color_num))
np.random.shuffle(self.colormap)
# graphical representation
self.image_represent = np.zeros((args.timeline_size, args.res_num, args.res_capacity))
def wakeup(self):
self.state = Machine.RUN
        self.sleep_delay = self.args.sleep_delay  # force the machine to stay awake for sleep_delay minutes before it may sleep
def sleep(self):
self.state = Machine.SLEEP
    # check whether the job can be placed on this machine
def check_allocate_feasible(self, job: Job):
allocated = False
if self.state == Machine.RUN and job is not None:
for t in range(
0,
min(
(self.args.timeline_size - job.len) + 1,
self.args.pre_allocate_time,
),
):
new_available_res = self.available_res[t : t + job.len, :] - job.res_req
# resource allocability
if np.all(new_available_res >= 0):
allocated = True
break
return allocated
    # allocate the job to this machine
def allocation_job(self, job: Job, curr_time):
allocated = False
        assert self.args.timeline_size >= job.len, "timeline should be larger than job len"
for t in range(
0,
min(
(self.args.timeline_size - job.len) + 1,
self.args.pre_allocate_time,
),
):
new_available_res = self.available_res[t : t + job.len, :] - job.res_req
# check resource allocability
if np.all(new_available_res >= 0):
allocated = True
# update resource time-series occupancy
self.available_res[t : t + job.len, :] = new_available_res
job.start_time = curr_time + t
job.finish_time = job.start_time + job.len
job.predict_finish_time = job.start_time + job.predict_len
self.running_jobs.append(job)
                # update graphical representation
if self.args.obs_represent == "image":
new_color = None
used_color = np.unique(self.image_represent[:])
for color in self.colormap:
if color not in used_color:
new_color = color
break
assert new_color != None, "job_num_color is not enough to represent running job"
start_time = t
end_time = t + job.len
for res in range(self.args.res_num):
for t in range(start_time, end_time):
available_resource_index = np.where(
self.image_represent[t, res, :] == 0
)[0]
self.image_represent[
t, res, available_resource_index[: job.res_req[res]]
] = new_color
break
return allocated
    # check whether this machine's resources are under pressure
def resource_crisis(self):
        # resources are tight if the max occupancy exceeds crisis_rate for crisis_time consecutive minutes
occupancy_rate = 1 - (self.available_res[: self.args.crisis_time] / self.args.res_capacity)
occupancy_rate = np.max(occupancy_rate, axis=-1) > self.args.crisis_rate
crisis = np.sum(occupancy_rate) == self.args.crisis_time
return crisis
    # check whether this machine is idle
def resource_idle(self):
        # idle when nothing is allocated and sleep_delay has counted down to 0
idle = np.all(self.available_res == self.args.res_capacity) and self.sleep_delay == 0
return idle
def observe(self):
if self.args.obs_represent == "image":
return self.image_represent
elif self.args.obs_represent == "timeline":
if self.state == Machine.RUN:
return (self.args.res_capacity - self.available_res) / self.args.res_capacity
else:
return np.ones_like(self.available_res) * -1
def time_proceed(self, curr_time):
self.curr_time = curr_time
if self.state == Machine.RUN:
            # shift the resource availability timeline forward by one step
self.available_res[:-1, :] = self.available_res[1:, :]
self.available_res[-1, :] = self.args.res_capacity
            # no running jobs: count down the sleep delay
if np.all(self.available_res == self.args.res_capacity):
if self.sleep_delay > 0:
self.sleep_delay -= 1
            # jobs still running: reset the sleep delay
else:
self.sleep_delay = self.args.sleep_delay
            # update job status
for job in self.running_jobs:
if job.finish_time <= curr_time:
self.finished_job.append(job)
self.running_jobs.remove(job)
            # update the graphical representation
if self.args.obs_represent == "image":
self.image_represent[:-1, :, :] = self.image_represent[1:, :, :]
self.image_represent[-1, :, :] = 0
        # record the remaining time
self.finish_time_log.append(self.get_max_finish_time())
self.state_log.append(self.state)
    # get the current maximum remaining time
def get_max_finish_time(self):
state = self.available_res != self.args.res_capacity
max_finish_time = np.sum(np.sum(state, axis=1) != 0)
return max_finish_time
    # get the current maximum resource occupancy rate
def get_max_occupancy_rate(self):
current_occupancy_rate = 1 - (self.available_res[0] / self.args.res_capacity)
max_occupancy_rate = np.max(current_occupancy_rate)
return max_occupancy_rate
    # get the current resource occupancy rate
def get_curr_occupancy_rate(self):
current_occupancy_rate = 1 - (self.available_res[0] / self.args.res_capacity)
return current_occupancy_rate
    # future resource occupancy over the whole timeline
def get_all_occupancy_rate(self):
current_occupancy_rate = 1 - (self.available_res / self.args.res_capacity)
return current_occupancy_rate
    # estimated occupancy based on predicted job durations
def get_all_predict_occupancy_rate(self):
current_occupancy = np.zeros_like(self.available_res)
for job in self.running_jobs:
predict_duration = job.predict_finish_time - self.curr_time
predict_duration = max(0, predict_duration)
current_occupancy[:predict_duration] += job.res_req
current_occupancy_rate = current_occupancy / self.args.res_capacity
return current_occupancy_rate
    # actual occupancy based on true job durations (used for testing)
def get_all_actual_occupancy_rate(self):
current_occupancy = np.zeros_like(self.available_res)
for job in self.running_jobs:
actual_duration = job.finish_time - self.curr_time
actual_duration = max(0, actual_duration)
current_occupancy[:actual_duration] += job.res_req
current_occupancy_rate = current_occupancy / self.args.res_capacity
return current_occupancy_rate
    # mean of the per-resource maximum occupancy rate
def get_mean_max_occupancy_rate(self):
occupancy_rate = 1 - (self.available_res[: self.args.crisis_time] / self.args.res_capacity)
mean_max_occupancy_rate = np.mean(np.max(occupancy_rate, axis=0))
return mean_max_occupancy_rate
    # number of currently running jobs
def get_running_job_num(self):
return len(self.running_jobs)
    # current power consumption
def get_current_power(self):
power = 0
if self.state == Machine.RUN:
current_occupancy_rate = 1 - (self.available_res[0] / self.args.res_capacity)
power = current_occupancy_rate**1.5
power = np.array(self.args.res_power_weight) * power
power = np.sum(power) + self.args.base_power
return power
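# A standalone sketch of the sliding-window test performed in check_allocate_feasible /
# allocation_job (the helper below is not part of the original file): a job fits at
# offset t if its per-resource request can be subtracted from every row of
# available_res[t : t + job_len] without going negative, and pre_allocate_time bounds
# how far into the future the start may be pushed.
def _toy_fits_at_some_offset(available_res, job_res_req, job_len, pre_allocate_time):
    timeline_size = available_res.shape[0]
    last_start = min(timeline_size - job_len + 1, pre_allocate_time)
    for t in range(0, last_start):
        if np.all(available_res[t : t + job_len, :] - job_res_req >= 0):
            return True
    return False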
| 8,221 | 35.705357 | 100 |
py
|
MERL-LB
|
MERL-LB-main/envs/datacenter_env/job_generator.py
|
import numpy as np
class JobGenerator:
def __init__(self, args) -> None:
self.args = args
self.big_job_len_lower = args.max_job_len * 2 / 3
self.big_job_len_upper = args.max_job_len
self.small_job_len_lower = 1
self.small_job_len_upper = args.max_job_len / 5
self.dominant_res_lower = args.max_res_req / 2
self.dominant_res_upper = args.max_res_req
self.other_res_lower = 1
self.other_res_upper = args.max_res_req / 5
    # uniform distribution
def uniform_dist(self):
timeline_job_data = []
for _ in range(self.args.job_seq_num):
actual_timeline_job_len = []
predict_timeline_job_len = []
timeline_job_res_req = []
for t in range(self.args.max_time):
job_num = np.random.randint(0, self.args.max_job_num)
actual_nj_len = []
predict_nj_len = []
nj_res_req = []
for _ in range(job_num):
predict_job_len = np.random.randint(1, self.args.max_job_len + 1, 1)[0]
actual_job_len = (
np.random.normal(predict_job_len, self.args.user_sigma, 1)
.clip(1, self.args.max_job_len)
.astype(int)[0]
)
res_req = np.random.randint(1, self.args.max_res_req + 1, self.args.res_num)
actual_nj_len.append(actual_job_len)
predict_nj_len.append(predict_job_len)
nj_res_req.append(res_req)
actual_timeline_job_len.append(np.array(actual_nj_len, dtype=np.int64))
predict_timeline_job_len.append(np.array(predict_nj_len, dtype=np.int64))
timeline_job_res_req.append(np.array(nj_res_req, dtype=np.int64))
timeline_job_data.append(
(actual_timeline_job_len, predict_timeline_job_len, timeline_job_res_req)
)
return timeline_job_data
    # job arrival rate varies by level over time
def level_uniform_dist(self):
level_len = len(self.args.level_job_num)
base = self.args.max_time / level_len
timeline_job_data = []
for _ in range(self.args.job_seq_num):
timeline_job_len = []
timeline_job_res_req = []
for t in range(self.args.max_time):
t_level = self.args.level_job_num[int(t // base)]
# job_num = np.random.randint(0, t_level)
job_num = min(np.random.poisson(t_level), self.args.max_job_num)
nj_len = np.random.randint(1, self.args.max_job_len + 1, job_num)
nj_res_req = np.random.randint(
1, self.args.max_res_req + 1, (job_num, self.args.res_num)
)
timeline_job_len.append(nj_len)
timeline_job_res_req.append(nj_res_req)
timeline_job_data.append((timeline_job_len, timeline_job_res_req))
return timeline_job_data
# small & large job distribution
def level_bi_model_dist(self):
level_len = len(self.args.level_job_num)
base = self.args.max_time / level_len
timeline_job_data = []
for _ in range(self.args.job_seq_num):
timeline_job_len = []
timeline_job_res_req = []
for t in range(self.args.max_time):
t_level = self.args.level_job_num[int(t // base)]
job_num = min(np.random.poisson(t_level), self.args.max_job_num)
nj_len = np.zeros(job_num, dtype=np.int32)
nj_big_index = np.random.random(job_num) > self.args.job_small_rate
big_nj_len = np.random.randint(
self.big_job_len_lower, self.big_job_len_upper + 1, job_num
)
small_nj_len = np.random.randint(
self.small_job_len_lower, self.small_job_len_upper + 1, job_num
)
nj_len[nj_big_index == 1] = big_nj_len[nj_big_index == 1]
nj_len[nj_big_index == 0] = small_nj_len[nj_big_index == 0]
nj_res_req = np.zeros((job_num, self.args.res_num), dtype=np.int32)
nj_dominant_rate = np.random.random((job_num, self.args.res_num))
max_index = np.max(nj_dominant_rate, axis=-1, keepdims=True)
nj_dominant_index = nj_dominant_rate == max_index
nj_dominant_res_req = np.random.randint(
self.dominant_res_lower,
                    self.dominant_res_upper + 1,
(job_num, self.args.res_num),
)
nj_other_res_req = np.random.randint(
self.other_res_lower,
self.other_res_upper + 1,
(job_num, self.args.res_num),
)
nj_res_req[nj_dominant_index == True] = nj_dominant_res_req[
nj_dominant_index == True
]
nj_res_req[nj_dominant_index == False] = nj_other_res_req[
nj_dominant_index == False
]
timeline_job_len.append(nj_len)
timeline_job_res_req.append(nj_res_req)
timeline_job_data.append((timeline_job_len, timeline_job_res_req))
return timeline_job_data
def get_new_job(self):
if self.args.job_generate == "uniform":
return self.uniform_dist()
if self.args.job_generate == "level_uniform":
return self.level_uniform_dist()
elif self.args.job_generate == "level_bi_model":
return self.level_bi_model_dist()
else:
return NotImplemented
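# A tiny illustration of how uniform_dist couples predicted and actual job lengths (the
# helper below is not part of the original file): the predicted length is drawn
# uniformly, and the actual length is a noisy copy of it, normally distributed with
# std user_sigma and clipped to [1, max_job_len].
def _toy_predicted_vs_actual_len(max_job_len, user_sigma):
    predict_len = np.random.randint(1, max_job_len + 1)
    actual_len = int(np.random.normal(predict_len, user_sigma, 1).clip(1, max_job_len)[0])
    return predict_len, actual_len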
| 5,729 | 41.444444 | 96 |
py
|
MERL-LB
|
MERL-LB-main/envs/datacenter_env/buffer.py
|
import queue
import numpy as np
class Buffer(queue.Queue):
def __init__(self, args) -> None:
super(Buffer, self).__init__(args.buffer_size)
self.args = args
self.occupation_rate_record = []
    # record the buffer occupancy rate
def record_rate(self):
self.occupation_rate_record.append(self.qsize() / self.args.buffer_size)
    # get the buffer observation
def observe(self):
buffer_history = np.zeros(self.args.timeline_size)
        # the first element is the current real-time occupancy
buffer_history[0] = self.qsize() / self.args.buffer_size
r_size = len(self.occupation_rate_record)
if r_size > self.args.timeline_size - 1:
r_size = self.args.timeline_size - 1
buffer_history[1 : r_size + 1] = self.occupation_rate_record[-r_size:]
return buffer_history
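# A short sketch of the observation layout built in Buffer.observe (the helper below is
# not part of the original file): slot 0 holds the live occupancy and slots 1..r hold
# the last r recorded occupancy rates in chronological order, capped at timeline_size - 1.
def _toy_buffer_observation(current_rate, history, timeline_size):
    obs = np.zeros(timeline_size)
    obs[0] = current_rate
    r = min(len(history), timeline_size - 1)
    if r > 0:
        obs[1 : r + 1] = history[-r:]
    return obs
# e.g. _toy_buffer_observation(0.4, [0.1, 0.2, 0.3], 3) -> array([0.4, 0.2, 0.3])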
| 787 | 26.172414 | 80 |
py
|
MERL-LB
|
MERL-LB-main/envs/datacenter_env/job.py
|
import numpy as np
class Job:
def __init__(self, args, job_id, res_req, job_len, job_predict_len, enter_time) -> None:
self.args = args
self.id = job_id
self.res_req = res_req
self.len = job_len
self.predict_len = job_predict_len
self.enter_time = enter_time
self.start_time = -1
self.finish_time = -1
self.predict_finish_time = -1
def observe(self):
job_state = np.zeros((self.args.timeline_size, self.args.res_num))
for r in range(self.args.res_num):
job_state[: self.len, r] = self.res_req[r] / self.args.res_capacity
return job_state
def res_req_rate(self):
return np.array(self.res_req, dtype=np.float32) / self.args.res_capacity
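# A brief sketch of the matrix returned by Job.observe (the helper below is not part of
# the original file): the first job_len rows hold the normalised resource request and
# the remaining rows are zero, e.g. timeline_size=4, res_num=2, job_len=2,
# res_req=[3, 1], res_capacity=10 gives [[0.3, 0.1], [0.3, 0.1], [0.0, 0.0], [0.0, 0.0]].
def _toy_job_observation(timeline_size, res_num, job_len, res_req, res_capacity):
    state = np.zeros((timeline_size, res_num))
    for r in range(res_num):
        state[:job_len, r] = res_req[r] / res_capacity
    return state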
| 769 | 28.615385 | 92 |
py
|
MERL-LB
|
MERL-LB-main/envs/datacenter_env/env.py
|
import random
import numpy as np
from typing import List
from envs.datacenter_env.buffer import Buffer
from envs.datacenter_env.job import Job
from envs.datacenter_env.job_generator import JobGenerator
from envs.datacenter_env.machine import Machine
class DatacenterEnv:
def __init__(self, args) -> None:
self.args = args
self.seed = self.args.seed
self.set_seed()
self.job_generator = JobGenerator(args)
self.seq_index = 0 # which example sequence
self.timeline_job_data = None
# set random seed
def set_seed(self):
random.seed(self.seed)
np.random.seed(self.seed)
# reset datacenter environment
def reset(self):
self.pre_time = -1
self.curr_time = -1
self.job_id = 0
self.jobs_record = {}
self.machines_state_record = []
self.machines_occupancy_rate_record = []
self.machines_max_rate_record = []
self.machines_finish_time_record = []
self.machines_job_num_record = []
self.machines_power_record = []
self.machines_var_record = []
self.action_record = []
self.early_stop = False
self.curr_allocated = False
self.buffer_full = False
self.job_num = None
self.average_workload = None
# done
self.done = False
        # reset mode
        # cycle through a fixed number of job sequences
if self.args.reset_type == "cycle":
if self.timeline_job_data is None:
self.generate_sequence_jobs()
else:
self.seq_index = (self.seq_index + 1) % self.args.job_seq_num
        # generate a brand-new job sequence on every reset
elif self.args.reset_type == "new":
self.generate_sequence_jobs()
self.seq_index = 0
        # reuse a single job sequence
elif self.args.reset_type == "repeat":
if self.timeline_job_data is None:
self.generate_sequence_jobs()
self.seq_index = 0
elif self.args.reset_type == "index":
if self.timeline_job_data is None:
self.generate_sequence_jobs()
# set current jobs
self.set_curr_timeline_jobs()
# set block list
self.block_jobs: List[Job] = []
# set machines
self.machine_list: List[Machine] = [
Machine(self.args, machine_id=machine_id) for machine_id in range(self.args.machine_num)
]
        # wake up all containers
for machine in self.machine_list:
machine.wakeup()
# set allocate buffer
self.buffer = Buffer(self.args)
        # while the buffer is empty and the episode is not done, advance time
# put curr time job to buffer
while self.buffer.empty() and not self.done:
self.time_proceed_step()
# curr_job
self.curr_allocate_jobs: List[Job] = [None] * self.args.job_allocate_num
for index in range(self.args.job_allocate_num):
if not self.buffer.empty():
self.curr_allocate_jobs[index] = self.buffer.get()
obs = self.observe()
return obs
    # generate job sequences
def generate_sequence_jobs(self, max_time=None):
# generate new jobs
if max_time is not None:
self.args.max_time = max_time
self.timeline_job_data = self.job_generator.get_new_job()
    # set the current job timeline
def set_curr_timeline_jobs(self):
(
self.curr_actual_timeline_job_len,
self.curr_predict_timeline_job_len,
self.curr_timeline_job_res_req,
) = self.timeline_job_data[self.seq_index]
self.calculate_job_num()
self.calculate_average_workload()
    # count the total number of jobs in the current sequence
def calculate_job_num(self):
job_num = 0
for time_len in self.curr_actual_timeline_job_len:
job_num += len(time_len)
self.job_num = job_num
print("job num: ", job_num)
    # compute the average workload of the current sequence
def calculate_average_workload(self):
t_all_res_req = np.zeros(self.args.res_num)
time_len = len(self.curr_actual_timeline_job_len)
for t in range(time_len):
t_job_len = self.curr_actual_timeline_job_len[t] # n*1
if t_job_len.shape[0] == 0:
continue
t_job_res_req = self.curr_timeline_job_res_req[t] # n*res_num
t_res_req = t_job_len.reshape((-1, 1)) * t_job_res_req
t_all_res_req += np.sum(t_res_req, axis=0)
average_workload = (
t_all_res_req
/ float(self.args.res_capacity)
/ float(self.args.machine_num)
/ float(time_len)
)
self.average_workload = average_workload
print("average work load: ", average_workload)
    # re-enqueue jobs blocked in the previous step
def put_pre_block_job_to_buffer(self):
for job in self.block_jobs:
if not self.buffer.full():
self.buffer.put(job)
else:
print("buffer full !")
self.buffer_full = True
self.done = True
self.block_jobs = []
    # enqueue jobs arriving at the current time
    def put_curr_time_job_to_buffer(self):
        # jobs arriving at the current time step enter the buffer
curr_actual_time_jobs_len = self.curr_actual_timeline_job_len[self.curr_time]
curr_predict_time_jobs_len = self.curr_predict_timeline_job_len[self.curr_time]
curr_time_jobs_res_req = self.curr_timeline_job_res_req[self.curr_time]
for job_index in range(len(curr_actual_time_jobs_len)):
job_actual_len = curr_actual_time_jobs_len[job_index]
job_predict_len = curr_predict_time_jobs_len[job_index]
job_res_req = curr_time_jobs_res_req[job_index]
new_job = Job(
args=self.args,
job_id=self.job_id,
res_req=job_res_req,
job_len=job_actual_len,
job_predict_len=job_predict_len,
enter_time=self.curr_time,
)
if not self.buffer.full():
self.buffer.put(new_job)
self.job_id += 1
                # record every job that enters the system
self.jobs_record[self.job_id] = new_job
else:
print("buffer full !")
self.buffer_full = True
self.done = True
break
        # record the buffer occupancy
self.buffer.record_rate()
    # get all finished jobs
def get_finished_job(self):
finished_jobs: List[Job] = []
for vm in self.machine_list:
finished_jobs.extend(vm.finished_job)
return finished_jobs
def get_max_finish_time_by_occupy(self, occupy):
occupy = np.array(occupy)
occupy = np.sum(occupy, axis=1)
state = occupy > 0
max_finish_time = np.sum(state == True)
return max_finish_time
    # get the allocation mask
def get_machines_allocate_mask(self, job):
        machines_allocate_mask = []  # False means the job cannot be placed on that machine
for machine in self.machine_list:
machines_allocate_mask.append(machine.check_allocate_feasible(job))
machines_allocate_mask = np.array(machines_allocate_mask, dtype=np.bool8)
return machines_allocate_mask
    # observe the current system state
def observe(self):
if self.args.observe_type == 1:
job = self.curr_allocate_jobs[0]
if job == None:
return None
job_res_req_rate = job.res_req_rate()
job_run_time = job.len
machines_occupancy_rate = []
machines_run_time = []
action_mask = []
for machine in self.machine_list:
machines_occupancy_rate.append(machine.get_curr_occupancy_rate())
machines_run_time.append(machine.get_max_finish_time())
action_mask.append(machine.check_allocate_feasible(job))
return (
np.array(job_res_req_rate, dtype=np.float32),
np.array(job_run_time, dtype=np.int32),
np.array(machines_occupancy_rate, dtype=np.float32),
np.array(machines_run_time, dtype=np.int32),
np.array(action_mask, dtype=np.bool8),
)
elif self.args.observe_type == 2:
job = self.curr_allocate_jobs[0]
if job == None:
return None
job_res_req_rate = job.res_req_rate()
            job_run_time = job.len / self.args.max_job_len  # normalized
machines_sample_occupancy_rate = []
machines_run_time = []
action_mask = []
for machine in self.machine_list:
                # sample 5 points along the timeline
# t*4
machine_all_occupancy_rate = machine.get_all_occupancy_rate()
index = np.linspace(0, len(machine_all_occupancy_rate) - 1, 5, dtype=np.int32)
machine_sample_occupancy_rate = machine_all_occupancy_rate[index, :]
machines_sample_occupancy_rate.append(machine_sample_occupancy_rate)
                # record the remaining run time
machines_run_time.append(
machine.get_max_finish_time() / self.args.max_job_len
                )  # normalized
action_mask.append(machine.check_allocate_feasible(job))
return (
np.array(job_res_req_rate, dtype=np.float32),
np.array(job_run_time, dtype=np.float32),
np.array(machines_sample_occupancy_rate, dtype=np.float32), # 10*5*4
np.array(machines_run_time, dtype=np.float32),
np.array(action_mask, dtype=np.bool8),
)
elif self.args.observe_type == 3:
# job state
jobs_state = []
for job in self.curr_allocate_jobs:
if job == None:
jobs_state.append(np.zeros((self.args.timeline_size, self.args.res_num)))
else:
# [time_horizon, num_res]
jobs_state.append(job.observe())
# machines state
machines_state = []
action_mask = []
for machine in self.machine_list:
# [time_horizon, num_res]
machines_state.append(machine.observe())
# get allocate mask
action_mask.append(machine.check_allocate_feasible(self.curr_allocate_jobs[0]))
# buffer state
buffer_state = self.buffer.observe()
return (
np.array(jobs_state, dtype=np.float32),
np.array(machines_state, dtype=np.float32),
np.array(action_mask, dtype=np.bool8),
np.array(buffer_state, dtype=np.float32),
)
elif self.args.observe_type == 4:
job = self.curr_allocate_jobs[0]
if job == None:
return None
job_res_req_rate = job.res_req_rate()
            job_run_time = job.len / self.args.max_job_len  # normalized
# todo actual or not actual
machines_all_occupancy_rate = []
machines_run_time = []
jobs_num = []
action_mask = []
for machine in self.machine_list:
                # sample 10 points along the timeline
# t*4
# machine_all_occupancy_rate = machine.get_all_occupancy_rate()
if self.args.actual:
machine_all_occupancy_rate = machine.get_all_actual_occupancy_rate()
else:
machine_all_occupancy_rate = machine.get_all_predict_occupancy_rate()
# print(np.sum(machine_all_occupancy_rate - _machine_all_occupancy_rate))
# print((machine_all_occupancy_rate == _machine_all_occupancy_rate).all())
machines_all_occupancy_rate.append(machine_all_occupancy_rate)
                # record the remaining run time
# machines_run_time.append(
# machine.get_max_finish_time() / self.args.max_job_len
                # )  # normalized
machines_run_time.append(
self.get_max_finish_time_by_occupy(machine_all_occupancy_rate)
/ self.args.max_job_len
                )  # normalized
jobs_num.append(machine.get_running_job_num())
action_mask.append(machine.check_allocate_feasible(job))
machines_all_occupancy_rate = np.array(machines_all_occupancy_rate)
index = np.linspace(0, len(machines_all_occupancy_rate[0]) - 1, 10, dtype=np.int32)
machines_all_occupancy_rate = machines_all_occupancy_rate[:, index, :]
return (
np.array(job_res_req_rate, dtype=np.float32),
np.array(job_run_time, dtype=np.float32),
np.array(machines_all_occupancy_rate, dtype=np.float32), # 10*10*4
np.array(machines_run_time, dtype=np.float32),
np.array(jobs_num, dtype=np.float32),
np.array(action_mask, dtype=np.bool8),
)
else:
NotImplementedError()
    # compute the current reward
def get_reward(self):
reward = 0
if self.args.reward_type == "machine_run_num":
run_num = 0
for machine in self.machine_list:
if machine.state == Machine.RUN:
run_num += 1
reward = -run_num
elif self.args.reward_type == "res_std":
occupancy_rate = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
occupancy_rate.append(machine.get_curr_occupancy_rate())
occupancy_rate = np.array(occupancy_rate)
occupancy_std = np.std(occupancy_rate, axis=0)
std = np.sum(occupancy_std * np.array(self.args.res_std_weight))
reward = -std
elif self.args.reward_type == "res_var":
occupancy_rate = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
occupancy_rate.append(machine.get_curr_occupancy_rate())
occupancy_rate = np.array(occupancy_rate)
occupancy_var = np.var(occupancy_rate, axis=0)
var = np.sum(occupancy_var * np.array(self.args.res_var_weight))
reward = 1 / (var + 5e-1)
if self.curr_allocated:
reward += 0.5
else:
reward -= 0.5
elif self.args.reward_type == "curr_res_rate":
occupancy_rate = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
occupancy_rate.append(machine.get_curr_occupancy_rate())
occupancy_rate = 1 - np.array(occupancy_rate)
reward = -np.sum(occupancy_rate)
elif self.args.reward_type == "all_res_rate":
occupancy_rate = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
occupancy_rate.append(machine.get_all_occupancy_rate())
occupancy_rate = 1 - np.array(occupancy_rate)
reward = -np.sum(occupancy_rate)
elif self.args.reward_type == "job_slowdown":
for machine in self.machine_list:
for job in machine.running_jobs:
reward -= 1 / float(job.len)
for job in self.block_jobs:
reward -= 1 / float(job.len)
elif self.args.reward_type == "run_time_and_var":
run_num = 0
for machine in self.machine_list:
if machine.state == Machine.RUN:
run_num += 1
reward = -run_num
occupancy_rate = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
occupancy_rate.append(machine.get_curr_occupancy_rate())
occupancy_rate = np.array(occupancy_rate)
occupancy_std = np.std(occupancy_rate, axis=0)
std = np.sum(occupancy_std * np.array(self.args.res_std_weight))
reward = -std
            # TODO: is there a cleaner way to iterate over the queue?
buffer_job_num = self.buffer.qsize()
for _ in range(buffer_job_num):
job = self.buffer.get()
reward -= 1 / float(job.len)
self.buffer.put(job)
elif self.args.reward_type == "machine_finish_time":
finish_time = []
for machine in self.machine_list:
if machine.state == Machine.RUN:
finish_time.append(machine.get_max_finish_time())
reward = -np.sum(finish_time)
elif self.args.reward_type == "utilization_and_std":
machine_future_utilization = []
for machine in self.machine_list:
machine_future_utilization.append(machine.get_all_occupancy_rate())
# m*t*res_num
machine_future_utilization = np.array(machine_future_utilization)[:, :, :]
# t*res_num
use_rate = np.mean(machine_future_utilization, axis=0)
# res_num
use_rate = np.mean(use_rate, axis=0)
# t*res_num
use_std = np.std(machine_future_utilization, axis=0)
# res_num
use_std = np.mean(use_std, axis=0)
            # higher utilization is better, smaller std is better
# if self.curr_allocated:
# reward = np.sum(use_rate - use_std)
# else:
# reward = -np.sum(use_std)
reward = -np.mean(use_std)
elif self.args.reward_type == "runtime_and_std":
runtime_list = []
machines_occupancy_rate = []
for machine in self.machine_list:
runtime_list.append(machine.get_max_finish_time())
machines_occupancy_rate.append(machine.get_curr_occupancy_rate())
machines_occupancy_rate = np.array(machines_occupancy_rate)
machines_occupancy_std = np.std(machines_occupancy_rate, axis=0)
mean_std = np.mean(machines_occupancy_std)
runtime = np.mean(runtime_list)
            # runtime and std are both better when smaller; negate so that a larger cumulative reward is better
reward = -np.array([runtime, mean_std * self.args.res_capacity])
elif self.args.reward_type == "zero":
reward = 0
else:
return NotImplemented
        # optionally add a large negative reward when the buffer overflows or the episode stops early
# if self.buffer_full or self.early_stop:
# self.done = True
# reward -= 10000
        # reward scaling
reward = reward * self.args.reward_scale
return reward
    # advance the whole system by one time step
    def time_proceed_step(self):
        # update the current time
self.curr_time += 1
        # run the machines for one step and update job status
for machine in self.machine_list:
machine.time_proceed(self.curr_time)
        # re-enqueue blocked jobs
self.put_pre_block_job_to_buffer()
if self.curr_time < self.args.max_time:
            # enqueue jobs arriving at the current time
self.put_curr_time_job_to_buffer()
elif self.args.end_mode == "max_time":
self.done = True
elif self.args.end_mode == "all_allocate" and self.buffer.empty():
self.done = True
elif self.curr_time > self.args.max_end_time:
print(f"Early Stop ! {self.args.end_mode} {self.buffer.empty()}")
self.early_stop = True
self.done = True
        # record the current machine states
self.record()
def record(self):
machines_state = []
machines_max_rate = []
machines_finish_time = []
machines_power = []
machines_occupancy_rate = []
machines_job_num = []
for machine in self.machine_list:
machines_state.append(machine.state)
machines_max_rate.append(machine.get_max_occupancy_rate())
machines_finish_time.append(machine.get_max_finish_time())
machines_power.append(machine.get_current_power())
machines_occupancy_rate.append(machine.get_curr_occupancy_rate())
machines_job_num.append(len(machine.running_jobs))
self.machines_job_num_record.append(machines_job_num)
self.machines_state_record.append(np.array(machines_state))
self.machines_max_rate_record.append(np.array(machines_max_rate))
self.machines_finish_time_record.append(np.array(machines_finish_time))
self.machines_power_record.append(np.array(machines_power))
machines_occupancy_rate = np.array(machines_occupancy_rate)
self.machines_occupancy_rate_record.append(machines_occupancy_rate)
# machines_state = np.array(machines_state)
# machines_occupancy_rate = machines_occupancy_rate[machines_state == 1]
        # note: an all-zero occupancy row also gives a very small variance
machines_var = np.sum(
np.var(machines_occupancy_rate, axis=0) * np.array(self.args.res_var_weight)
)
self.machines_var_record.append(machines_var)
# def step_action(self, action):
# reward = 0
# for job_index, curr_job in enumerate(self.curr_allocate_jobs):
# if curr_job is not None:
def step(self, action):
self.action_record.append(action)
reward = 0
info = None
# if self.curr_time == 1250:
# print("debug")
curr_job = self.curr_allocate_jobs[0]
self.curr_allocate_jobs[0] = None
# not allocate job
if action == self.args.machine_num:
self.block_jobs.append(curr_job)
self.curr_allocated = False
# allocate job
else:
self.curr_allocated = self.machine_list[action].allocation_job(
curr_job,
self.curr_time,
)
if not self.curr_allocated:
self.block_jobs.append(curr_job)
# 计算reward
reward = self.get_reward()
# 缓冲为空且未终止时候 时间走到下一步
while self.buffer.empty() and not self.done:
self.time_proceed_step()
        # if the buffer is not empty, refill any empty allocation slots
for jop_index, job in enumerate(self.curr_allocate_jobs):
if job is None and not self.buffer.empty():
self.curr_allocate_jobs[jop_index] = self.buffer.get()
        # observe the new state
obs = self.observe()
        # fetch the job record
info = self.jobs_record
done = self.done
return obs, reward, done, info
# def step_probs(self, jobs_action_prob, greedy=False):
# reward = 0
# info = None
# choose_actions = []
# for job_index, curr_job in enumerate(self.curr_allocate_jobs):
# if curr_job is not None:
# machines_allocate_mask = self.get_machines_allocate_mask(curr_job)
# job_action_prob = jobs_action_prob[job_index]
# if np.all(machines_allocate_mask == False):
# print("ok")
# pass
# job_action_prob[machines_allocate_mask == False] = 0
# if greedy:
# action = np.argmax(job_action_prob)
# else:
# # TODO
    #                 # Choose an action according to the given probabilities
# job_action_prob = job_action_prob / np.sum(job_action_prob)
# action = np.random.choice(
# np.arange(len(job_action_prob)), p=job_action_prob
# )
# choose_actions.append(action)
# allocated = self.machine_list[action].allocation_job(
# curr_job, self.curr_time
# )
    #             # TODO handle the case where a job cannot be placed
    #             # assert allocated == True, "Placement should never fail in this experiment"
    #             if not allocated:
    #                 # Allocation failed, push the job into the block queue
    #                 assert allocated == True, "Placement should never fail in this experiment"
    #                 # self.block_jobs.append(curr_job)
    #             # Reset the slot
# self.curr_allocate_jobs[job_index] = None
# else:
# choose_actions.append(-1)
    #     # Compute the reward
    #     reward = self.get_reward()
    #     # While the buffer is empty and the episode is not done, advance time
    #     while self.buffer.empty() and not self.done:
    #         self.time_proceed_step()
    #     # When the buffer is not empty and a slot is free, dequeue a job into the slot
    #     for job_index, job in enumerate(self.curr_allocate_jobs):
    #         if job is None and not self.buffer.empty():
    #             self.curr_allocate_jobs[job_index] = self.buffer.get()
    #     # Observe the new state
    #     ob = self.observe()
    #     # Fetch the job records as info
# info = self.jobs_record
# done = self.done
# return choose_actions, ob, reward, done, info
    # Run until every job has finished
def run_to_end(self):
all_done = False
while not all_done and not self.buffer.full():
running_job_num = 0
# running_machine_num = 0
for machine in self.machine_list:
if machine.state == Machine.RUN:
# running_machine_num += 1
running_job_num += len(machine.running_jobs)
if (
self.buffer.empty()
and running_job_num == 0
# and running_machine_num == 1
):
all_done = True
            # Keep advancing time
# print(running_job_num)
self.time_proceed_step()
def get_matrix(self, eval_type):
if eval_type == "compute_time":
data = np.array(self.machines_max_rate_record)
data = data > 0
return np.sum(data)
MERL-LB | MERL-LB-main/config/test.py
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=10, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=50, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 3 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
# parser.add_argument("--max_res_req", default=15, type=int)
# parser.add_argument("--max_job_len", default=5 * 60, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="zero",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
parser.add_argument(
"--save_path",
default="output/test",
type=str,
)
parser.add_argument(
"--method",
default="wsga",
type=str,
)
parser.add_argument(
"--tag",
default="run01",
type=str,
)
parser.add_argument("--observe_type", default=4, type=int)
args, unknown = parser.parse_known_args()
return args
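# Usage sketch (illustrative): every value above is a default that can be overridden
# on the command line; because parse_known_args() is used, unrecognised flags are
# silently ignored instead of raising an error.
#   args = parse_args()        # defaults only
#   # python some_script.py --machine_num 20 --method wsga --flag_not_defined 1
#   # -> args.machine_num == 20 and the unknown flag is dropped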
MERL-LB | MERL-LB-main/config/deepjs.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 6 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="runtime_and_std",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
"runtime_and_std",
],
type=str,
)
# input drive
parser.add_argument(
"--save_path",
default="output/train",
type=str,
)
parser.add_argument(
"--method",
default="nsga",
type=str,
)
parser.add_argument(
"--tag",
default="run03",
type=str,
)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--experience_num", default=10, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--epoch", default=100000, type=float)
parser.add_argument("--observe_type", default=4, type=int)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/ppo.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 6 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="runtime_and_std",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
parser.add_argument(
"--save_path",
default="output/train",
type=str,
)
parser.add_argument(
"--method",
default="ppo",
type=str,
)
parser.add_argument(
"--tag",
default="run01",
type=str,
)
# ppo agent
parser.add_argument("--observe_type", default=4, type=int)
parser.add_argument("--num_episodes", default=200000, type=int)
parser.add_argument("--ppo_epochs", default=5, type=int)
parser.add_argument("--ppo_update_timestep", default=256, type=int)
parser.add_argument("--ppo_gamma", default=0.99, type=float)
parser.add_argument("--ppo_actor_lr", default=0.001, type=float)
parser.add_argument("--ppo_critic_lr", default=0.001, type=float)
parser.add_argument("--ppo_eps_clip", default=0.2, type=float)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/dqn.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 6 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="runtime_and_std",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
parser.add_argument(
"--save_path",
default="output/train",
type=str,
)
parser.add_argument(
"--method",
default="dqn",
type=str,
)
parser.add_argument(
"--tag",
default="run01",
type=str,
)
# DQN
parser.add_argument("--observe_type", default=4, type=int)
parser.add_argument("--batch_size", default=256, type=int)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--eps_start", default=0.6, type=float)
parser.add_argument("--eps_end", default=0.1, type=float)
parser.add_argument("--target_update", default=64, type=int)
parser.add_argument("--num_episodes", default=200000, type=int)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/ga.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 6 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="zero",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
parser.add_argument(
"--save_path",
default="output/train",
type=str,
)
parser.add_argument(
"--method",
default="nsga",
type=str,
)
parser.add_argument(
"--tag",
default="run03",
type=str,
)
parser.add_argument("--observe_type", default=4, type=int)
# genetic
parser.add_argument("--ga_parent_size", default=25, type=int)
parser.add_argument("--ga_children_size", default=25, type=int)
parser.add_argument("--ga_mutate_rate", default=0.25, type=float)
parser.add_argument("--ga_mutate_scale", default=0.05, type=float)
parser.add_argument("--ga_choice", default="generate", type=str)
parser.add_argument("--ga_fitness_num", default=2, type=int)
parser.add_argument("--ga_fitness_wight", default=[0.4, 0.6], type=list)
parser.add_argument(
"--ga_fitness_type",
default="double",
choices=["std", "runtime", "double"],
type=str,
)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/es.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument(
"--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list
)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="zero",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
# genetic
parser.add_argument(
"--save_path",
default="output/one_job/es/reward_mean/run02",
type=str,
)
parser.add_argument("--ga_parent_size", default=1, type=int)
parser.add_argument("--ga_children_size", default=10, type=int)
parser.add_argument("--ga_mutate_rate", default=0.25, type=float)
parser.add_argument("--ga_mutate_scale", default=0.15, type=float)
parser.add_argument("--ga_choice", default="generate", type=str)
parser.add_argument("--ga_fitness_num", default=2, type=int)
parser.add_argument("--ga_fitness_wight", default=[0.5, 0.5], type=list)
parser.add_argument(
"--ga_fitness_type",
default="double",
choices=["std", "runtime", "double"],
type=str,
)
parser.add_argument("--observe_type", default=4, type=int)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/cdga.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument(
"--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list
)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="zero",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
# genetic
parser.add_argument(
"--save_path",
default="output/one_job/cdga/reward_mean/run01_m10_j10",
type=str,
)
parser.add_argument("--ga_parent_size", default=25, type=int)
parser.add_argument("--ga_children_size", default=25, type=int)
parser.add_argument("--ga_mutate_rate", default=0.25, type=float)
parser.add_argument("--ga_mutate_scale", default=0.15, type=float)
parser.add_argument("--ga_choice", default="generate", type=str)
parser.add_argument("--ga_fitness_num", default=2, type=int)
parser.add_argument("--ga_fitness_wight", default=[0.5, 0.5], type=list)
parser.add_argument(
"--ga_fitness_type",
default="double",
choices=["std", "runtime", "double"],
type=str,
)
parser.add_argument("--observe_type", default=4, type=int)
args, unknown = parser.parse_known_args()
return args
MERL-LB | MERL-LB-main/config/moead.py
import argparse
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--reset_type",
default="index",
choices=["new", "repeat", "cycle", "index"],
type=str,
)
parser.add_argument("--job_seq_num", default=10, type=int)
parser.add_argument("--actual", default=True, type=bool)
parser.add_argument("--user_sigma", default=10 * 60 // 6 // 3, type=int)
parser.add_argument("--max_time", default=10 * 60, type=int)
parser.add_argument("--max_end_time", default=60 * 60, type=int)
parser.add_argument("--max_job_num", default=5, type=int)
parser.add_argument("--max_res_req", default=10, type=int)
parser.add_argument("--max_job_len", default=10 * 60, type=int)
parser.add_argument("--job_allocate_num", default=1, type=int)
# parser.add_argument("--job_small_rate", default=0.6, type=float)
# parser.add_argument(
# "--level_job_num",
# # default=[5, 5, 20, 20, 10, 10, 20, 20, 5, 5],
# # default=[10, 10, 8, 4, 2, 10, 10, 8, 4, 2],
# default=[2, 4, 8, 8, 6, 4, 6, 8, 8, 4, 2],
# type=list,
# )
# parser.add_argument(
# "--level_job_long_rate",
# default=[0.9, 0.6, 0.5, 0.6, 0.7, 0.5, 0.6, 0.7, 0.8, 0.8, 0.9],
# type=list,
# )
parser.add_argument("--sleep_delay", default=3, type=int)
parser.add_argument("--pre_allocate_time", default=1, type=int)
parser.add_argument("--crisis_rate", default=0.8, type=float)
parser.add_argument("--max_crisis_rate", default=0.8, type=float)
parser.add_argument("--crisis_time", default=5, type=int)
parser.add_argument("--res_num", default=4, type=int)
parser.add_argument("--base_power", default=0.5, type=float)
parser.add_argument("--res_capacity", default=500, type=int)
parser.add_argument("--machine_num", default=10, type=int)
parser.add_argument("--max_expand_num", default=5, type=int)
parser.add_argument("--res_power_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_std_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--res_var_weight", default=[0.25, 0.25, 0.25, 0.25], type=list)
parser.add_argument("--buffer_size", default=2000, type=int)
parser.add_argument("--timeline_size", default=10 * 60, type=int)
parser.add_argument("--job_color_num", default=40, type=int)
parser.add_argument(
"--job_generate",
default="uniform",
choices=[
"uniform",
"level_uniform",
"level_bi_model",
],
type=str,
)
parser.add_argument(
"--obs_represent",
default="timeline",
choices=["image", "timeline"],
type=str,
)
parser.add_argument(
"--end_mode",
default="all_allocate",
choices=[
"all_allocate",
"all_done",
"max_time",
],
type=str,
)
parser.add_argument("--reward_scale", default=1, type=int)
parser.add_argument(
"--reward_type",
default="zero",
choices=[
"machine_run_num",
"machine_power",
"job_slowdown",
"curr_res_rate",
"res_std",
"res_var",
"run_time_and_var",
"utilization_and_std",
"zero",
],
type=str,
)
parser.add_argument(
"--save_path",
default="output/train",
type=str,
)
parser.add_argument(
"--method",
default="nsga",
type=str,
)
parser.add_argument(
"--tag",
default="run03",
type=str,
)
parser.add_argument("--observe_type", default=4, type=int)
# genetic
parser.add_argument("--moead_n", default=50, type=int)
parser.add_argument("--moead_m", default=2, type=int)
parser.add_argument("--moead_t", default=5, type=int)
parser.add_argument("--mutate_rate", default=0.25, type=float)
parser.add_argument("--mutate_scale", default=0.05, type=float)
args, unknown = parser.parse_known_args()
return args
PySDD | PySDD-master/setup.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
setup.py
~~~~~~~~
Usage: python3 setup.py build_ext --inplace
:author: Wannes Meert
:copyright: Copyright 2017-2023 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as BuildExtCommand
from setuptools import Distribution
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
import platform
import os
import re
from pathlib import Path
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
try:
import cysignals
except ImportError as exc:
print(f"cysignals not found\n{exc}")
cysignals = None
print("Python version = " + str(platform.python_version()))
class MyDistribution(Distribution):
global_options = Distribution.global_options + [
('debug', None, 'Compile with debug options on (PySDD option)'),
('usecysignals', None, 'Compile with CySignals (PySDD option)')
]
def __init__(self, attrs=None):
self.debug = 0
self.usecysignals = 0
super().__init__(attrs)
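# Usage sketch (illustrative): the extra global options registered above are passed
# before the build command, e.g.
#   python3 setup.py --debug build_ext --inplace
#   python3 setup.py --usecysignals build_ext --inplace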
# build_type = "debug"
build_type = "optimized"
here = Path(".") # setup script requires relative paths
with (here / "pysdd" / "__init__.py").open('r') as fd:
wrapper_version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not wrapper_version:
raise RuntimeError('Cannot find version information')
sdd_version = "2.0"
libwrapper_path = here / "pysdd" / "lib"
sdd_path = libwrapper_path / f"sdd-{sdd_version}"
lib_path = sdd_path / "lib"
inc_path = sdd_path / "include"
src_path = sdd_path / "src"
csrc_path = here / "pysdd" / "src"
# c_files_paths = src_path.glob("**/*.c")
c_files_paths = (src_path / "fnf").glob("*.c")
sdd_extra_inc_path = libwrapper_path / "sdd_extra" / "include"
# weight optimization wrapper
wo_path = libwrapper_path / "weight_optimization"
wo_inc_path = wo_path / "include"
wo_src_path = wo_path / "src"
wo_c_files_paths = wo_src_path.glob("*.c")
c_dirs_paths = set(p.parent for p in src_path.glob("**/*.c")) | {wo_src_path}
all_c_file_paths = [str(p) for p in c_files_paths] + [str(p) for p in wo_c_files_paths]
# print("Found c files: ", ", ".join([str(p) for p in all_c_file_paths]))
os.environ["LDFLAGS"] = f"-L{lib_path}"
os.environ["CPPFLAGS"] = f"-I{inc_path} " + f"-I{wo_inc_path} " + f"-I{sdd_extra_inc_path} " + f"-I{csrc_path} " + \
" ".join(f"-I{p}" for p in c_dirs_paths)
library_dirs = [str(lib_path)]
include_dirs = [str(inc_path), str(wo_inc_path), str(sdd_extra_inc_path), str(csrc_path)] + \
[str(p) for p in c_dirs_paths]
compile_time_env = {'HAVE_CYSIGNALS': False}
# if cysignals is not None:
# compile_time_env['HAVE_CYSIGNALS'] = True
c_args = {
'unix': ['-O3'],
'msvc': ['/Ox', '/fp:fast', '/favor:INTEL64', '/Og'],
'mingw32': ['-O3', '-march=native']
}
c_args_debug = {
'unix': ["-O0", '-g'],
    'msvc': ["-Zi", "/Od"],
'mingw32': ["-march=native", "-O0", '-g']
}
l_args = {
'unix': [],
'msvc': [],
'mingw32': ['-static-libgcc', '-static-libstdc++', '-Wl,-Bstatic,--whole-archive',
'-lwinpthread', '-Wl,--no-whole-archive']
}
l_args_debug = {
'unix': ['-g'],
'msvc': ["-debug"],
'mingw32': ['-g']
}
class MyBuildExtCommand(BuildExtCommand):
def build_extensions(self):
global lib_path
try:
c = self.compiler.compiler_type
print("Compiler type: {}".format(c))
compiler_name = self.compiler.compiler[0]
print("Compiler name: {}".format(compiler_name))
except AttributeError as exc:
output = str(exc)
if 'MSVCCompiler' in output:
c = "msvc"
print("Compiler type: {}".format(c))
compiler_name = "MSVCCompiler"
print("Compiler name: {}".format(compiler_name))
else:
print("Could not derive compiler type")
c = "Unknown"
compiler_name = "Unknown"
print("--debug: {}".format(self.distribution.debug))
print("--usecysignals: {}".format(self.distribution.usecysignals))
# Compiler and linker options
if self.distribution.debug:
self.force = True # force full rebuild in debugging mode
cur_c_args = c_args_debug
cur_l_args = l_args_debug
else:
cur_c_args = c_args
cur_l_args = l_args
if "gcc" in compiler_name:
cur_c_args["unix"].append("-std=c99")
if c in cur_c_args:
args = cur_c_args[c]
for e in self.extensions: # type: Extension
e.extra_compile_args = args
else:
print("Unknown compiler type: {}".format(c))
if c in cur_l_args:
args = cur_l_args[c]
for e in self.extensions: # type: Extension
e.extra_link_args = args
if self.distribution.usecysignals:
if cysignals is not None:
if self.cython_compile_time_env is None:
self.cython_compile_time_env = {'HAVE_CYSIGNALS': True}
else:
self.cython_compile_time_env['HAVE_CYSIGNALS'] = True
else:
print("Warning: import cysignals failed")
# Extra objects
if "Darwin" in platform.system():
if "arm" in platform.platform():
cur_lib_path = lib_path / "Darwin-arm"
else:
cur_lib_path = lib_path / "Darwin"
if build_type == "debug":
cur_lib_path = cur_lib_path / "debug"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Linux" in platform.system():
cur_lib_path = lib_path / "Linux"
libsdd_path = cur_lib_path / "libsdd.a"
elif "Windows" in platform.system():
cur_lib_path = lib_path / "Windows"
libsdd_path = cur_lib_path / "sdd.lib"
else:
libsdd_path = lib_path / "libsdd.a"
for e in self.extensions: # type: Extension
e.extra_objects = [str(libsdd_path)]
BuildExtCommand.build_extensions(self)
if cythonize is not None:
ext_modules = cythonize([
Extension(
"pysdd.sdd", [str(here / "pysdd" / "sdd.pyx")] + all_c_file_paths,
include_dirs=include_dirs,
library_dirs=library_dirs
# extra_objects=[str(libsdd_path)],
# extra_compile_args=extra_compile_args,
# extra_link_args=extra_link_args
# include_dirs=[numpy.get_include()]
)],
compiler_directives={'embedsignature': True},
# gdb_debug=gdb_debug,
compile_time_env=compile_time_env)
else:
ext_modules = []
print('****************************************')
print('Cython not yet available, cannot compile')
print('****************************************')
raise ImportError('Cython not available')
# install_requires = ['numpy', 'cython']
install_requires = ['cython>=0.29.6']
setup_requires = ['setuptools>=18.0', 'cython>=0.29.6']
tests_require = ['pytest']
dev_require = tests_require + ['cython>=0.29.6']
with (here / 'README.rst').open('r', encoding='utf-8') as f:
long_description = f.read()
setup_kwargs = {}
def set_setup_kwargs(**kwargs):
global setup_kwargs
setup_kwargs = kwargs
set_setup_kwargs(
name='PySDD',
version=wrapper_version,
description='Sentential Decision Diagrams',
long_description=long_description,
author='Wannes Meert, Arthur Choi',
author_email='[email protected]',
url='https://github.com/wannesm/PySDD',
project_urls={
'PySDD documentation': 'https://pysdd.readthedocs.io/en/latest/',
'PySDD source': 'https://github.com/wannesm/PySDD'
},
packages=["pysdd"],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require={
'all': ['cysignals', 'numpy'],
'dev': dev_require
},
include_package_data=True,
package_data={
'': ['*.pyx', '*.pxd', '*.h', '*.c', '*.so', '*.a', '*.dll', '*.dylib', '*.lib'],
},
distclass=MyDistribution,
cmdclass={
'build_ext': MyBuildExtCommand
},
entry_points={
'console_scripts': [
'pysdd = pysdd.cli:main'
]},
python_requires='>=3.6',
license='Apache 2.0',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords='sdd, knowledge compilation',
ext_modules=ext_modules,
zip_safe=False
)
try:
setup(**setup_kwargs)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, SystemExit) as exc:
print("********************************************")
print("ERROR: The C extension could not be compiled")
print("********************************************")
print(exc)
raise exc
PySDD | PySDD-master/pysdd-cli.py
#!/usr/bin/env python3
# encoding: utf-8
"""
pysdd-cli
~~~~~~~~~
PySDD command line interface.
:author: Wannes Meert, Arthur Choi
:copyright: Copyright 2018 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import sys
from pysdd import cli
if __name__ == "__main__":
sys.exit(cli.main())
PySDD | PySDD-master/pysdd/__main__.py
# -*- coding: UTF-8 -*-
"""
pysdd.__main__
~~~~~~~~~~~~~~
Command Line Interface.
Will be run when `python -m pysdd` is used.
:author: Wannes Meert, Arthur Choi
:copyright: Copyright 2017-2019 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from .cli import main
if __name__ == "__main__":
main()
PySDD | PySDD-master/pysdd/wmcstochastic.py
# -*- coding: UTF-8 -*-
"""
pysdd.wmcstochastic
~~~~~~~~~~~~~~~~~~~
Apply stochastic computing to Sentential Decision Diagrams (SDD).
:author: Wannes Meert
:copyright: Copyright 2018 KU Leuven.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import random
from itertools import accumulate
from .sdd import WmcManager
class WmcStochastic(WmcManager):
"""Weighted Model Counting using Stochastic Computing and a Sentential Decision Diagram (SDD).
This is an example implementation on how you can write your own custom Weighted Model Counting
algorithm on top of the PySDD library.
This class is a demonstration how stochastic computing [1,2] could be used for weighted model counting
given an SDD datastructure. The basic concept is that instead of propagating floating point numbers
through the SDD, only True and False samples are passed through. Each leaf generates samples from
a Bernouilli distribution with as parameter the probability associated, the samples collected at the
root node then represent a Bernouilli distribution with as mean the Weighted Model Count.
The main change is that a conjunction or multiplication is replaced with an AND gate and a
disjunction or addition is replaced with a MUX gate. Since a MUX gate rescales the result of
the addition back to the range [0,1], we need to keep track of scaling factors to tune the MUX
gates in the network.
    While Stochastic Computing has some advantages, like a small circuit area, cheap hardware, and
    robustness to noise, it also has some significant disadvantages. For high-precision computation
    it needs an exponentially large number of bits (2^n for an n-bit binary number) and the random
    number generator is expensive.
[1] Von Neumann, John. "Probabilistic logics and the synthesis of reliable organisms from unreliable
components." Automata studies 34 (1956): 43-98.
[2] A. Alaghi and J. P. Hayes. Computing with randomness. IEEE Spectrum, (3), March 2018.
"""
def __init__(self, node, log_mode=1):
super().__init__(node, log_mode)
self.cache = dict()
self.scalings = dict()
self.or_cumweights = dict()
self.compute_scalings()
def propagate_normal(self):
"""Comparison to normal weighted model counting given an SDD.
Note that this simple example does not perform smoothing and will thus not
return a correct result for non-smoothed SDDs. For a correct weighted model
counter smoothing should also be implemented.
"""
return self.depth_first_normal(self.node)
def depth_first_normal(self, node):
"""Depth first search to compute the WMC.
This does not yet perform smoothing!
"""
if node.is_decision():
rvalue = 0
for prime, sub in node.elements():
# Conjunction
result = self.depth_first_normal(prime) * self.depth_first_normal(sub)
# Disjunction
rvalue += result
elif node.is_true():
rvalue = self.one_weight
elif node.is_false():
rvalue = self.zero_weight
elif node.is_literal():
rvalue = self.literal_weight(node)
else:
raise Exception(f"Unknown node type: {node}")
return rvalue
def propagate(self, bitlength=100):
"""Weighted Model Counting using Stochastic Computation."""
nb_pos, nb_neg, scaling = self.propagate_counts(bitlength)
return (nb_pos / (nb_pos + nb_neg)) * scaling
def propagate_counts(self, bitlength=100):
nb_pos, nb_neg = 0, 0
scaling = self.scalings[self.node.id]
for i in range(bitlength):
if self.counting_df(self.node):
nb_pos += 1
else:
nb_neg += 1
return nb_pos, nb_neg, scaling
def counting_df(self, node):
"""Depth-first counting using Stochastic Computing.
        Features missing from this example that a full WMC implementation would need:
- Smoothing should be included.
- Subtrees that always have zero or one as values should be pruned to increase the resolution.
"""
self.cache = dict()
return self.counting_df_rec(node)
def counting_df_rec(self, node):
if node in self.cache:
return self.cache[node]
if node.is_decision():
rcounts = []
for prime, sub in node.elements():
# Conjunction
count_p = self.counting_df(prime)
count_s = self.counting_df(sub)
result_count = count_p & count_s
rcounts.append(result_count)
# Disjunction
cumweights = self.or_cumweights[node.id]
randval = random.random()
choice = None
for i, cw in enumerate(cumweights):
if cw > randval:
choice = i
break
if choice is None:
choice = len(rcounts) - 1
rvalue = rcounts[choice]
elif node.is_true():
rvalue = True
elif node.is_false():
rvalue = False
elif node.is_literal():
w = self.literal_weight(node)
if w > 1.0:
raise Exception(f"Stochastic WMC expects probabilities as weights, got {w} for {node}.")
randval = random.random()
if randval < w:
rvalue = True
else:
rvalue = False
else:
raise Exception(f"Unknown node type: {node}")
self.cache[node] = rvalue
return rvalue
def compute_scalings(self):
"""Compute all the scaling factors.
This is a pre-processing step to set up the scaling necessary to make the MUX gates compute
correct results. This is similar to the scaling operations that are also required for
performing computations with fixed-point integers
(https://en.wikipedia.org/wiki/Fixed-point_arithmetic#Operations).
"""
self.cache = dict()
self.compute_scalings_df(self.node)
def compute_scalings_df(self, node):
"""Computing all the scaling factors using depth-first search."""
if node in self.cache:
return self.cache[node]
if node.is_decision():
scalings = []
for prime, sub in node.elements():
# Conjunction
scaling_p = self.compute_scalings_df(prime)
scaling_s = self.compute_scalings_df(sub)
result_scaling = scaling_p * scaling_s
scalings.append(result_scaling)
# Disjunction
total_scaling = sum(scalings)
self.or_cumweights[node.id] = list(accumulate(s / total_scaling for s in scalings))
self.scalings[node.id] = total_scaling
rvalue = total_scaling
elif node.is_true():
self.scalings[node.id] = 1
rvalue = 1
elif node.is_false():
self.scalings[node.id] = 1
rvalue = 1
elif node.is_literal():
self.scalings[node.id] = 1
rvalue = 1
else:
raise Exception(f"Unknown node type: {node}")
self.cache[node] = rvalue
return rvalue
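if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): compare the stochastic
    # estimate with the exact depth-first propagation on a tiny SDD. The weights are
    # arbitrary illustrative probabilities.
    from pysdd.sdd import SddManager
    mgr = SddManager(var_count=2)
    a, b = mgr.vars
    node = a & b
    wmc = WmcStochastic(node, log_mode=0)
    for lit, w in [(1, 0.7), (-1, 0.3), (2, 0.4), (-2, 0.6)]:
        wmc.set_literal_weight(mgr.literal(lit), w)
    print("exact     :", wmc.propagate_normal())         # 0.7 * 0.4 = 0.28
    print("stochastic:", wmc.propagate(bitlength=5000))  # noisy estimate near 0.28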
PySDD | PySDD-master/pysdd/iterator.py
# -*- coding: UTF-8 -*-
"""
pysdd.iterator
~~~~~~~~~~~~~~
:author: Wannes Meert, Arthur Choi
:copyright: Copyright 2017-2019 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from collections import deque
from .sdd import SddManager, Vtree, SddNode
from .util import BitArray
MYPY = False
if MYPY:
from typing import Dict, Set, Optional, List, Tuple, Callable, Union
class SddIterator:
def __init__(self, sdd, smooth=True, smooth_to_root=False):
"""Simple iterator to iterate over the SDD graph.
Supports smoothing: An arithmetic circuit AC(X) is smooth iff
(1) it contains at least one indicator for each variable in X, and
(2) for every child c of '+'-node n, we have vars(n) = vars(c).
        Note: if you know that you will be performing WMC, smoothing can be implemented more efficiently
by keeping track of the expected WMC of used and unused variables in the vtree instead of keeping
track of the sets of variables as is done in this iterator.
        :param sdd: SddManager
:param smooth: Perform smoothing while iterating over the graph
:param smooth_to_root: Perform smoothing wrt root instead of a given node
"""
self.sdd = sdd # type: SddManager
self.vtree = sdd.vtree() # type: Vtree
self._wmc_cache = dict() # type: Dict[SddNode, Union[float, int]]
# Map Vtree node positions to expected variables
self._expected_vars = None # type: Optional[Dict[int, Set[int]]]
# Map Sdd nodes to missing variables
self._missing_vars = dict() # type: Dict[SddNode, Set[int]]
self.smooth = smooth # type: bool
self.smooth_to_root = smooth_to_root # type: bool
if self.smooth:
self._cache_expected_vars()
def _cache_expected_vars(self):
self._expected_vars = dict()
nb_vtree_nodes = self.sdd.var_count() * 2 - 1
# visited = [False] * nb_vtree_nodes
visited = BitArray(nb_vtree_nodes)
queue = deque([self.vtree])
while len(queue) > 0:
node = queue.pop() # type: Vtree
pos = node.position()
if node.is_leaf():
self._expected_vars[pos] = {node.var()}
else:
if visited[pos]:
self._expected_vars[pos] = self._expected_vars[node.left().position()] | \
self._expected_vars[node.right().position()]
else:
visited[pos] = True
queue.append(node)
queue.append(node.right())
queue.append(node.left())
def depth_first_from_root(self, func):
# type: (SddIterator, Callable) -> List[Union[int, float]]
"""Depth first iterator starting from the root nodes.
See `depth_first` for more details.
"""
results = []
for node in self.vtree.get_sdd_rootnodes(self.sdd):
results.append(self.depth_first(node, func))
return results
def depth_first(self, node, func):
# type: (SddIterator, SddNode, Callable) -> Union[int, float]
"""Depth first iterator
:param node: Start node
:param func: Function to be called for each node:
``rvalue = func(node, [(prime_rvalue, sub_rvalue, prime_vars, sub_vars)],
expected_prime_vars, expected_sub_vars)``
The given arguments are:
* node: Current node, if None, it should be considered a decision node
* prime_rvalue: Return value from prime node
* sub_rvalue: Return value from sub node
* prime_vars: Variables present in prime
* sub_vars: Variables present in sub
* expected_prime_vars: Variables that are expected in prime
* expected_sub_vars: Variables that are expected in sub
The return value can be any type, there are no assumptions.
For WMC this is typically float, and for MC int.
:return:
"""
self._wmc_cache = dict()
if self.smooth and self._expected_vars is None:
self._cache_expected_vars()
if self.smooth and (node.is_true() or node.is_literal()):
wmc = func(node, None, self._expected_vars[self.vtree.position()], set())
else:
wmc = self.depth_first_rec(node, func)
if self.smooth_to_root and not (node.is_true() or node.is_literal() or node.is_false()):
root = self.vtree.root()
if root != node.vtree():
wmc_prime = wmc
wmc_sub = func(self.sdd.true(), None, None, None)
used_prime_vars = self._expected_vars[node.vtree().position()]
used_sub_vars = set()
rvalues = [(wmc_prime, wmc_sub, used_prime_vars, used_sub_vars)]
expected_prime_vars = used_prime_vars
expected_sub_vars = self._expected_vars[root.position()] - used_prime_vars
wmc = func(None, rvalues, expected_prime_vars, expected_sub_vars)
return wmc
def depth_first_rec(self, node, func):
# type: (SddIterator, SddNode, Callable) -> Union[int, float]
if node in self._wmc_cache:
return self._wmc_cache[node]
if node.is_false():
result = func(node, None, None, None)
self._wmc_cache[node] = result
return result
if node.is_true():
result = func(node, None, None, None)
self._wmc_cache[node] = result
return result
vtree = node.vtree()
if node.is_decision():
rvalues = []
vtree_left = vtree.left()
if not self.smooth or vtree_left is None:
expected_prime_vars = set()
else:
expected_prime_vars = self._expected_vars[vtree_left.position()]
vtree_right = vtree.right()
if not self.smooth or vtree_right is None:
expected_sub_vars = set()
else:
expected_sub_vars = self._expected_vars[vtree_right.position()]
for prime, sub in node.elements():
wmc_prime = self.depth_first_rec(prime, func)
wmc_sub = self.depth_first_rec(sub, func)
if not self.smooth:
used_prime_vars = None
used_sub_vars = None
else:
if prime.vtree() is None:
used_prime_vars = set()
else:
used_prime_vars = self._expected_vars[prime.vtree().position()]
if sub.vtree() is None:
used_sub_vars = set()
else:
used_sub_vars = self._expected_vars[sub.vtree().position()]
rvalues.append((wmc_prime, wmc_sub, used_prime_vars, used_sub_vars))
rvalue = func(node, rvalues, expected_prime_vars, expected_sub_vars)
else:
rvalue = func(node, None, None, None)
if self._wmc_cache is not None:
self._wmc_cache[node] = rvalue
return rvalue
@staticmethod
def func_modelcounting(node, rvalues, expected_prime_vars, expected_sub_vars):
# type: (SddNode, List[Tuple[int, int, Set[int], Set[int]]], Set[int], Set[int]) -> int
"""Method to pass on to ``depth_first`` to perform model counting.
Note that the WmcManager method to perform WMC is much more efficient, and supports weights.
"""
if rvalues is None:
# Leaf
if node.is_true():
prime_smooth_factor = 2**len(expected_prime_vars) if expected_prime_vars is not None else 1
sub_smooth_factor = 2**len(expected_sub_vars) if expected_sub_vars is not None else 1
return prime_smooth_factor * sub_smooth_factor
elif node.is_false():
return 0
elif node.is_literal():
if expected_prime_vars is not None:
nb_missing_vars = len(expected_prime_vars)
if abs(node.literal) in expected_prime_vars:
nb_missing_vars -= 1
prime_smooth_factor = 2**nb_missing_vars
else:
prime_smooth_factor = 1
if expected_sub_vars is not None:
nb_missing_vars = len(expected_sub_vars)
if abs(node.literal) in expected_sub_vars:
nb_missing_vars -= 1
sub_smooth_factor = 2**nb_missing_vars
else:
sub_smooth_factor = 1
return prime_smooth_factor * sub_smooth_factor
else:
raise Exception("Unknown leaf type for node {}".format(node))
else:
# Decision node
if node is not None and not node.is_decision():
raise Exception("Expected a decision node for node {}".format(node))
rvalue = 0
for mc_prime, mc_sub, prime_vars, sub_vars in rvalues:
if prime_vars is not None:
nb_missing_vars = len(expected_prime_vars) - len(prime_vars)
prime_smooth_factor = 2**nb_missing_vars
else:
prime_smooth_factor = 1
if sub_vars is not None:
nb_missing_vars = len(expected_sub_vars) - len(sub_vars)
sub_smooth_factor = 2**nb_missing_vars
else:
sub_smooth_factor = 1
rvalue += mc_prime * prime_smooth_factor * mc_sub * sub_smooth_factor
return rvalue
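# Usage sketch (illustrative; `sdd` is an SddManager and `node` an SddNode obtained elsewhere):
#   it = SddIterator(sdd, smooth=True)
#   model_count = it.depth_first(node, SddIterator.func_modelcounting)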
PySDD | PySDD-master/pysdd/cli.py
#!/usr/bin/env python3
# encoding: utf-8
"""
pysdd-cli
~~~~~~~~~
PySDD command line interface.
:author: Wannes Meert, Arthur Choi
:copyright: Copyright 2019 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from pathlib import Path
import sys
import time
import argparse
import logging
from .sdd import SddManager, Vtree, Fnf, CompilerOptions
logger = logging.getLogger(__name__)
def main(argv=None):
options, args = getopt(argv)
fnf = None
weights = None
if options.cnf_filename is not None:
print("reading cnf...")
fnf = Fnf.from_cnf_file(bytes(options.cnf_filename))
weights = read_weights(options.cnf_filename)
elif options.dnf_filename is not None:
print("reading dnf...")
fnf = Fnf.from_dnf_file(bytes(options.dnf_filename))
weights = read_weights(options.dnf_filename)
if options.vtree_filename is not None:
print("reading initial vtree...")
vtree = Vtree.from_file(bytes(options.vtree_filename))
else:
if fnf is None:
raise argparse.ArgumentTypeError("CNF or DNF file required")
print(f"creating initial vtree {options.initial_vtree_type.decode()}")
vtree = Vtree(var_count=fnf.var_count, vtree_type=options.initial_vtree_type)
print("creating manager...")
manager = SddManager.from_vtree(vtree)
manager.set_options(options)
if options.sdd_filename is None:
print("compiling...")
c1 = time.time()
node = manager.fnf_to_sdd(fnf)
c2 = time.time()
secs = c2 - c1
print("")
print(f"compilation time : {secs:.3f} sec")
else:
print("reading sdd from file...")
c1 = time.time()
node = manager.read_sdd_file(options.sdd_filename)
c2 = time.time()
secs = c2 - c1
print(f"read time : {secs:.3f} sec")
weights = read_weights(options.sdd_filename)
wmc = create_wmc(node, weights, args)
print_node(node, wmc)
if options.verbose:
manager.print_stdout()
if options.minimize_cardinality:
print("\nminimizing cardinality...", end="")
c1 = time.time()
node = manager.global_minimize_cardinality(node)
c2 = time.time()
min_card = manager.minimum_cardinality(node)
print("")
wmc = create_wmc(node, weights, args)
print_node(node, wmc)
print(f" min cardinality : {min_card} {c2-c1:.3f} sec")
manager_vtree = manager.vtree()
if options.post_search:
node.ref()
print("dynamic vtree (post compilation)")
print(f" sdd initial size : {node.size()}")
c1 = time.time()
manager.minimize_limited()
c2 = time.time()
print(f" dynamic vtree time : {c2-c1:.3f} sec")
wmc = create_wmc(node, weights, args)
print_node(node, wmc)
node.deref()
if options.verbose:
manager.print_stdout()
if options.output_sdd_filename is not None:
print("saving compiled sdd ...", end="")
node.save(options.output_sdd_filename)
print("done")
if options.output_sdd_dot_filename is not None:
print("saving compiled sdd (dot) ...", end="")
node.save_as_dot(options.output_sdd_dot_filename)
print("done")
if options.output_vtree_filename is not None:
print("saving vtree ...", end="")
manager_vtree.save(options.output_vtree_filename)
print("done")
if options.output_vtree_dot_filename is not None:
print("saving vtree (dot) ...", end="")
manager_vtree.save_as_dot(options.output_vtree_dot_filename)
print("done")
print("done")
def create_wmc(node, weights, args):
if weights is None:
return None
wmc = node.wmc(log_mode=args.log_mode)
for i in range(len(weights)):
lit = (i // 2) + 1
if (i % 2) == 1:
lit = -lit
w = weights[i]
wmc.set_literal_weight(node.manager.literal(lit), w)
return wmc
def read_weights(nnf_path):
"""
Format: c weights PW_1 NW_1 ... PW_n NW_n
:param nnf_path: Path to NNF file
:return: list of weights
"""
weight_str = None
with open(nnf_path, "r") as ifile:
for line in ifile.readlines():
if "c weights " in line:
weight_str = line[10:].strip()
break
if weight_str is None:
return None
weight_strs = weight_str.split(" ")
weights = [float(w) for w in weight_strs]
return weights
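# Example (illustrative): a file containing the line
#   c weights 0.8 0.2 0.3 0.7
# yields [0.8, 0.2, 0.3, 0.7]; create_wmc() then assigns these weights to the
# literals 1, -1, 2, -2 in that order.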
def print_node(node, wmc=None):
print(f" sdd size : {node.size()}")
print(f" sdd node count : {node.count()}")
c1 = time.time()
mc = node.global_model_count()
c2 = time.time()
print(f" sdd model count : {mc} {c2-c1:.3f} sec")
if wmc is not None:
c1 = time.time()
mc = wmc.propagate()
c2 = time.time()
print(f" sdd weighted model count: {mc} {c2-c1:.3f} sec")
class CustomHelpFormatter(argparse.HelpFormatter):
def _format_text(self, text):
text = super()._format_text(text)
if "Copyright" in text:
a, b = text.split("Copyright")
text = a + "\n\n" + super()._format_text("Copyright" + b)
return text
def create_parser():
def bytes_path(p):
return bytes(Path(p))
def bytes_str(s):
return s.encode()
    epilog = ("Weighted Model Counting is performed if the NNF file contains a line formatted as follows: "
"\"c weights PW_1 NW_1 ... PW_n NW_n\"."
"Copyright 2017-2019, Regents of the University of California and KU Leuven")
parser = argparse.ArgumentParser(description='Sentential Decision Diagram, Compiler',
epilog=epilog,
formatter_class=CustomHelpFormatter)
input_group = parser.add_mutually_exclusive_group()
input_group.add_argument('-c', metavar='FILE', type=bytes_path,
dest='cnf_filename', help='set input CNF file')
input_group.add_argument('-d', metavar='FILE', type=bytes_path,
dest='dnf_filename', help='set input DNF file')
input_group.add_argument('-s', metavar='FILE', type=bytes_path,
dest='sdd_filename', help='set input SDD file')
parser.add_argument('-v', metavar='FILE', type=bytes_path,
dest='vtree_filename', help='set input VTREE file')
parser.add_argument('-W', metavar='FILE', type=bytes_path,
dest='output_vtree_filename', help='set output VTREE file')
parser.add_argument('-V', metavar='FILE', type=bytes_path,
dest='output_vtree_dot_filename', help='set output VTREE (dot) file')
parser.add_argument('-R', metavar='FILE', type=bytes_path,
dest='output_sdd_filename', help='set output SDD file')
parser.add_argument('-S', metavar='FILE', type=bytes_path,
dest='output_sdd_dot_filename', help='set output SDD (dot) file')
parser.add_argument('-m', action='store_true',
dest='minimize_cardinality', help='minimize the cardinality of compiled sdd')
parser.add_argument('-t', metavar='TYPE', type=bytes_str, default='balanced',
choices=[b'left', b'right', b'vertical', b'balanced', b'random'],
dest='initial_vtree_type',
help='set initial vtree type (left/right/vertical/balanced/random)')
parser.add_argument('-r', metavar='K', type=int, default=-1,
dest='vtree_search_mode',
help='if K>0: invoke vtree search every K clauses. If K=0: disable vtree search. ' +
'By default (no -r option), dynamic vtree search is enabled')
parser.add_argument('-q', action='store_true',
dest='post_search', help='perform post-compilation vtree search')
parser.add_argument('-p', action='store_true',
dest='verbose', help='verbose output')
parser.add_argument('--log_mode', action='store_true',
dest='log_mode', help='weights in log')
return parser
def getopt(argv=None):
parser = create_parser()
args = parser.parse_args(argv)
if args.verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
logger.addHandler(logging.StreamHandler(sys.stdout))
options = CompilerOptions(cnf_filename=args.cnf_filename,
dnf_filename=args.dnf_filename,
vtree_filename=args.vtree_filename,
sdd_filename=args.sdd_filename,
output_vtree_filename=args.output_vtree_filename,
output_vtree_dot_filename=args.output_vtree_dot_filename,
output_sdd_filename=args.output_sdd_filename,
output_sdd_dot_filename=args.output_sdd_dot_filename,
initial_vtree_type=args.initial_vtree_type,
minimize_cardinality=args.minimize_cardinality,
vtree_search_mode=args.vtree_search_mode,
post_search=args.post_search,
verbose=args.verbose)
# print(str(options))
return options, args
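# Sketch of typical use (argument values are illustrative only):
# options, args = getopt(["-c", "input.cnf", "-R", "output.sdd"])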
| 9,639 | 35.793893 | 108 |
py
|
PySDD
|
PySDD-master/pysdd/util.py
|
# -*- coding: UTF-8 -*-
"""
pysdd.util
~~~~~~~~~~
Utility functions on top of the ``sdd`` package.
:author: Wannes Meert, Arthur Choi
:copyright: Copyright 2017-2019 KU Leuven and Regents of the University of California.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import math
import array
from .sdd import SddNode, SddManager, Vtree
MYPY = False
if MYPY:
# from .sdd import Vtree
from typing import List, Optional, Dict, Set, Union, Tuple
LitNameMap = Dict[Union[int, str], str]
node_count = 0
def sdd_to_dot(node, litnamemap=None, show_id=False, merge_leafs=False):
# type: (Union[SddNode, SddManager], Optional[LitNameMap], bool, bool) -> str
"""Generate (alternative) Graphviz DOT string for SDD with given root.
This method is an alternative to SddManager.dot() and SddNode.dot().
:param node: Root node for graph or SddManager
:param litnamemap: Dictionary for node labels. For variable 1 the keys are 1 and -1 for positive and negative.
For multiplication and addition the keys are 'mult' and 'add'. And for true and false, the keys are 'true'
and 'false'.
:param show_id: Show internal node ids, useful for debugging
:param merge_leafs: Variable nodes are shown multiple times to improve the visualisation. Set this argument
to True to disable this.
:return: String in the Graphviz DOT format
"""
if isinstance(node, SddNode):
nodes = [node]
elif isinstance(node, SddManager):
mgr = node
vtree = mgr.vtree()
nodes = vtree.get_sdd_rootnodes(mgr)
else:
raise AttributeError(f"Unknown type {type(node)}")
global node_count
node_count = 0
if litnamemap is None:
litnamemap = {}
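    # Illustrative litnamemap (keys/values are arbitrary examples):
    # {1: "a", -1: "-a", "mult": "AND", "add": "OR", "true": "1", "false": "0"}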
if node is None:
raise ValueError("No root node given")
s = [
"digraph sdd {",
"overlap=false;"
]
visited = set()
for node in nodes:
nodeid, root_s = _sddnode_to_dot_int(node, visited, litnamemap, show_id, merge_leafs)
s += root_s
s += [
"}"
]
return "\n".join(s)
def _format_sddnode_label(node, name=None, litnamemap=None):
# type: (SddNode, Optional[str], Optional[LitNameMap]) -> str
if name is not None:
pass
elif node.is_true():
name = litnamemap.get("true", "⟙")
elif node.is_false():
name = litnamemap.get("false", "⟘")
else:
name = node.literal
if litnamemap is not None:
name = litnamemap.get(name, name)
return f"{name}"
def _format_sddnode_xlabel(node):
# type: (SddNode) -> str
if node.vtree() is not None:
vtree_pos = node.vtree().position()
else:
vtree_pos = "n"
return f"Id:{node.id}\\nVp:{vtree_pos}"
def _sddnode_to_dot_int(node, visited, litnamemap=None, show_id=False, merge_leafs=False):
# type: (SddNode, Set[SddNode], Optional[LitNameMap], bool, bool) -> Tuple[str, List[str]]
if node in visited:
return str(node.id), []
if node.is_false() or node.is_true() or node.is_literal():
# Leaf node
if merge_leafs:
visited.add(node)
label = _format_sddnode_label(node, None, litnamemap)
extra_options = ""
if show_id:
extra_options += (",xlabel=\"" + _format_sddnode_xlabel(node) + "\"")
if merge_leafs:
nodeid = str(node.id)
else:
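            # Without merging, each occurrence of a leaf gets its own DOT id
            # so it is drawn once per parent, which usually gives a cleaner layout.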
global node_count
nodeid = f"n{node_count}_{node.id}"
node_count += 1
return nodeid, [f"{nodeid} [shape=rectangle,label=\"{label}\"{extra_options}];"]
elif node.is_decision():
# Decision node
shape_format = ",shape=circle,style=filled,fillcolor=gray95"
visited.add(node)
extra_options = ""
if show_id:
extra_options += (",xlabel=\"" + _format_sddnode_xlabel(node) + "\"")
nodeid = str(node.id)
s = [f"{nodeid} [label=\"{litnamemap.get('add', '+')}\"{shape_format}{extra_options}];"]
# same_rank_nodes = []
for idx, (prime, sub) in enumerate(node.elements()):
prime_id, prime_s = _sddnode_to_dot_int(prime, visited, litnamemap, show_id, merge_leafs)
sub_id, sub_s = _sddnode_to_dot_int(sub, visited, litnamemap, show_id, merge_leafs)
ps_id = "ps_{}_{}".format(node.id, idx)
s += [
f"{ps_id} [label=\"{litnamemap.get('mult', '×')}\"{shape_format}{extra_options}];",
"{} -> {} [arrowhead=none];".format(node.id, ps_id),
"{} -> {} [arrowsize=.50,style=dashed];".format(ps_id, prime_id),
"{} -> {} [arrowsize=.50];".format(ps_id, sub_id),
]
s += prime_s
s += sub_s
# same_rank_nodes += [prime_id, sub_id]
# s += ["{rank=same;" + ";".join(same_rank_nodes) + "};"]
return nodeid, s
def vtree_to_dot(vtree, mgr, litnamemap=None, show_id=False):
# type: (Vtree, SddManager, Optional[LitNameMap], bool) -> str
"""Generate (alternative) Graphviz DOT string for given Vtree.
This method is an alternative to Vtree.dot().
:param vtree: Vtree to plot
:param mgr: SddManager associated with this Vtree
:param litnamemap: Dictionary for node labels. For variable 1 the keys are 1 and -1 for positive and negative.
For multiplication and addition the keys are 'mult' and 'add'. And for true and false, the keys are 'true'
and 'false'.
:param show_id: Show internal node ids, useful for debugging
:return: String in the Graphviz DOT format
"""
s = [
"digraph vtree {"
]
s += _vtree_to_dot_int(vtree, mgr, litnamemap, show_id)
s += [
"}"
]
return "\n".join(s)
def _vtree_to_dot_int(vtree, mgr, litnamemap=None, show_id=False):
# type: (Vtree, SddManager, Optional[LitNameMap], bool) -> List[str]
s = []
left = vtree.left()
right = vtree.right()
if left is None and right is None:
name = vtree.var()
if litnamemap is not None:
name = litnamemap.get(name, name)
extra_options = ""
if show_id:
            extra_options += f",xlabel=\"{vtree.position()} (" +\
                             ",".join(str(litnamemap.get(node.literal, node.literal))
                                      for node in vtree.get_sdd_nodes(mgr)) +\
                             ")\""
s += [f"{vtree.position()} [label=\"{name}\",shape=\"box\"{extra_options}];"]
else:
extra_options = ""
if show_id:
extra_options += f",xlabel=\"{vtree.position()} (" + \
",".join(str(litnamemap.get(node.literal, node.literal))
for node in vtree.get_sdd_nodes(mgr)) + \
")\""
s += [f"{vtree.position()} [shape=\"point\"{extra_options}];"]
if left is not None:
s += [f"{vtree.position()} -> {left.position()} [arrowhead=none];"]
s += _vtree_to_dot_int(left, mgr, litnamemap, show_id)
if right is not None:
s += [f"{vtree.position()} -> {right.position()} [arrowhead=none];"]
s += _vtree_to_dot_int(right, mgr, litnamemap, show_id)
return s
def nnf_file_wmc(nnf_filename, weights=None):
"""Perform non-smoothed Weighted Model Counting on the given NNF file.
This is an auxiliary function to perform WMC given an NNF file with only
Python code. This function will thus also work, even if the C SDD library
is not available.
A typical NNF file looks like:
nnf 12 12 3
L 1
...
A 2 3 9
O 2 2 2 10
"""
wmc = [] # type: List[Optional[float]]
ln = 0
detected_nnf = False
true_weight = 1.0
false_weight = 0.0
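    # Nodes are listed bottom-up: 'L' lines take the literal's weight,
    # 'A' (and) lines multiply their children, 'O' (or) lines sum them;
    # the last node listed is the root (wmc[-1]).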
with open(nnf_filename, 'r') as nnf_file:
for line in nnf_file.readlines():
cols = line.strip().split(' ')
if cols[0] == 'c':
continue
if cols[0] == 'nnf':
wmc = [None] * int(cols[1])
detected_nnf = True
continue
if not detected_nnf:
raise Exception(f"An NNF file should start with 'nnf'")
if cols[0] == 'L':
lit = int(cols[1])
if lit in weights:
wmc[ln] = weights[lit]
else:
wmc[ln] = true_weight
if cols[0] == 'A':
wmc[ln] = 1.0
for i in range(int(cols[1])):
wmc[ln] *= wmc[int(cols[2 + i])]
if cols[0] == 'O':
wmc[ln] = false_weight
for i in range(int(cols[2])):
wmc[ln] += wmc[int(cols[3 + i])]
ln += 1
return wmc[-1]
def sdd_file_wmc(sdd_filename, weights=None):
"""Perform non-smoothed Weighted Model Counting on the given SDD file.
This is an auxiliary function to perform WMC given an SDD file with only
Python code. This function will thus also work, even if the C SDD library
is not available.
A typical SDD file looks like:
sdd 11
L 1 0 1
...
D 0 1 2 1 2 7 8
"""
wmc = [] # type: List[Optional[float]]
ln = 0
detected_sdd = False
true_weight = 1.0
false_weight = 0.0
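    # Nodes are indexed by id: 'L' (literal), 'F' (false), 'T' (true) and
    # 'D' (decision) lines fill wmc[] bottom-up; the node with id 0 is
    # treated as the root.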
with open(sdd_filename, 'r') as sdd_file:
for line in sdd_file.readlines():
cols = line.strip().split(' ')
if cols[0] == 'c':
continue
if cols[0] == 'sdd':
detected_sdd = True
wmc = [None] * int(cols[1])
continue
if not detected_sdd:
raise Exception(f"An SDD file should start with 'sdd'")
if cols[0] == 'L':
nodeid = int(cols[1])
lit = int(cols[3])
if lit in weights:
wmc[nodeid] = weights[lit]
else:
wmc[nodeid] = 1.0
if cols[0] == 'F':
nodeid = int(cols[1])
wmc[nodeid] = false_weight
if cols[0] == 'T':
nodeid = int(cols[1])
wmc[nodeid] = true_weight
if cols[0] == 'D':
nodeid = int(cols[1])
nb_elmts = int(cols[3])
elmts = [int(col) for col in cols[4:]]
w = 0.0
for idx in range(nb_elmts):
w += wmc[elmts[2 * idx]] * wmc[elmts[2 * idx + 1]]
wmc[nodeid] = w
ln += 1
return wmc[0]
def psdd_file_wmc(psdd_filename, observations=None):
"""Perform Weighted Model Counting on the given PSDD file.
This is an auxiliary function to perform WMC given a PSDD file with only
Python code. This function will thus also work, even if the C SDD library
is not available.
A typical PSDD file looks like (Yitao's version):
c ids of psdd nodes start at 0
c psdd nodes appear bottom-up, children before parents
c
c file syntax:
c psdd count-of-sdd-nodes
c L id-of-literal-sdd-node id-of-vtree literal
c T id-of-trueNode-sdd-node id-of-vtree variable log(litProb)
c D id-of-decomposition-sdd-node id-of-vtree number-of-elements {id-of-prime id-of-sub log(elementProb)}*
psdd 49
T 0 20 11 -0.6931471805599453
:return: log(WMC)
"""
wmc = [] # type: List[Optional[float]]
ln = 0
detected_psdd = False
with open(psdd_filename, 'r') as sdd_file:
for line in sdd_file.readlines():
cols = line.strip().split(' ')
if cols[0] == 'c':
continue
if cols[0] == 'psdd':
detected_psdd = True
wmc = [None] * int(cols[1])
continue
if not detected_psdd:
                raise Exception("A PSDD file should start with 'psdd'")
if cols[0] == 'L':
nodeid = int(cols[1])
                lit = int(cols[3])
                var = abs(lit)
                if observations is not None and var in observations:
                    if lit > 0:
                        wmc[nodeid] = 0.0 if observations[var] else -math.inf
                    else:
                        wmc[nodeid] = 0.0 if not observations[var] else -math.inf
else:
wmc[nodeid] = 0.0
if cols[0] == 'F':
raise Exception("There should be no false nodes")
# nodeid = int(cols[1])
# wmc[nodeid] = -math.inf
if cols[0] == 'T':
nodeid, vtreeid, lit = [int(val) for val in cols[1:4]]
var = abs(lit)
theta = float(cols[4])
if observations is not None and var in observations:
if lit > 0:
logprob = 0.0 if observations[var] else -math.inf
else:
logprob = 0.0 if not observations[var] else -math.inf
else:
logprob = theta
wmc[nodeid] = logprob
if cols[0] == 'D':
nodeid, vtree_id, nb_elmts = [int(val) for val in cols[1:4]]
elmts = cols[4:]
w = -math.inf
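                # accumulate the element terms in log space with a running
                # log-sum-exp to avoid numerical underflow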
for idx in range(nb_elmts):
p, s, t = elmts[3 * idx: 3 * idx + 3]
wmc_p = wmc[int(p)]
wmc_s = wmc[int(s)]
                    theta = float(t)
add = wmc_p + wmc_s + theta
if math.isinf(w) and w < 0:
w = add
elif math.isinf(add) and add < 0:
pass
elif w < add:
w = add + math.log1p(math.exp(w - add))
else:
w = w + math.log1p(math.exp(add - w))
wmc[nodeid] = w
ln += 1
return wmc[0]
class BitArray:
def __init__(self, size, fill=0):
"""Array of boolean values.
Based on https://wiki.python.org/moin/BitArrays
:param size: Length of array
:param fill: Default value to set. Should be 0 or 1.
"""
int_size = size >> 5 # number of 32 bit integers
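        # Bits are packed into 32-bit unsigned ints: bit i lives in word
        # i >> 5 at offset i & 31 (see is_set/set/clear below).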
if size & 31: # if bitSize != (32 * n) add
int_size += 1 # a record for stragglers
if fill == 1:
fill = 4294967295 # all bits set
else:
fill = 0 # all bits cleared
self.bits = array.array('I') # 'I' = unsigned 32-bit integer
self.bits.extend((fill,) * int_size)
def is_set(self, bit_num):
"""Returns a nonzero result, 2**offset, if the bit at 'bit_num' is set to 1."""
record = bit_num >> 5
offset = bit_num & 31
mask = 1 << offset
return self.bits[record] & mask > 0
def __getitem__(self, item):
return self.is_set(item)
def set(self, bit_num):
"""Set the bit at 'bit_num' to 1."""
record = bit_num >> 5
offset = bit_num & 31
mask = 1 << offset
self.bits[record] |= mask
def __setitem__(self, key, value):
if value == 0:
self.clear(key)
elif value == 1:
self.set(key)
else:
raise ValueError("Key should be 0 or 1")
def clear(self, bit_num):
"""Clear the bit at 'bit_num'."""
record = bit_num >> 5
offset = bit_num & 31
mask = ~(1 << offset)
self.bits[record] &= mask
def toggle(self, bit_num):
"""Invert the bit at 'bit_num', 0 -> 1 and 1 -> 0."""
record = bit_num >> 5
offset = bit_num & 31
mask = 1 << offset
self.bits[record] ^= mask
def __str__(self):
s = ""
for bits in self.bits:
s += "{0:032b}".format(bits)
return s
| 15,843 | 34.13082 | 114 |
py
|
PySDD
|
PySDD-master/pysdd/__init__.py
|
__version__ = "0.2.11"
| 23 | 11 | 22 |
py
|
PySDD
|
PySDD-master/examples/test-3.py
|
#!/usr/bin/env python3
from pathlib import Path
from pysdd.sdd import SddManager, Vtree
def main():
# set up vtree and manager
vtree = Vtree.from_file("input/opt-swap.vtree".encode())
manager = SddManager.from_vtree(vtree)
print("reading sdd from file ...")
alpha = manager.read_sdd_file("input/opt-swap.sdd".encode())
print(f" sdd size = {alpha.size()}")
# ref, perform the minimization, and then de-ref
alpha.ref()
print("minimizing sdd size ... ", end="")
manager.minimize() # see also manager.minimize_limited()
print("done!")
print(f" sdd size = {alpha.size()}")
alpha.deref()
# augment the SDD
print("augmenting sdd ...")
beta = alpha * (manager.l(4) + manager.l(5))
print(f" sdd size = {beta.size()}")
# ref, perform the minimization again on new SDD, and then de-ref
beta.ref()
print("minimizing sdd ... ", end="")
manager.minimize()
print("done!")
print(f" sdd size = {beta.size()}")
beta.deref()
if __name__ == "__main__":
main()
| 1,008 | 23.02381 | 67 |
py
|
PySDD
|
PySDD-master/examples/wmc-1.py
|
#!/usr/bin/env python3
from pathlib import Path
import math
from pysdd.sdd import SddManager, Vtree, WmcManager
here = Path(__file__).parent
def main():
# Start from a given CNF and VTREE file
vtree = Vtree.from_file(bytes(here / "input" / "simple.vtree"))
sdd = SddManager.from_vtree(vtree)
print(f"Created an SDD with {sdd.var_count()} variables")
root = sdd.read_cnf_file(bytes(here / "input" / "simple.cnf"))
# For DNF functions use `read_dnf_file`
# If the vtree is not given, you can also use 'from_cnf_file`
# Model Counting
wmc = root.wmc(log_mode=True)
w = wmc.propagate()
print(f"Model count: {int(math.exp(w))}")
# Weighted Model Counting
lits = [None] + [sdd.literal(i) for i in range(1, sdd.var_count() + 1)]
# Positive literal weight
wmc.set_literal_weight(lits[1], math.log(0.5))
# Negative literal weight
wmc.set_literal_weight(-lits[1], math.log(0.5))
w = wmc.propagate()
print(f"Weighted model count: {math.exp(w)}")
# Visualize SDD and VTREE
print("saving sdd and vtree ... ", end="")
with open(here / "output" / "sdd.dot", "w") as out:
print(sdd.dot(), file=out)
with open(here / "output" / "vtree.dot", "w") as out:
print(vtree.dot(), file=out)
print("done")
if __name__ == "__main__":
main()
| 1,340 | 28.8 | 75 |
py
|
PySDD
|
PySDD-master/examples/test-4.py
|
#!/usr/bin/env python3
from pathlib import Path
from pysdd.sdd import SddManager, Vtree
here = Path(__file__).parent
def main():
# set up vtree and manager
vtree = Vtree.from_file(bytes(here / "input" / "rotate-left.vtree"))
manager = SddManager.from_vtree(vtree)
# construct the term X_1 ^ X_2 ^ X_3 ^ X_4
x = [None] + [manager.literal(i) for i in range(1, 5)]
alpha = x[1]*x[2]*x[3]*x[4]
# to perform a rotate, we need the manager's vtree
manager_vtree = manager.vtree()
manager_vtree_right = manager_vtree.right()
print("saving vtree & sdd ...")
if not Path("output").is_dir():
raise Exception(f"Directory 'output' does not exist")
manager_vtree.save_as_dot("output/before-rotate-vtree.dot".encode())
alpha.save_as_dot("output/before-rotate-sdd.dot".encode())
# ref alpha (so it is not gc'd)
alpha.ref()
# garbage collect (no dead nodes when performing vtree operations)
print(f"dead sdd nodes = {manager.dead_count()}")
print("garbage collection ...")
manager.garbage_collect()
print(f"dead sdd nodes = {manager.dead_count()}")
print("left rotating ... ", end="")
succeeded = manager_vtree_right.rotate_left(manager, 0)
print("succeeded!" if succeeded == 1 else "did not succeed!")
# deref alpha, since ref's are no longer needed
alpha.deref()
# the root changed after rotation, so get the manager's vtree again
# this time using root_location
manager_vtree = manager.vtree()
print("saving vtree & sdd ...")
if not Path("output").is_dir():
raise Exception(f"Directory 'output' does not exist")
manager_vtree.save_as_dot("output/after-rotate-vtree.dot".encode())
alpha.save_as_dot("output/after-rotate-sdd.dot".encode())
if __name__ == "__main__":
main()
| 1,757 | 27.819672 | 70 |
py
|
PySDD
|
PySDD-master/examples/test-2.py
|
#!/usr/bin/env python3
from pathlib import Path
from pysdd.sdd import SddManager, Vtree
def main():
# set up vtree and manager
var_count = 4
vtree_type = "right".encode()
vtree = Vtree(var_count=var_count, vtree_type=vtree_type)
manager = SddManager(vtree=vtree)
x = [None] + [manager.literal(i) for i in range(1,5)]
# construct the term X_1 ^ X_2 ^ X_3 ^ X_4
alpha = x[1] & x[2] & x[3] & x[4]
# construct the term ~X_1 ^ X_2 ^ X_3 ^ X_4
beta = ~x[1] & x[2] & x[3] & x[4]
# construct the term ~X_1 ^ ~X_2 ^ X_3 ^ X_4
gamma = ~x[1] & ~x[2] & x[3] & x[4]
print("== before referencing:")
print(f" live sdd size = {manager.live_size()}")
print(f" dead sdd size = {manager.dead_size()}")
# ref SDDs so that they are not garbage collected
alpha.ref()
beta.ref()
gamma.ref()
print("== after referencing:")
print(f" live sdd size = {manager.live_size()}")
print(f" dead sdd size = {manager.dead_size()}")
# garbage collect
manager.garbage_collect()
    print("== after garbage collection:")
print(f" live sdd size = {manager.live_size()}")
print(f" dead sdd size = {manager.dead_size()}")
alpha.deref()
beta.deref()
gamma.deref()
print("saving vtree & shared sdd ...")
if not Path("output").is_dir():
raise Exception(f"Directory 'output' does not exist")
vtree.save_as_dot("output/shared-vtree.dot".encode())
manager.shared_save_as_dot("output/shared.dot".encode())
if __name__ == "__main__":
main()
| 1,492 | 25.192982 | 59 |
py
|
PySDD
|
PySDD-master/examples/test-1.py
|
#!/usr/bin/env python3
from pysdd.sdd import SddManager, Vtree
def main():
# set up vtree and manager
var_count = 4
var_order = [2,1,4,3]
vtree_type = "balanced"
vtree = Vtree(var_count, var_order, vtree_type)
manager = SddManager.from_vtree(vtree)
# construct a formula (A^B)v(B^C)v(C^D)
print("constructing SDD ... ")
a, b, c, d = [manager.literal(i) for i in range(1, 5)]
alpha = (a & b) | (b & c) | (c & d)
print("done")
print("saving sdd and vtree ... ")
with open("output/sdd.dot", "w") as out:
print(alpha.dot(), file=out)
with open("output/vtree.dot", "w") as out:
print(vtree.dot(), file=out)
print("done")
if __name__ == "__main__":
main()
| 738 | 22.83871 | 58 |
py
|
PySDD
|
PySDD-master/examples/test-5.py
|
#!/usr/bin/env python3
from pathlib import Path
from pysdd.sdd import SddManager, Vtree
here = Path(__file__).parent
def main():
# set up vtree and manager
vtree = Vtree.from_file(bytes(here / "input" / "big-swap.vtree"))
manager = SddManager.from_vtree(vtree)
print("reading sdd from file ...")
alpha = manager.read_sdd_file("input/big-swap.sdd".encode())
print(f" sdd size = {alpha.size()}")
# to perform a swap, we need the manager's vtree
manager_vtree = manager.vtree()
# ref alpha (no dead nodes when swapping)
alpha.ref()
# using size of sdd normalized for manager_vtree as baseline for limit
manager.init_vtree_size_limit(manager_vtree)
limit = 2.0
manager.set_vtree_operation_size_limit(limit)
print(f"modifying vtree (swap node 7) (limit growth by {limit:.1f}x) ... ", end="")
succeeded = manager_vtree.swap(manager, 1) # limited
print("succeeded!" if succeeded == 1 else "did not succeed!")
print(f" sdd size = {alpha.size()}")
print("modifying vtree (swap node 7) (no limit) ... ", end="")
succeeded = manager_vtree.swap(manager, 0) # not limited
print("succeeded!" if succeeded == 1 else "did not succeed!")
print(f" sdd size = {alpha.size()}")
print("updating baseline of size limit ...")
manager.update_vtree_size_limit()
left_vtree = manager_vtree.left()
limit = 1.2
manager.set_vtree_operation_size_limit(limit)
print(f"modifying vtree (swap node 5) (limit growth by {limit}x) ... ", end="")
succeeded = left_vtree.swap(manager, 1) # limited
print("succeeded!" if succeeded == 1 else "did not succeed!")
print(f" sdd size = {alpha.size()}")
limit = 1.3
manager.set_vtree_operation_size_limit(limit)
print(f"modifying vtree (swap node 5) (limit growth by {limit}x) ... ", end="")
succeeded = left_vtree.swap(manager, 1) # limited
print("succeeded!" if succeeded == 1 else "did not succeed!")
print(f" sdd size = {alpha.size()}")
# deref alpha, since ref's are no longer needed
alpha.deref()
if __name__ == "__main__":
main()
| 2,052 | 29.641791 | 85 |
py
|
PySDD
|
PySDD-master/tests/test_vtree.py
|
from pysdd.sdd import SddManager, Vtree
from pysdd.iterator import SddIterator
import sys
import os
import logging
from pathlib import Path
logger = logging.getLogger("pysdd")
directory = None
counter = 0
def test_dot():
vtree = Vtree(var_count=4, var_order=[1, 2, 3, 4], vtree_type="right")
if directory is not None:
with (directory / "vtree1.gv").open("w") as ofile:
s = vtree.dot()
print(s, file=ofile)
with (directory / "vtree2.gv").open("w") as ofile:
s = vtree.dot2()
print(s, file=ofile)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
test_dot()
| 832 | 25.03125 | 74 |
py
|
PySDD
|
PySDD-master/tests/test_wmcstochastic.py
|
from pysdd.sdd import SddManager, Vtree, WmcManager
from pysdd.wmcstochastic import WmcStochastic
import pytest
from array import array
import random
def test_wmc1(verbose=False):
vtree = Vtree(var_count=4, var_order=[2, 1, 4, 3], vtree_type="balanced")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = [sdd.literal(i) for i in range(1, 5)]
# formula = (a & b) | c
formula = (a & b) | (b & c) | (c & d)
if verbose:
with open("sdd.dot", "w") as out:
print(formula.dot(), file=out)
# -d -c -b -a a b c d
weights = array('d', [0.8, 0.7, 0.6, 0.5, 0.5, 0.4, 0.3, 0.2])
# Normal WMC
wmc = WmcManager(formula, log_mode=False)
print(f"MC = {wmc.propagate()}")
wmc.set_literal_weights_from_array(weights)
wmc_result = wmc.propagate()
print(f"WMC-Normal = {wmc_result}")
# Stochastic WMC
wmcs = WmcStochastic(formula, log_mode=False)
wmcs.set_literal_weights_from_array(weights)
wmcs_result = wmcs.propagate(bitlength=1000)
print(f"WMC-Stochastic = {wmcs_result}")
# assert wmc_result == pytest.approx(wmcs_result)
@pytest.mark.skip(reason="Takes too long to generate the figure and is the same test as test_wmc1")
def test_wmc2(verbose=False):
vtree = Vtree(var_count=4, var_order=[2, 1, 4, 3], vtree_type="balanced")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = [sdd.literal(i) for i in range(1, 5)]
# formula = (a & b) | c
formula = (a & b) | (b & c) | (c & d)
# -d -c -b -a a b c d
weights = array('d', [0.8, 0.7, 0.6, 0.5, 0.5, 0.4, 0.3, 0.2])
# Normal WMC
wmc = WmcManager(formula, log_mode=False)
wmc.set_literal_weights_from_array(weights)
wmc_result = wmc.propagate()
# Stochastic WMC
wmcs = WmcStochastic(formula, log_mode=False)
wmcs.set_literal_weights_from_array(weights)
nb_pos, nb_neg, scaling = 0, 0, 1
wmcs_results = []
iterations = 1000
for i in range(iterations):
random.seed()
nb_pos_i, nb_neg_i, scaling_i = wmcs.propagate_counts(bitlength=10)
nb_pos += nb_pos_i
nb_neg += nb_neg_i
scaling = scaling_i
wmcs_results.append((nb_pos / (nb_pos + nb_neg)) * scaling)
print(wmcs_results[-1])
if verbose:
import matplotlib.pyplot as plt
plt.plot([i*10 for i in range(iterations)], [wmc_result]*iterations)
plt.plot([i*10 for i in range(iterations)], wmcs_results)
plt.savefig("stochastic_wmc.png")
if __name__ == "__main__":
test_wmc2(verbose=True)
| 2,607 | 33.315789 | 99 |
py
|
PySDD
|
PySDD-master/tests/test_util.py
|
from pysdd.util import BitArray
import sys
import os
import logging
from pathlib import Path
logger = logging.getLogger("pysdd")
def test_bitarray1():
b = BitArray(10)
print(b)
assert b[4] is False
assert b[3] is False
assert b[2] is False
b[4] = 1
b[2] = True
print(b)
assert b[4] is True
assert b[3] is False
assert b[2] is True
b[4] = 0
b[2] = False
print(b)
assert b[4] is False
assert b[3] is False
assert b[2] is False
def test_bitarray2():
b = BitArray(100) # more than 32 bits
print(b)
assert b[60] is False
assert b[90] is False
b[60] = True
b[90] = True
print(b)
assert b[60] is True
assert b[90] is True
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
test_bitarray1()
| 992 | 19.265306 | 70 |
py
|
PySDD
|
PySDD-master/tests/test_nnf.py
|
from pysdd.util import nnf_file_wmc, sdd_file_wmc, psdd_file_wmc
from pysdd.sdd import Fnf, Vtree, SddManager
from pysdd import cli
import sys
import os
import math
import logging
from pathlib import Path
import filecmp
logger = logging.getLogger("pysdd")
directory = None
counter = 0
here = Path(__file__).parent
def test_nnf1():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 1
}
wmc = nnf_file_wmc(here / "rsrc" / "test.cnf.nnf", weights)
assert wmc == 1.0
def test_nnf2():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 0
}
wmc = nnf_file_wmc(here / "rsrc" / "test.cnf.nnf", weights)
assert wmc == 0.75
# def test_dnf1():
# dnf_filename = str(here / "rsrc" / "test.cnf.nnf")
# fnf = Fnf.from_dnf_file(bytes(dnf_filename, encoding='utf8'))
# # weights = read_weights(dnf_filename)
# vtree = Vtree(var_count=fnf.var_count)
# manager = SddManager.from_vtree(vtree)
# node = manager.fnf_to_sdd(fnf)
# print(node)
def test_sdd1():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 1
}
wmc = sdd_file_wmc(here / "rsrc" / "test.sdd", weights)
print("WMC", wmc)
assert wmc == 1.0, f"{wmc} != 1.0"
def test_sdd2():
weights = {
+3: 0.5, +2: 0.5, +1: 1,
-3: 0.5, -2: 0.5, -1: 0
}
wmc = sdd_file_wmc(here / "rsrc" / "test.sdd", weights)
print("WMC", wmc)
assert wmc == 0.75, f"{wmc} != 0.75"
def test_psdd1():
wmc = psdd_file_wmc(here / "rsrc" / "test.psdd", None)
wmc = math.exp(wmc)
print("WMC", wmc)
def test_dimacs1():
fn_dimacs = here / "rsrc" / "dimacs1.txt"
fn_vtree = here / "rsrc" / "dimacs1.vtree"
fn_vtree_sol = here / "rsrc" / "dimacs1_solution.vtree"
fn_sdd = here / "rsrc" / "dimacs1.sdd"
cli.main(["-c", str(fn_dimacs), "-W", str(fn_vtree), "-R", str(fn_sdd)])
assert filecmp.cmp(fn_vtree, fn_vtree_sol)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_nnf1()
# test_sdd2()
# test_psdd1()
# test_dnf1()
test_dimacs1()
| 2,298 | 23.98913 | 76 |
py
|
PySDD
|
PySDD-master/tests/test_psdd.py
|
from pysdd.util import psdd_file_wmc
import sys
import os
import math
import logging
from pathlib import Path
import tempfile
logger = logging.getLogger("pysdd")
here = Path(__file__).parent
directory = Path(tempfile.gettempdir())
counter = 0
def test_psdd1():
wmc = psdd_file_wmc(here / "rsrc" / "loop.psdd")
wmc = math.exp(wmc)
assert wmc == 1.0, f"1.0 != {wmc}"
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(".")))
print(f"Saving files to {directory}")
test_psdd1()
| 632 | 20.827586 | 58 |
py
|
PySDD
|
PySDD-master/tests/test_minimize.py
|
from pysdd.sdd import SddManager, Vtree
from pysdd.iterator import SddIterator
from pysdd.util import sdd_to_dot, vtree_to_dot
import sys
import os
import logging
from pathlib import Path
import tempfile
logger = logging.getLogger("pysdd")
directory = Path(tempfile.gettempdir())
counter = 0
def test_min1():
vtree = Vtree(var_count=4, var_order=[1, 4, 2, 3], vtree_type="right")
sdd = SddManager.from_vtree(vtree)
sdd.auto_gc_and_minimize_off()
a, b, c, d = sdd.vars
f = ((a & b) | (c & d))
f.ref()
if directory:
names = {
1: 'a', -1: '-a',
2: 'b', -2: '-b',
3: 'c', -3: '-c',
4: 'd', -4: '-d'
}
# with (directory / "vtree1_before_a.gv").open("w") as out:
# print(sdd.vtree().dot(), file=out)
# with (directory / "vtree1_before_b.gv").open("w") as out:
# print(vtree_to_dot(sdd.vtree(), sdd, litnamemap=names, show_id=True), file=out)
# with (directory / "sdd1_before_a.gv").open("w") as out:
# print(sdd.dot(), file=out)
# with (directory / "sdd1_before_b.gv").open("w") as out:
# print(sdd_to_dot(sdd), file=out)
sdd.minimize()
# if directory:
# with (directory / "vtree2_after.gv").open("w") as out:
# print(sdd.vtree().dot(), file=out)
# with (directory / "sdd1_after.gv").open("w") as out:
# print(sdd.dot(), file=out)
f.deref()
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 7.0
def test_min2():
sdd = SddManager(var_count=3)
a, b, c = sdd.vars
fa = b | c
fa.ref()
fb = b
fb.ref()
fc = c
fc.ref()
if directory:
names = {
1: 'a', -1: '-a',
2: 'b', -2: '-b',
3: 'c', -3: '-c'
}
# with (directory / "vtree2_before_a.gv").open("w") as out:
# print(sdd.vtree().dot(), file=out)
# with (directory / "vtree2_before_b.gv").open("w") as out:
# print(vtree_to_dot(sdd.vtree(), sdd, litnamemap=names, show_id=True), file=out)
# with (directory / "sdd2_before_a.gv").open("w") as out:
# print(sdd.dot(), file=out)
# with (directory / "sdd2_before_b.gv").open("w") as out:
# print(sdd_to_dot(sdd), file=out)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(".")))
print(f"Saving files to {directory}")
test_min1()
test_min2()
| 2,621 | 29.488372 | 93 |
py
|
PySDD
|
PySDD-master/tests/test_iterator.py
|
from pysdd.sdd import SddManager, Vtree
from pysdd.iterator import SddIterator
from pysdd.util import sdd_to_dot, vtree_to_dot
import sys
import os
import logging
from pathlib import Path
logger = logging.getLogger("pysdd")
directory = None
counter = 0
def test_it1():
vtree = Vtree(var_count=4, var_order=[1, 2, 3, 4], vtree_type="right")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = sdd.vars[:5]
f = ((a & b) | (c & d))
if directory:
litnamemap = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
for key, val in list(litnamemap.items()):
litnamemap[-key] = f"¬{val}"
with (directory / "sdd1.gv").open("w") as out:
print(f.dot(), file=out)
with (directory / "sdd2.gv").open("w") as out:
print(sdd_to_dot(f, litnamemap=litnamemap, show_id=True), file=out)
with (directory / "vtree1.gv").open("w") as out:
print(sdd.vtree().dot(), file=out)
with (directory / "vtree2.gv").open("w") as out:
print(vtree_to_dot(sdd.vtree(), sdd, litnamemap=litnamemap, show_id=True), file=out)
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 7.0
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 7, "MC {} != 7".format(mc)
it = SddIterator(sdd, smooth=False, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 3, "MC (non-smooth) {} != 3".format(mc)
def test_it2():
""" Test case where formula = True """
vtree = Vtree(var_count=4, var_order=[1, 2, 3, 4], vtree_type="right")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = sdd.vars[:5]
f = (a | -a) # = SddNode(True)
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 16.0
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 16, "MC {} != 16".format(mc)
it = SddIterator(sdd, smooth=False, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 1, "MC (non-smooth) {} != 1".format(mc)
f = (a & -a) # = SddNode(False)
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 0.0
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 0, "MC {} != 0".format(mc)
it = SddIterator(sdd, smooth=False, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 0, "MC (non-smooth) {} != 0".format(mc)
def test_it3():
""" Test case where formula = literal or -literal """
vtree = Vtree(var_count=4, var_order=[1, 2, 3, 4], vtree_type="right")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = sdd.vars[:5]
f = a
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 8.0
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 8, "MC {} != 8".format(mc)
it = SddIterator(sdd, smooth=False, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 1, "MC (non-smooth) {} != 1".format(mc)
f = -a
wmc = f.wmc(log_mode=False)
mc = wmc.propagate()
# print(f"mc = {mc}")
assert mc == 8.0
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 8, "MC {} != 8".format(mc)
it = SddIterator(sdd, smooth=False, smooth_to_root=True)
mc = it.depth_first(f, SddIterator.func_modelcounting)
assert mc == 1, "MC (non-smooth) {} != 1".format(mc)
def test_it4():
vtree = Vtree(var_count=4, var_order=[4, 3, 2, 1], vtree_type="right")
sdd = SddManager.from_vtree(vtree)
a, b, c, d = sdd.vars
f1 = a | b
f2 = f1 | c
f3 = f2 | d
f1.ref()
f2.ref()
f3.ref()
if directory:
litnamemap = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
for key, val in list(litnamemap.items()):
litnamemap[-key] = f"¬{val}"
with (directory / "sdd.gv").open("w") as out:
print(sdd_to_dot(f3, litnamemap=litnamemap, show_id=True), file=out)
with (directory / "vtree.gv").open("w") as out:
            print(vtree_to_dot(sdd.vtree(), sdd, litnamemap=litnamemap, show_id=True), file=out)
it = SddIterator(sdd, smooth=True)
mc = it.depth_first(f1, SddIterator.func_modelcounting)
assert mc == 3, "MC {} != 3".format(mc)
it = SddIterator(sdd, smooth=True, smooth_to_root=True)
mc = it.depth_first(f1, SddIterator.func_modelcounting)
assert mc == 12, "MC {} != 3 * 2**2 = 12".format(mc)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
test_it1()
# test_it2()
# test_it3()
# test_it4()
| 5,176 | 31.974522 | 96 |
py
|
PySDD
|
PySDD-master/docs/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySDD documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 21 16:36:46 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# here = os.path.abspath(os.path.dirname(__file__))
# sys.path.insert(0, os.path.join(here, '..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySDD'
copyright = '2018, Wannes Meert, Arthur Choi'
author = 'Wannes Meert, Arthur Choi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySDDdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PySDD.tex', 'PySDD Documentation',
'Wannes Meert, Arthur Choi', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysdd', 'PySDD Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PySDD', 'PySDD Documentation',
author, 'PySDD', 'One line description of project.',
'Miscellaneous'),
]
| 4,896 | 28.859756 | 79 |
py
|
CA-BNE
|
CA-BNE-master/misc/scripts/llg_fp_anim_BNE.py
|
#!/usr/bin/python3
import os
import sys
import glob
import re
import math
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import itertools
import time
def main(path):
plot_BNE(path)
def plot_BNE(path):
data = []
max_iter = 0
with open(path) as fd:
for line in fd.readlines():
iteration, time_ms, *xyz = line.split()
iteration = int(iteration)
#time_ms = int(time_ms)
itr = iter(xyz)
triples = [(float(x), float(y), float(z)) for (x,y,z) in zip(itr, itr, itr)]
xx, yy, zz = zip(*triples)
data.append((xx, yy, zz))
max_iter = max(max_iter, iteration)
fig = plt.figure(figsize=(12,12))
def anim_update(i):
fig.clear()
fig.suptitle("LLG First Price")
plt.xlim(0.0, 2.0)
plt.ylim(0.0, 2.0)
plt.xticks(np.arange(0, 2.01, 0.1))
plt.yticks(np.arange(0, 2.01, 0.1))
xx, yy, zz = data[i]
plt.plot(xx[:len(xx)//2], yy[:len(xx)//2], "-", clip_box=mpl.transforms.Bbox([[0,0],[0.1,0.3]]), clip_on=True, label="Local Players")
plt.plot(xx, zz, "-", clip_box=mpl.transforms.Bbox([[0,0],[0.1,0.3]]), clip_on=True, label="Global Player")
#plt.plot(xx, yy, ".-")
# analytical BNE
#xx, yy = data[-1]
#plt.plot(xx, yy, "-")
plt.legend(loc='upper left')
a = anim.FuncAnimation(fig, anim_update, frames=max_iter + 1, repeat=False, interval=300)
plt.show()
for path in sys.argv[1:]:
main(path)
| 1,698 | 22.597222 | 141 |
py
|
CA-BNE
|
CA-BNE-master/misc/scripts/llg_anim_BNE.py
|
#!/usr/bin/python3
import os
import sys
import glob
import re
import math
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import itertools
import time
def main(path):
plot_BNE(path)
def plot_BNE(path):
data = []
max_iter = 0
with open(path) as fd:
for line in fd.readlines():
iteration, time_ms, *xy = line.split()
iteration = int(iteration)
#time_ms = int(time_ms)
itr = iter(xy)
xy_pairs = [(float(x), float(y)) for (x,y) in zip(itr, itr)]
if len(xy_pairs) == 0:
xx, yy = [0,1], [.5,.5]
else:
xx, yy = zip(*xy_pairs)
data.append((xx,yy))
max_iter = max(max_iter, iteration)
fig = plt.figure(figsize=(12,12))
def anim_update(i):
fig.clear()
fig.suptitle("LLG")
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.xticks(np.arange(0, 1.01, 0.05))
plt.yticks(np.arange(0, 1.01, 0.05))
xx, yy = data[i]
plt.plot(xx, yy, "-", clip_box=mpl.transforms.Bbox([[0,0],[0.1,0.3]]), clip_on=True)
#plt.plot(xx, yy, ".-")
# analytical BNE
#xx, yy = data[-1]
#plt.plot(xx, yy, "-")
a = anim.FuncAnimation(fig, anim_update, frames=max_iter + 1, repeat=False, interval=300)
plt.show()
for path in sys.argv[1:]:
main(path)
| 1,531 | 20.885714 | 93 |
py
|
CA-BNE
|
CA-BNE-master/misc/scripts/llllgg_anim_BNE.py
|
#!/usr/bin/python3
import os
import sys
import glob
import re
import math
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import itertools
import time
class MySubplot(object):
def __init__(self, data, ax, title, zlim):
self.data = data
self.ax = ax
self.title = title
self.zlim = zlim
self.x = {}
self.y = {}
for k,v in data.items():
n = len(v)
self.x[k] = np.kron(np.ones((1,n)), np.matrix(np.linspace(zlim[0], zlim[1], n)).T)
self.y[k] = self.x[k].T
# parameters are elevation and azimuth (in degrees, not radians)
ax.view_init(None, -135)
def anim_update(self, i):
self.ax.clear()
self.ax.set_zlim(self.zlim)
self.ax.set_title(self.title)
self.ax.plot_wireframe(self.x[i], self.y[i], self.data[i])
class Main(object):
data = {}
titles = ["Local Left", "Local Right", "Global Left", "Global Right"]
zlims = [(0,1),(0,1),(0,2),(0,2)]
def __init__(self, path, *sliceargs):
suffix = "strats"
if len(sliceargs) == 0:
self.slice = slice(9999999)
else:
self.slice = slice(*[int(x) for x in sliceargs])
if 0:
# plot utilitylosses instead of strats
suffix = "utilitylosses"
self.zlims = [(0,0.01),(0,0.01),(0,0.01),(0,0.01)]
if len(sliceargs) == 0:
self.slice = slice(1, 9999999)
else:
self.slice = slice(*[max(1, int(x)) for x in sliceargs])
#read in data
strategy_files = [os.path.join(path, f) for f in os.listdir(path)
if re.fullmatch("iter\d*\." + suffix, f)]
print(strategy_files)
for f in strategy_files:
self.parse_strategy_file(f)
self.anim()
def parse_strategy_file(self, path):
players = []
with open(path) as fd:
lines = iter(fd)
#read header line
iteration, = re.match('''"Iteration","(\d+)"''', next(lines)).groups()
#iteration, = re.match(".*?iter(\d+)\\.txt\\.epsilon", path).groups()
#next(lines)
player = None
for line in lines:
numbers = re.findall('''\d+\.\d+''', line)
if not numbers:
# we are at a new player
player = line.strip('"')
players.append([])
continue
players[-1].append([float(i) for i in numbers])
# add data for global player's other strategy
players.append(list(zip(*players[-1])))
self.data[int(iteration)] = players
def anim(self):
myplots = []
fig = plt.figure(figsize=(16,12))
for i in range(4):
sp = MySubplot(
data={k:v[i] for k,v in self.data.items()},
ax=fig.add_subplot(221 + i, projection='3d'),
title=self.titles[i],
zlim=self.zlims[i],
)
myplots.append(sp)
frames = range(max(self.data.keys())+1)[self.slice]
print(frames)
def anim_update(i):
i = min(i, len(frames)-1)
i = frames[i]
for k in myplots:
k.anim_update(i)
#a = anim.FuncAnimation(fig, anim_update, frames=len(frames) + 5, repeat=True, interval=300)
a = anim.FuncAnimation(fig, anim_update, frames=len(frames) + 5, repeat=True, interval=500)
plt.show()
Main(*sys.argv[1:])
| 3,855 | 27.992481 | 100 |
py
|
panoptes-python-client
|
panoptes-python-client-master/setup.py
|
from setuptools import setup, find_packages
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name='panoptes_client',
url='https://github.com/zooniverse/panoptes-python-client',
author='Adam McMaster / Zooniverse',
author_email='[email protected]',
description=(
'This package is the Python SDK for Panoptes, the platform behind the Zooniverse. This module is intended to allow programmatic management of projects, providing high level access to the API for common project management tasks.'
),
long_description=long_description,
long_description_content_type='text/markdown',
version='1.6.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'requests>=2.4.2,<2.29',
'future>=0.16,<0.19',
'python-magic>=0.4,<0.5',
'redo>=1.7',
'six>=1.9',
],
extras_require={
'testing': [
'mock>=2.0,<5.2',
],
'docs': [
'sphinx',
],
':python_version == "2.7"': ['futures'],
}
)
| 1,147 | 30.888889 | 236 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/inaturalist.py
|
from panoptes_client.panoptes import Panoptes
class Inaturalist(object):
"""
The class that interacts with the Panoptes' iNaturalist functionality.
Currently, this includes a single route that allows the importing of
iNaturalist Observations as Zooniverse Subjects.
"""
def inat_import(
taxon_id,
subject_set_id,
updated_since=None
):
"""
Begins an import of iNaturalist Observations as Zooniverse Subjects.
Response is a 200 if Panoptes begins the import successfully.
Requires owner or collaborator access to the subject set's linked project.
Takes three arguments:
taxon_id: the iNat taxon ID of a particular species
        subject_set_id: the Zooniverse subject set id that subjects should be imported into.
            Updated observations will upsert their respective subjects.
        updated_since: a date range limiter on the iNat Observations query.
            Warning: defaults to None, which will import ALL Observations.
            This will likely be a lot and take a while.
Examples::
# Import gray squirrel observations updated during or after Halloween 2022 to subject set id 3:
Inaturalist.inat_import(46017, 3, '2022-10-31')
# Import all royal flycatcher observations to subject set id 4:
Inaturalist.inat_import(16462, 4)
"""
return Panoptes.client().post(
f'/inaturalist/import',
json={
'taxon_id': taxon_id,
'subject_set_id': subject_set_id,
'updated_since': updated_since
}
)
| 1,734 | 39.348837 | 107 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/user.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
BATCH_SIZE = 50
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
# This is a workaround for
# https://github.com/zooniverse/Panoptes/issues/2733
kwargs['page_size'] = BATCH_SIZE
if email:
if not isiterable(email):
email = [email]
for batch in split(email, BATCH_SIZE):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, BATCH_SIZE):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
| 1,655 | 26.147541 | 73 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/subject_workflow_status.py
|
from panoptes_client.panoptes import PanoptesObject
class SubjectWorkflowStatus(PanoptesObject):
"""
Retrieve SubjectWorkflowStatus responses from Panoptes i.e. the retirement
status (current state, retirement date, retirement reason) of a
subject/workflow pair.
Example use:
Get the status of a given subject:
subject_workflow_status = next(
SubjectWorkflowStatus.where(subject_id='30089908')
)
The .where(kwargs) method works with:
- id (i.e. the id of the SubjectWorkflowStatus, which is *not* the same as
the subject_id)
- subject_id
- workflow_id
Remember that one subject may be classified on many workflows, and hence
may have many SubjectWorkflowStatus' (one per subject/workflow pair).
"""
_api_slug = 'subject_workflow_statuses'
_edit_attributes = {}
| 861 | 29.785714 | 78 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/classification.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import LinkResolver, PanoptesObject
class Classification(PanoptesObject):
_api_slug = 'classifications'
_link_slug = 'classification'
_edit_attributes = ( )
@classmethod
def where(cls, **kwargs):
"""
where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
)
"""
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs))
LinkResolver.register(Classification)
| 1,265 | 30.65 | 139 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/project_role.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
class ProjectRole(PanoptesObject):
_api_slug = 'project_roles'
_link_slug = 'project_roles'
_edit_attributes = ()
LinkResolver.register(ProjectRole)
| 296 | 23.75 | 65 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/subject.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
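# On Python 2, `unicode` is added to the accepted string types below; on
# Python 3 the NameError leaves just `str`.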
_OLD_STR_TYPES = (str,)
try:
_OLD_STR_TYPES = _OLD_STR_TYPES + (unicode,)
except NameError:
pass
from builtins import range, str
import logging
import requests
import threading
import time
from copy import deepcopy
from concurrent.futures import ThreadPoolExecutor
try:
import magic
MEDIA_TYPE_DETECTION = 'magic'
except ImportError:
import pkg_resources
try:
pkg_resources.require("python-magic")
logging.getLogger('panoptes_client').warn(
'Broken libmagic installation detected. The python-magic module is'
' installed but can\'t be imported. Please check that both '
'python-magic and the libmagic shared library are installed '
'correctly. Uploading media other than images may not work.'
)
except pkg_resources.DistributionNotFound:
pass
import imghdr
MEDIA_TYPE_DETECTION = 'imghdr'
from panoptes_client.panoptes import (
LinkResolver,
Panoptes,
PanoptesAPIException,
PanoptesObject,
)
from redo import retry
UPLOAD_RETRY_LIMIT = 5
RETRY_BACKOFF_INTERVAL = 5
ASYNC_SAVE_THREADS = 5
class Subject(PanoptesObject):
_api_slug = 'subjects'
_link_slug = 'subjects'
_edit_attributes = (
'locations',
'metadata',
{
'links': (
'project',
),
},
)
_local = threading.local()
@classmethod
def async_saves(cls):
"""
Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown()
"""
cls._local.save_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS
)
return cls._local.save_exec
def __init__(self, raw={}, etag=None):
super(Subject, self).__init__(raw, etag)
if not self.locations:
self.locations = []
if not self.metadata:
self.metadata = {}
self._original_metadata = {}
self._media_files = [None] * len(self.locations)
def save(self, client=None):
"""
Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
        which have previously been added to the subject with
:py:meth:`add_location`. Automatically retries uploads on error.
If multiple local files are to be uploaded, several files will be
uploaded simultaneously to save time.
"""
if not client:
client = Panoptes.client()
async_save = hasattr(self._local, 'save_exec')
with client:
if async_save:
try:
# The recursive call will exec in a new thread, so
# self._local.save_exec will be undefined above
self._async_future = self._local.save_exec.submit(
self.save,
client=client,
)
return
except RuntimeError:
del self._local.save_exec
async_save = False
if not self.metadata == self._original_metadata:
self.modified_attributes.add('metadata')
response = retry(
super(Subject, self).save,
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(PanoptesAPIException,),
log_args=False,
)
if not response:
return
try:
if async_save:
upload_exec = self._local.save_exec
else:
upload_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS,
)
for location, media_data in zip(
response['subjects'][0]['locations'],
self._media_files
):
if not media_data:
continue
for media_type, url in location.items():
upload_exec.submit(
retry,
self._upload_media,
args=(url, media_data, media_type),
attempts=UPLOAD_RETRY_LIMIT,
sleeptime=RETRY_BACKOFF_INTERVAL,
retry_exceptions=(
requests.exceptions.RequestException,
),
log_args=False,
)
self._media_files = [None] * len(self.locations)
finally:
if not async_save:
upload_exec.shutdown()
def _upload_media(self, url, media_data, media_type):
upload_response = requests.put(
url,
headers={
'Content-Type': media_type,
'x-ms-blob-type': 'BlockBlob',
},
data=media_data,
)
upload_response.raise_for_status()
return upload_response
@property
def async_save_result(self):
"""
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
"""
if hasattr(self, "_async_future") and self._async_future.done():
self._async_future.result()
return True
else:
return False
def set_raw(self, raw, etag=None, loaded=True):
super(Subject, self).set_raw(raw, etag, loaded)
if loaded and self.metadata:
self._original_metadata = deepcopy(self.metadata)
elif loaded:
self._original_metadata = None
def subject_workflow_status(self, workflow_id):
"""
Returns SubjectWorkflowStatus of Subject in Workflow
Example::
subject.subject_workflow_status(4321)
"""
return next(SubjectWorkflowStatus.where(subject_id=self.id, workflow_id=workflow_id))
def add_location(self, location):
"""
Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'})
"""
if type(location) is dict:
self.locations.append(location)
self._media_files.append(None)
self.modified_attributes.add('locations')
return
elif type(location) in (str,) + _OLD_STR_TYPES:
f = open(location, 'rb')
else:
f = location
try:
media_data = f.read()
if MEDIA_TYPE_DETECTION == 'magic':
media_type = magic.from_buffer(media_data, mime=True)
else:
media_type = imghdr.what(None, media_data)
if not media_type:
raise UnknownMediaException(
'Could not detect file type. Please try installing '
'libmagic: https://panoptes-python-client.readthedocs.'
'io/en/latest/user_guide.html#uploading-non-image-'
'media-types'
)
media_type = 'image/{}'.format(media_type)
self.locations.append(media_type)
self._media_files.append(media_data)
self.modified_attributes.add('locations')
finally:
f.close()
class UnknownMediaException(Exception):
pass
LinkResolver.register(Subject)
LinkResolver.register(Subject, 'subject')
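# Usage sketch (illustrative only, not part of the library): ties together
# async_saves(), add_location() and async_save_result as described in the
# docstrings above. The project id and file names are placeholders; an
# authenticated session and existing local files are assumed.
def _example_async_subject_upload(project_id=1234, filenames=('image1.jpg', 'image2.jpg')):
    subjects = []
    with Subject.async_saves():
        for filename in filenames:
            s = Subject()
            s.links.project = project_id
            s.add_location(filename)
            s.save()
            subjects.append(s)
    # The thread pool is shut down when the context manager exits, so each
    # future is done here: async_save_result returns True on success or
    # re-raises the error that occurred during the upload.
    return [s.async_save_result for s in subjects]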
| 9,185 | 31.574468 | 93 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/set_member_subject.py
|
from panoptes_client.panoptes import PanoptesObject, LinkResolver
class SetMemberSubject(PanoptesObject):
_api_slug = 'set_member_subjects'
_link_slug = 'set_member_subjects'
_edit_attributes = ()
LinkResolver.register(SetMemberSubject)
LinkResolver.register(SetMemberSubject, 'set_member_subject')
| 314 | 27.636364 | 65 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/project.py
|
from __future__ import absolute_import, division, print_function
from copy import deepcopy
from panoptes_client.panoptes import (
LinkCollection,
LinkResolver,
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.project_role import ProjectRole
from panoptes_client.exportable import Exportable
from panoptes_client.utils import batchable
class ProjectLinkCollection(LinkCollection):
def add(self, objs):
from panoptes_client.workflow import Workflow
from panoptes_client.subject_set import SubjectSet
result = super(ProjectLinkCollection, self).add(objs)
# Some classes are copied into the project as new objects
# So we reload to pick those up.
if self._cls in (SubjectSet, Workflow):
self._parent.reload()
return result
class Project(PanoptesObject, Exportable):
_api_slug = 'projects'
_link_slug = 'project'
_edit_attributes = (
'display_name',
'description',
'tags',
'introduction',
'private',
'primary_language',
'configuration',
)
_link_collection = ProjectLinkCollection
def __init__(self, raw={}, etag=None):
super(Project, self).__init__(raw, etag)
if not self.configuration:
self.configuration = {}
self._original_configuration = {}
def set_raw(self, raw, etag=None, loaded=True):
super(Project, self).set_raw(raw, etag, loaded)
if loaded and self.configuration:
self._original_configuration = deepcopy(self.configuration)
elif loaded:
self._original_configuration = None
def save(self):
"""
Adds project configuration to the list of savable attributes
if it has changed.
"""
if not self.configuration == self._original_configuration:
self.modified_attributes.add('configuration')
super(Project, self).save()
@classmethod
def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
project_1234 = Project.find(1234)
galaxy_zoo = Project.find(slug="zooniverse/galaxy-zoo")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find project with slug='{}'".format(slug)
)
def collaborators(self, *roles):
"""
Returns a list of :py:class:`.User` who are collaborators on this
project.
Zero or more role arguments can be passed as strings to narrow down the
results. If any roles are given, users who possess at least one of the
given roles are returned.
Examples::
all_collabs = project.collaborators()
moderators = project.collaborators("moderators")
moderators_and_translators = project.collaborators(
"moderators",
"translators",
)
"""
return [
r.links.owner for r in ProjectRole.where(project_id=self.id)
if len(roles) == 0 or len(set(roles) & set(r.roles)) > 0
]
@batchable
def _add_links(self, linked_objects, link_type):
object_ids = []
for linked_object in linked_objects:
if hasattr(linked_object, 'id'):
object_ids.append(linked_object.id)
else:
object_ids.append(str(linked_object))
self.http_post(
'{}/links/{}'.format(self.id, link_type),
json={
link_type: object_ids
}
)
def add_subject_sets(self, subject_sets):
"""
Links the given subject sets to this project. New subject sets are
created as copies of the given sets.
- **subject_sets** can be a list of :py:class:`.SubjectSet`
instances, a list of subject set IDs, a single
:py:class:`.SubjectSet` instance, or a single subject set ID.
Examples::
project.add_subject_sets(1234)
project.add_subject_sets([1,2,3,4])
project.add_subject_sets(SubjectSet(1234))
project.add_subject_sets([SubjectSet(12), SubjectSet(34)])
"""
return self._add_links(
subject_sets,
'subject_sets',
)
def add_workflows(self, workflows):
"""
Links the given workflows to this project. New workflows are
created as copies of the given workflows.
- **workflows** can be a list of :py:class:`.Workflow` instances,
a list of workflow IDs, a single :py:class:`.Workflow`
instance, or a single workflow ID.
Examples::
project.add_workflows(1234)
project.add_workflows([1,2,3,4])
project.add_workflows(Workflow(1234))
project.add_workflows([Workflow(12), Workflow(34)])
"""
return self._add_links(
workflows,
'workflows',
)
@property
def avatar(self):
"""
A dict containing metadata about the project's avatar.
"""
return self.http_get('{}/avatar'.format(self.id))[0]
@property
def attached_images(self):
return self.http_get('{}/attached_images'.format(self.id))[0]
def add_attached_image(
self,
src,
content_type='image/png',
external_link=True,
metadata={},
):
return self.http_post(
'{}/attached_images'.format(self.id),
json={'media': {
'src': src,
'content_type': content_type,
'external_link': external_link,
'metadata': metadata,
}},
)
def copy(self, new_subject_set_name=None):
"""
Copy this project to a new project that will be owned by the
currently authenticated user.
A new_subject_set_name string argument can be passed which will be
used to name a new SubjectSet for the copied project.
This is useful for having an upload target straight after cloning.
Examples::
project.copy()
project.copy("My new subject set for uploading")
"""
payload = {}
if new_subject_set_name:
payload['create_subject_set'] = new_subject_set_name
response = self.http_post(
'{}/copy'.format(self.id),
json=payload,
)
# find the API resource response in the response tuple
resource_response = response[0]
# save the etag from the copied project response
etag = response[1]
# extract the raw copied project resource response
raw_resource_response = resource_response[self._api_slug][0]
# convert it into a new project model representation
# ensure we provide the etag - without it the resource won't be savable
copied_project = Project(raw_resource_response, etag)
return copied_project
LinkResolver.register(Project)
LinkResolver.register(Project, 'projects')
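# Usage sketch (illustrative only, not part of the library): find() by slug
# plus collaborators() as described above. The slug is a placeholder; the
# role name follows the collaborators() docstring example, and an
# authenticated session is assumed.
def _example_project_moderators(slug='example/my-project'):
    project = Project.find(slug=slug)
    # collaborators() filters the project's ProjectRole records by role name.
    return project.collaborators('moderators')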
| 7,315 | 29.739496 | 79 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/utils.py
|
from __future__ import absolute_import, division, print_function
from builtins import range
import functools
ITERABLE_TYPES = (
list,
set,
tuple,
)
MISSING_POSITIONAL_ERR = 'Required positional argument (pos 1) not found'
try:
from numpy import ndarray
ITERABLE_TYPES = ITERABLE_TYPES + (ndarray,)
except ImportError:
pass
def isiterable(v):
return isinstance(v, ITERABLE_TYPES)
def split(to_batch, batch_size):
if type(to_batch) == set:
to_batch = tuple(to_batch)
for batch in [
to_batch[i:i + batch_size]
for i in range(0, len(to_batch), batch_size)
]:
yield batch
def batchable(func=None, batch_size=100):
@functools.wraps(func)
def do_batch(*args, **kwargs):
if len(args) <= 1:
raise TypeError(MISSING_POSITIONAL_ERR)
_batch_size = kwargs.pop('batch_size', batch_size)
_self = args[0]
to_batch = args[1]
args = args[2:]
if not isiterable(to_batch):
to_batch = [to_batch]
if isinstance(to_batch, set):
to_batch = list(to_batch)
for batch in split(to_batch, _batch_size):
if _self is None:
func(batch, *args, **kwargs)
else:
func(_self, batch, *args, **kwargs)
# This avoids us having to call batchable wherever it's used, so we can
# just write:
# @batchable
# def func(self, ...):
#
# Rather than:
# @batchable()
# def func(self, ...):
#
# While still allowing this:
# @batchable(batch_size=10)
# def func(self, ...):
if func is None:
return functools.partial(batchable, batch_size=batch_size)
return do_batch
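# Usage sketch (illustrative only, not part of the library): applies the
# decorator above to a stand-in class, mirroring how Project._add_links uses
# it. _FakeClient and _example_batchable_usage are invented names for this
# demonstration only.
class _FakeClient(object):
    def __init__(self):
        self.calls = []
    @batchable
    def add_items(self, items):
        # Each invocation receives at most batch_size items (default 100).
        self.calls.append(list(items))
def _example_batchable_usage():
    client = _FakeClient()
    client.add_items(list(range(250)), batch_size=100)
    return [len(batch) for batch in client.calls]  # -> [100, 100, 50]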
| 1,742 | 22.554054 | 75 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/project_preferences.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.project import Project
from panoptes_client.user import User
class ProjectPreferences(PanoptesObject):
"""
Contains the settings for a :py:class:`.User` on a :py:class:`.Project`.
"""
_api_slug = 'project_preferences'
_link_slug = 'project_preferences'
_edit_attributes = (
'preferences',
'settings',
)
@classmethod
def find(cls, id='', user=None, project=None):
"""
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
"""
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id)
@classmethod
def save_settings(cls, project=None, user=None, settings=None):
"""
Save settings for a user without first fetching their preferences.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
"""
        if isinstance(settings, dict):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError
LinkResolver.register(ProjectPreferences)
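# Usage sketch (illustrative only, not part of the library): shows the
# id-based calling style accepted by find() and save_settings() above. The
# ids and the settings payload are placeholders; an authenticated session
# with permission to update the user's settings is assumed.
def _example_update_settings(user_id='1', project_id='2'):
    ProjectPreferences.save_settings(
        project=project_id,
        user=user_id,
        settings={'workflow_id': '123'},
    )
    return ProjectPreferences.find(user=user_id, project=project_id)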
| 3,085 | 32.182796 | 78 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/workflow_version.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import (
Panoptes,
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.workflow import Workflow
class WorkflowVersion(PanoptesObject):
_api_slug = 'versions'
_edit_attributes = tuple()
@classmethod
def http_get(cls, path, params={}, headers={}):
workflow = params.pop('workflow')
return Panoptes.client().get(
Workflow.url(workflow.id) + cls.url(path),
params,
headers,
)
@classmethod
def find(cls, _id, workflow):
"""
Like :py:meth:`.PanoptesObject.find` but also allows lookup by
workflow.
- **workflow** must be a :py:class:`.Workflow` instance.
"""
try:
return cls.where(id=_id, workflow=workflow).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find {} with id='{}'".format(cls.__name__, _id)
)
def save(self):
"""
Not implemented for this class. It is not possible to modify workflow
versions once they are created.
"""
raise NotImplementedError(
'It is not possible to manually create workflow versions. '
'Modify the workflow instead.'
)
@property
def workflow(self):
"""
The :py:class:`.Workflow` to which this version refers.
"""
return self.links.item
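# Usage sketch (illustrative only, not part of the library): the
# workflow-scoped lookup described in find() above. Both ids are
# placeholders and an authenticated session is assumed.
def _example_find_workflow_version(workflow_id=1234, version_id=5678):
    workflow = Workflow(workflow_id)
    return WorkflowVersion.find(version_id, workflow)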
| 1,518 | 25.189655 | 77 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/collection_role.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
class CollectionRole(PanoptesObject):
_api_slug = 'collection_roles'
_link_slug = 'collection_roles'
_edit_attributes = (
'roles',
{
'links': (
'collection',
'user',
),
},
)
LinkResolver.register(CollectionRole)
| 444 | 20.190476 | 65 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/collection.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.panoptes import (
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.subject import Subject
from panoptes_client.utils import batchable
class Collection(PanoptesObject):
_api_slug = 'collections'
_link_slug = 'collections'
_edit_attributes = (
'name',
'description',
'display_name',
'private',
{
'links': (
'project',
),
},
)
@classmethod
def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
)
@property
def subjects(self):
"""
A generator which yields each :py:class:`.Subject` in this collection.
"""
return self.links.subjects
def add(self, subjects):
"""
A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::
collection.links.add(subjects)
"""
return self.links.subjects.add(subjects)
def remove(self, subjects):
"""
A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::
collection.links.remove(subjects)
"""
return self.links.subjects.remove(subjects)
def set_default_subject(self, subject):
"""
Sets the subject's location media URL as a link.
It displays as the default subject on PFE.
- **subject** can be a single :py:class:`.Subject` instance or a single
subject ID.
Examples::
collection.set_default_subject(1234)
collection.set_default_subject(Subject(1234))
"""
if not (
isinstance(subject, Subject)
or isinstance(subject, (int, str,))
):
raise TypeError
if isinstance(subject, Subject):
_subject_id = subject.id
else:
_subject_id = str(subject)
self.http_post(
'{}/links/default_subject'.format(self.id),
json={'default_subject': _subject_id},
)
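# Usage sketch (illustrative only, not part of the library): combines
# find(), add() and set_default_subject() from the docstrings above. The
# slug and subject ids are placeholders; it assumes an authenticated session
# with write access and that LinkCollection.add accepts plain ids as well as
# Subject instances.
def _example_populate_collection(slug='example/my-collection', subject_ids=(1, 2, 3)):
    collection = Collection.find(slug=slug)
    collection.add(list(subject_ids))
    collection.set_default_subject(subject_ids[0])
    return collection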
| 2,627 | 25.019802 | 79 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/caesar.py
|
from panoptes_client.panoptes import Panoptes, PanoptesAPIException
class Caesar(object):
"""
The low-level Caesar HTTP client class. Use this class to interact with the
Caesar API. User credentials are shared with Panoptes, so log in via
:py:meth:`.Panoptes.connect` before use.
"""
EXTRACTOR_TYPES = ['blank', 'external', 'question', 'survey', 'who', 'pluck_field', 'shape']
REDUCER_TYPES = [
'consensus', 'count', 'placeholder', 'external', 'first_extract', 'stats',
'unique_count', 'rectangle', 'sqs'
]
RULE_TO_ACTION_TYPES = {
'subject': ['retire_subject', 'add_subject_to_set', 'add_to_collection', 'external', 'external_with_basic_auth'],
'user': ['promote_user']
}
def __init__(
self,
endpoint='https://caesar.zooniverse.org',
redirect_url='https://caesar.zooniverse.org/auth/zooniverse/callback'
):
self.endpoint = endpoint
self.headers = {
'Accept': 'application/json'
}
def http_get(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
kwargs['headers'] = self.headers
return Panoptes.client().get(*args, **kwargs)
def http_post(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
kwargs['headers'] = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
return Panoptes.client().post(*args, **kwargs)
def http_put(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
kwargs['headers'] = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
return Panoptes.client().put(*args, **kwargs)
def http_delete(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
return Panoptes.client().delete(*args, **kwargs)
def get_workflow(self, workflow_id):
"""
        Returns the workflow as a dict if it exists in Caesar.
"""
return self.http_get(f'workflows/{workflow_id}')[0]
def get_reductions_by_workflow_and_subject(self, workflow_id, subject_id):
"""
Returns a list of all subject reductions as dicts from Caesar given the ids of the workflow and subject.
"""
return self.http_get(f'workflows/{workflow_id}/subjects/{subject_id}/reductions')[0]
def get_workflow_extractors(self, workflow_id):
"""
Returns a list of extractors as dicts from Caesar for workflow with provided workflow_id
"""
return self.http_get(f'workflows/{workflow_id}/extractors')[0]
def get_workflow_reducers(self, workflow_id):
"""
Returns a list of reducers as dicts from Caesar for workflow with provided workflow_id
"""
return self.http_get(f'workflows/{workflow_id}/reducers')[0]
def get_extracts_by_workflow_and_subject(self, workflow_id, subject_id):
"""
        Returns a list of extracts as dicts from Caesar for the given workflow and subject.
"""
return self.http_get(
f'workflows/{workflow_id}/extractors/extractor/extracts', params={'subject_id': subject_id})[0]
def save_workflow(self, workflow_id, public_extracts=False, public_reductions=False):
"""
        Adds or updates the workflow with the given workflow_id in Caesar.
        If the workflow does not yet exist in Caesar it is created; otherwise the
        existing Caesar workflow is updated. Returns the workflow as a dict from Caesar.
Examples::
Caesar().save_workflow(123, public_extracts=True, public_reductions=True)
"""
try:
self.get_workflow(workflow_id)
except PanoptesAPIException as err:
if "couldn't find workflow with 'id'" in str(err).lower():
return self.http_post('workflows', json={
'workflow': {
'id': workflow_id,
'public_extracts': public_extracts,
'public_reductions': public_reductions
}
})[0]
else:
raise err
else:
return self.http_put(f'workflows/{workflow_id}', json={
'workflow': {
'id': workflow_id,
'public_extracts': public_extracts,
'public_reductions': public_reductions
}
})[0]
def create_workflow_extractor(self, workflow_id, extractor_key,
extractor_type, task_key='T0', other_extractor_attributes=None):
"""
        Adds a Caesar extractor for the workflow with id workflow_id. Returns the extractor as a dict with 'id' if successful.
- **extractor_type** can be one of the following: 'blank', 'external', 'question', 'survey', 'who', 'pluck_field', or 'shape'
        - **extractor_key** is the unique key that you want to give to the extractor. The key will be used to track this specific extractor within Caesar.
Examples::
            Caesar().create_workflow_extractor(12, 'complete', 'question', 'T0', {'if_missing': 'ignore'})
"""
self.validate_extractor_type(extractor_type)
if other_extractor_attributes is None:
other_extractor_attributes = {}
payload = {
'extractor': {
'type': extractor_type,
'key': extractor_key,
'task_key': task_key,
**other_extractor_attributes
}
}
return self.http_post(f'workflows/{workflow_id}/extractors', json=payload)[0]
def create_workflow_reducer(self, workflow_id, reducer_type, key, other_reducer_attributes=None):
"""
Adds a Caesar reducer for given workflow. Will return reducer as dict with 'id' if successful.
- **reducer_type** can be one of the following:
'consensus', 'count', 'placeholder', 'external', 'first_extract',
'stats', 'unique_count', 'rectangle', 'sqs'
- **key** is a unique name for your reducer. This key will be used to track this specific reducer within Caesar.
Examples::
Caesar().create_workflow_reducer(1234, 'count', 'count', {'filters' : {'extractor_keys': ['complete']}})
"""
self.validate_reducer_type(reducer_type)
if other_reducer_attributes is None:
other_reducer_attributes = {}
payload = {
'reducer': {
'type': reducer_type,
'key': key,
**other_reducer_attributes
}
}
return self.http_post(f'workflows/{workflow_id}/reducers', json=payload)[0]
def create_workflow_rule(self, workflow_id, rule_type, condition_string='[]'):
"""
Adds a Caesar rule for given workflow. Will return rule as a dict with 'id' if successful.
- **condition_string** is a string that represents a single operation (sometimes nested).
        The general syntax is similar to writing Lisp in JSON:
        it is a stringified array whose first item is a string identifying the operator.
        See https://zooniverse.github.io/caesar/#rules for examples of condition strings.
- **rule_type** can either be 'subject' or 'user'
Examples::
caesar = Caesar()
workflow = Workflow(1234)
caesar.create_workflow_rule(workflow.id, 'subject','["gte", ["lookup", "complete.0", 0], ["const", 3]]')
"""
self.validate_rule_type(rule_type)
payload = {
f'{rule_type}_rule': {
'condition_string': condition_string
}
}
return self.http_post(f'workflows/{workflow_id}/{rule_type}_rules', json=payload)[0]
def create_workflow_rule_effect(self, workflow_id, rule_type, rule_id, action, config=None):
"""
Adds a Caesar effect for workflow with id `workflow_id` and rule with id `rule_id`.
Method will return effect as a dict with 'id' if successful.
- **rule_type** can either be 'subject' or 'user'
- **rule_id** is the id of the subject rule or user rule that the effect should run
- **action** can be one of the following:
- **(actions for subject rules)** - 'retire_subject', 'add_subject_to_set', 'add_to_collection', 'external'
- **(actions for user rules)** - 'promote_user'
Examples::
retirement_config = {'reason': 'classification_count'}
Caesar().create_workflow_rule_effect(1, 'subject', subject_rule['id'], 'retire_subject', retirement_config)
"""
self.validate_rule_type(rule_type)
self.validate_action(rule_type, action)
if config is None:
config = {}
payload = {
f'{rule_type}_rule_effect': {
'action': action,
'config': config
}
}
request_url = f'workflows/{workflow_id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects'
return self.http_post(request_url, json=payload)[0]
def import_data_extracts(self, workflow_id, csv_source):
"""
Imports machine-learnt data extracts into Caesar.
- **csv_source** must be a publicly accessible csv at the time of import.
Eg. csv can be hosted via an AWS S3 Bucket, Azure Blob Storage, or Panoptes media item.
See `this csv <https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv>`_ as an example.
        The CSV at `csv_source` must contain the following columns:
- `extractor_key` (key corresponding to the extractor in Caesar)
- `subject_id`
- `data` (the machine learnt data for the corresponding subject). This entry should be JSON.
Example::
caesar = Caesar(endpoint='https://caesar-staging.zooniverse.org')
caesar.import_data_extracts(1234, 'https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv')
"""
return self.http_post(f'workflows/{workflow_id}/extracts/import', json={'file': csv_source})
def validate_rule_type(self, rule_type):
if rule_type not in self.RULE_TO_ACTION_TYPES.keys():
            raise ValueError(f'Invalid rule type: {rule_type}. Rule types can be either "subject" or "user"')
def validate_reducer_type(self, reducer_type):
if reducer_type not in self.REDUCER_TYPES:
raise ValueError('Invalid reducer type')
def validate_extractor_type(self, extractor_type):
if extractor_type not in self.EXTRACTOR_TYPES:
raise ValueError('Invalid extractor type')
def validate_action(self, rule_type, action):
if action not in self.RULE_TO_ACTION_TYPES[rule_type]:
raise ValueError('Invalid action for rule type')
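# Usage sketch (illustrative only, not part of the library): an end-to-end
# pass over the helpers above - register a workflow with Caesar, add a
# question extractor and a count reducer, then a subject rule that retires
# on a classification count. The workflow id and the retirement limit are
# placeholders.
def _example_configure_caesar_retirement(workflow_id=1234, classification_limit=15):
    caesar = Caesar()
    caesar.save_workflow(workflow_id)
    caesar.create_workflow_extractor(workflow_id, 'complete', 'question', task_key='T0')
    caesar.create_workflow_reducer(
        workflow_id, 'count', 'count',
        {'filters': {'extractor_keys': ['complete']}},
    )
    rule = caesar.create_workflow_rule(
        workflow_id, 'subject',
        f'["gte", ["lookup", "count.classifications", 0], ["const", {classification_limit}]]',
    )
    return caesar.create_workflow_rule_effect(
        workflow_id, 'subject', rule['id'], 'retire_subject',
        {'reason': 'classification_count'},
    )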
| 11,043 | 40.675472 | 160 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/exportable.py
|
from __future__ import absolute_import, division, print_function
import csv
import datetime
import functools
import time
import requests
from panoptes_client.panoptes import (
PanoptesAPIException,
Talk,
)
TALK_EXPORT_TYPES = (
'talk_comments',
'talk_tags',
)
talk = Talk()
class Exportable(object):
"""
Abstract class containing methods for generating and downloading data
exports.
"""
def get_export(
self,
export_type,
generate=False,
wait=False,
wait_timeout=None,
):
"""
Downloads a data export over HTTP. Returns a `Requests Response
<http://docs.python-requests.org/en/master/api/#requests.Response>`_
object containing the content of the export.
- **export_type** is a string specifying which type of export should be
downloaded.
- **generate** is a boolean specifying whether to generate a new export
and wait for it to be ready, or to just download the latest export.
- **wait** is a boolean specifying whether to wait for an in-progress
export to finish, if there is one. Has no effect if ``generate`` is
``True``.
- **wait_timeout** is the number of seconds to wait if ``wait`` is
``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
is ``True``.
The returned :py:class:`.Response` object has two additional attributes
as a convenience for working with the CSV content; **csv_reader** and
**csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
and :py:class:`csv.DictReader` respectively. These wrappers take care
of correctly decoding the export content for the CSV parser.
Example::
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_reader():
print(row)
classification_export = Project(1234).get_export('classifications')
for row in classification_export.csv_dictreader():
print(row)
"""
if generate:
self.generate_export(export_type)
if generate or wait:
export = self.wait_export(export_type, wait_timeout)
else:
export = self.describe_export(export_type)
if export_type in TALK_EXPORT_TYPES:
media_url = export['data_requests'][0]['url']
else:
media_url = export['media'][0]['src']
response = requests.get(media_url, stream=True)
response.csv_reader = functools.partial(
csv.reader,
response.iter_lines(decode_unicode=True),
)
response.csv_dictreader = functools.partial(
csv.DictReader,
response.iter_lines(decode_unicode=True),
)
return response
def wait_export(
self,
export_type,
timeout=None,
):
"""
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
"""
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description
def generate_export(self, export_type):
"""
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0]
def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0]
def _export_path(self, export_type):
return '{}/{}_export'.format(self.id, export_type)
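# Usage sketch (illustrative only, not part of the library): the
# generate-and-wait flow documented in get_export() above. The project id
# and timeout are placeholders; the import is local to avoid a circular
# import with project.py.
def _example_count_classification_rows(project_id=1234, wait_timeout=3600):
    from panoptes_client.project import Project
    export = Project(project_id).get_export(
        'classifications',
        generate=True,
        wait_timeout=wait_timeout,
    )
    # Stream the CSV rows without loading the whole export into memory.
    return sum(1 for _row in export.csv_dictreader())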
| 5,571 | 29.448087 | 79 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/__init__.py
|
from panoptes_client.classification import Classification
from panoptes_client.collection import Collection
from panoptes_client.collection_role import CollectionRole
from panoptes_client.organization import Organization
from panoptes_client.panoptes import Panoptes
from panoptes_client.project import Project
from panoptes_client.project_preferences import ProjectPreferences
from panoptes_client.project_role import ProjectRole
from panoptes_client.subject import Subject
from panoptes_client.subject_set import SubjectSet
from panoptes_client.user import User
from panoptes_client.workflow import Workflow
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
from panoptes_client.caesar import Caesar
from panoptes_client.inaturalist import Inaturalist
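# Usage sketch (illustrative only, not part of the library): the typical
# entry point - connect once, then look a project up by slug. The
# credentials are placeholders and the slug is taken from the Project.find()
# docstring example.
def _example_connect(username='example', password='example'):
    Panoptes.connect(username=username, password=password)
    return Project.find(slug='zooniverse/galaxy-zoo')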
| 778 | 47.6875 | 73 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/workflow.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
from copy import deepcopy
from panoptes_client.set_member_subject import SetMemberSubject
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
from panoptes_client.exportable import Exportable
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.subject import Subject
from panoptes_client.subject_set import SubjectSet
from panoptes_client.utils import batchable
from panoptes_client.caesar import Caesar
class Workflow(PanoptesObject, Exportable):
_api_slug = 'workflows'
_link_slug = 'workflows'
_edit_attributes = (
'active',
'configuration',
'display_name',
'first_task',
'mobile_friendly',
'primary_language',
'retirement',
'tasks',
{
'links': (
'project',
)
},
)
def __init__(self, raw={}, etag=None):
super(Workflow, self).__init__(raw, etag)
if not self.configuration:
self.configuration = {}
self._original_configuration = {}
if not self.retirement:
self.retirement = {}
self._original_retirement = {}
if not self.tasks:
self.tasks = {}
self._original_tasks = {}
def set_raw(self, raw, etag=None, loaded=True):
super(Workflow, self).set_raw(raw, etag, loaded)
        if loaded:
            if self.configuration:
                self._original_configuration = deepcopy(self.configuration)
            else:
                self._original_configuration = None
            if self.retirement:
                self._original_retirement = deepcopy(self.retirement)
            else:
                self._original_retirement = None
            if self.tasks:
                self._original_tasks = deepcopy(self.tasks)
            else:
                self._original_tasks = None
def save(self):
"""
Adds workflow configuration, retirement, and tasks dicts to the list of
        savable attributes if they have changed.
"""
if not self.configuration == self._original_configuration:
self.modified_attributes.add('configuration')
if not self.retirement == self._original_retirement:
self.modified_attributes.add('retirement')
if not self.tasks == self._original_tasks:
self.modified_attributes.add('tasks')
super(Workflow, self).save()
@batchable
def retire_subjects(self, subjects, reason='other'):
"""
Retires subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
- **reason** gives the reason the :py:class:`Subject` has been retired.
Defaults to **other**.
Examples::
workflow.retire_subjects(1234)
workflow.retire_subjects([1,2,3,4])
workflow.retire_subjects(Subject(1234))
workflow.retire_subjects([Subject(12), Subject(34)])
"""
subjects = [s.id if isinstance(s, Subject) else s for s in subjects]
return Workflow.http_post(
'{}/retired_subjects'.format(self.id),
json={
'subject_ids': subjects,
'retirement_reason': reason
},
)
@batchable
def unretire_subjects(self, subjects):
"""
        Un-retires the given subjects in this workflow.
- **subjects** can be a list of :py:class:`Subject` instances, a list
of subject IDs, a single :py:class:`Subject` instance, or a single
subject ID.
"""
subjects = [s.id if isinstance(s, Subject) else s for s in subjects]
return Workflow.http_post(
'{}/unretire_subjects'.format(self.id),
json={
'subject_ids': subjects
},
)
@batchable
def unretire_subjects_by_subject_set(self, subject_sets):
"""
        Un-retires all subjects belonging to the given subject sets in this workflow.
        - **subject_sets** can be a list of :py:class:`SubjectSet` instances, a
list of subject_set IDs, a single :py:class:`SubjectSet` instance, or
a single subject_set ID.
"""
subject_sets = [s.id if isinstance(
s, SubjectSet) else s for s in subject_sets]
return Workflow.http_post(
'{}/unretire_subjects'.format(self.id),
json={
'subject_set_ids': subject_sets
},
)
def add_subject_sets(self, subject_sets):
"""
A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::
workflow.links.subject_sets.add(subject_sets)
"""
return self.links.subject_sets.add(subject_sets)
def remove_subject_sets(self, subject_sets):
"""
A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::
workflow.links.subject_sets.remove(subject_sets)
"""
return self.links.subject_sets.remove(subject_sets)
def subject_workflow_status(self, subject_id):
"""
Returns SubjectWorkflowStatus of the current workflow given subject_id
Example::
workflow.subject_workflow_status(1234)
"""
return next(SubjectWorkflowStatus.where(subject_id=subject_id, workflow_id=self.id))
def subject_workflow_statuses(self, subject_set_id):
"""
        A generator which yields :py:class:`.SubjectWorkflowStatus` objects for the subjects
        in the given subject set of this workflow.
Examples::
for status in workflow.subject_workflow_statuses(1234):
print(status.retirement_reason)
"""
subject_ids = []
for sms in SetMemberSubject.where(subject_set_id=subject_set_id):
subject_ids.append(sms.links.subject.id)
subject_ids = ','.join(map(str, subject_ids))
for status in SubjectWorkflowStatus.where(subject_ids=subject_ids, workflow_id=self.id):
yield status
""" CAESAR METHODS """
def save_to_caesar(self, public_extracts=False, public_reductions=False):
"""
        Adds or updates this workflow in Caesar. Returns the workflow as a dict from Caesar.
        - **public_extracts** set to True to enable public extracts. Defaults to False.
        - **public_reductions** set to True to enable public reductions. Defaults to False.
Examples::
workflow.save_to_caesar()
workflow.save_to_caesar(public_extracts=True, public_reductions=True)
"""
return Caesar().save_workflow(self.id, public_extracts, public_reductions)
def caesar_subject_extracts(self, subject_id):
"""
Returns a list of subject extracts as a dict from Caesar for a given subject.
Examples::
workflow.caesar_subject_extracts(1234)
s = Subject(1234)
workflow.caesar_subject_extracts(s.id)
"""
url = f'{self._api_slug}/{self.id}/extractors/all/extracts'
return Caesar().http_get(url, params={'subject_id': subject_id})[0]
def caesar_subject_reductions(self, subject_id, reducer_key=""):
"""
Returns a list of subject reductions as dicts from Caesar for a given subject.
        By default, all subject reductions for the given subject are returned.
        - **reducer_key** if given, filters the reductions to those produced by the reducer with that reducer_key.
Examples::
workflow.caesar_subject_reductions(1234)
workflow.caesar_subject_reductions(1234,'points')
"""
url = f'{self._api_slug}/{self.id}/subjects/{subject_id}/reductions'
if reducer_key.strip():
url += f'?reducer_key={reducer_key.strip()}'
return Caesar().http_get(url)[0]
def caesar_extractors(self):
"""
Returns a list of extractors as dicts from Caesar for particular workflow.
Examples::
workflow.caesar_extractors()
"""
return Caesar().http_get(f'{self._api_slug}/{self.id}/extractors')[0]
def caesar_reducers(self):
"""
Returns a list of reducers as dicts from Caesar for particular workflow.
Examples::
workflow.caesar_reducers()
"""
return Caesar().http_get(f'{self._api_slug}/{self.id}/reducers')[0]
def caesar_rules(self, rule_type):
"""
Returns a list of Caesar workflow rules as dicts.
- **rule_type** can either be 'subject' or 'user';
if 'subject' will return subject rules,
if 'user' will return user rules
Examples::
workflow.caesar_rules('subject')
workflow.caesar_rules('user')
"""
return Caesar().http_get(f'{self._api_slug}/{self.id}/{rule_type}_rules')[0]
def caesar_effects(self, rule_type, rule_id):
"""
Returns a list of Caesar workflow effects as dicts for the workflow rule with id `rule_id`.
- **rule_type** can either be 'subject' or 'user';
if 'subject' will return effects of subject rules with id `rule_id`,
        if 'user' will return effects of user rules with id `rule_id`
Examples::
workflow.caesar_effects('subject', 123)
workflow.caesar_effects('user', 321)
"""
return Caesar().http_get(f'{self._api_slug}/{self.id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects')[0]
def add_caesar_extractor(self, extractor_type, extractor_key, task_key='T0', extractor_other_attributes=None):
"""
Adds a Caesar extractor for given workflow. Will return extractor as a dict with 'id' if successful.
- **extractor_type** can be one of the following:
'blank', 'external', 'question', 'survey', 'who', 'pluck_field', or 'shape'
- **extractor_key** is the unique key that you want to give to the extractor.
        The key will be used to track this specific extractor within Caesar.
Examples::
            workflow.add_caesar_extractor('question', 'complete', 'T0', {'if_missing': 'ignore'})
"""
caesar = Caesar()
caesar.validate_extractor_type(extractor_type)
if extractor_other_attributes is None:
extractor_other_attributes = {}
payload = {
'extractor': {
'type': extractor_type,
'key': extractor_key,
'task_key': task_key,
**extractor_other_attributes
}
}
return caesar.http_post(f'{self._api_slug}/{self.id}/extractors', json=payload)[0]
def add_caesar_reducer(self, reducer_type, key, other_reducer_attributes=None):
"""
Adds a Caesar reducer for given workflow. Will return reducer as dict with 'id' if successful.
- **reducer_type** can be one of the following:
'consensus', 'count', 'placeholder', 'external', 'first_extract',
'stats', 'unique_count', 'rectangle', 'sqs'
- **key** is a unique name for your reducer. This key will be used to track this specific reducer within Caesar.
Examples::
workflow.add_caesar_reducer('count', 'count', {'filters' : {'extractor_keys': ['complete']}})
"""
caesar = Caesar()
caesar.validate_reducer_type(reducer_type)
if other_reducer_attributes is None:
other_reducer_attributes = {}
payload = {
'reducer': {
'type': reducer_type,
'key': key,
**other_reducer_attributes
}
}
return caesar.http_post(f'{self._api_slug}/{self.id}/reducers', json=payload)[0]
def add_caesar_rule(self, condition_string, rule_type):
"""
Adds a Caesar rule for given workflow. Will return rule as a dict with 'id' if successful.
- **condition_string** is a string that represents a single operation (sometimes nested).
        The general syntax is similar to writing Lisp in JSON.
It is a stringified array with the first item being a string identifying the operator.
See https://zooniverse.github.io/caesar/#rules for examples of condition strings
- **rule_type** can either be 'subject' or 'user'
Examples::
workflow.add_caesar_rule('["gte", ["lookup", "complete.0", 0], ["const", 3]]', 'subject')
"""
caesar = Caesar()
caesar.validate_rule_type(rule_type)
payload = {f'{rule_type}_rule': {
'condition_string': condition_string
}}
return caesar.http_post(f'{self._api_slug}/{self.id}/{rule_type}_rules', json=payload)[0]
def add_caesar_rule_effect(self, rule_type, rule_id, action, effect_config=None):
"""
        Adds a Caesar effect for this workflow, attached to the workflow rule with id rule_id.
Method will return effect as a dict with 'id' if successful.
- **rule_type** can either be 'subject' or 'user'
- **rule_id** is the id of the subject rule or user rule that the effect should run
- **action** can be one of the following:
- **(actions for subject rules)** - 'retire_subject', 'add_subject_to_set', 'add_to_collection', 'external'
- **(actions for user rules)** - 'promote_user'
Examples::
workflow.add_caesar_rule_effect('subject', subject_rule['id'], 'retire_subject',
{'reason': 'classification_count'})
"""
caesar = Caesar()
caesar.validate_rule_type(rule_type)
caesar.validate_action(rule_type, action)
if effect_config is None:
effect_config = {}
payload = {
f'{rule_type}_rule_effect': {
'action': action,
'config': effect_config
}
}
return caesar.http_post(
f'{self._api_slug}/{self.id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects',
json=payload
)[0]
def import_caesar_data_extracts(self, csv_source):
"""
Imports machine-learnt data as extracts into Caesar.
- **csv_source** must be a publicly accessible csv at the time of import.
Eg. csv can be hosted via an AWS S3 Bucket, Azure Blob Storage, or Panoptes media item.
See `this csv <https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv>`_ as an example.
        The CSV at `csv_source` must contain the following columns:
- `extractor_key` (key corresponding to the extractor in Caesar)
- `subject_id`
- `data` (the machine learnt data for the corresponding subject)
Example::
workflow.import_caesar_data_extracts('https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv')
"""
return Caesar().http_post(f'{self._api_slug}/{self.id}/extracts/import', json={'file': csv_source})
def add_alice_extractors(self, alice_task_key='T0', question_task_key='T1',
question_extractor_if_missing='ignore',
other_question_extractor_attrib=None,
other_alice_extractor_attrib=None):
"""
Adds ALICE Extractors (two extractors: Question and External).
        - The QuestionExtractor that gets created will have the key `complete`.
        - **question_task_key** - Task ID of the
        “Have all the volunteer-made underline marks turned grey?” step. Defaults to T1.
        - The ExternalExtractor that gets created will have the key `alice`.
        - **alice_task_key** - Task ID of the Transcription Task step. Defaults to T0.
Examples::
workflow.add_alice_extractors()
"""
if other_question_extractor_attrib is None:
other_question_extractor_attrib = {}
if other_alice_extractor_attrib is None:
other_alice_extractor_attrib = {}
question_extractor_attributes = {
'if_missing': question_extractor_if_missing,
**other_question_extractor_attrib
}
alice_extractor_attributes = {
'url': f'https://aggregation-caesar.zooniverse.org/extractors/line_text_extractor?task={alice_task_key}',
**other_alice_extractor_attrib
}
self.add_caesar_extractor('question', 'complete', question_task_key, question_extractor_attributes)
self.add_caesar_extractor('external', 'alice', alice_task_key, alice_extractor_attributes)
def add_alice_reducers(self, alice_min_views=5, low_consensus_threshold=3):
"""
Adds ALICE Reducers for given workflow (three reducers: External, Stats, Count).
- **alice_min_views** - This is the threshold number of classifications in order to "gray-out" a transcribed line.
Default is 5.
- **low_consensus_threshold** - This is the threshold number of classifications in agreement for good consensus.
        Default is 3.
"""
external_reducer_url = 'https://aggregation-caesar.zooniverse.org/reducers/optics_line_text_reducer'
if alice_min_views or low_consensus_threshold:
external_reducer_url += f'?minimum_views={alice_min_views}&'
external_reducer_url += f'low_consensus_threshold={low_consensus_threshold}'
default_filter_attribs = {
'repeated_classifications': 'keep_first'
}
external_reducer_attributes = {
'url': external_reducer_url,
'filters': {
'extractor_keys': ['alice'],
**default_filter_attribs
}
}
self.add_caesar_reducer('external', 'alice', external_reducer_attributes)
complete_reducer_attribs = {
'filters': {
'extractor_keys': ['complete'],
**default_filter_attribs
}
}
self.add_caesar_reducer('stats', 'complete', complete_reducer_attribs)
self.add_caesar_reducer('count', 'count', complete_reducer_attribs)
def add_alice_rules_and_effects(self, question_retirement_limit=3, count_retirement_limit=30):
"""
Adds subject rules and corresponding effects for ALICE configuration of the given workflow.
Two subject rules are created that will trigger retirement: a Question rule and a Count rule.
A total of 4 subject rule effects should get created.
There should be 2 effects related to the Question Rule condition
(one to send to ALICE and the other to retire subject).
There should also be 2 effects related to the Count Rule condition
        (one to send to ALICE and the other to retire the subject).
- **question_retirement_limit** - Question subject rule created will trigger retirement when the answer to:
"is this complete" question reaches this threshhold limit (defaults to 3)
- **count_retirement_limit** - Count Subject Rule created will trigger retirement when the classification count reaches this limit (defaults to 30)
"""
question_subject_rule = self.add_caesar_rule(
f'["gte", ["lookup", "complete.0", 0], ["const", {question_retirement_limit}]]',
'subject'
)
send_to_alice_effect_config = {
'url': 'https://tove.zooniverse.org/import',
'reducer_key': 'alice'
}
self.add_caesar_rule_effect('subject', question_subject_rule['id'], 'external', send_to_alice_effect_config)
self.add_caesar_rule_effect('subject', question_subject_rule['id'], 'retire_subject', {'reason': 'consensus'})
count_subject_rule = self.add_caesar_rule(
f'["gte", ["lookup", "count.classifications", 0], ["const", {count_retirement_limit}]]',
'subject'
)
self.add_caesar_rule_effect('subject', count_subject_rule['id'], 'external', send_to_alice_effect_config)
self.add_caesar_rule_effect('subject', count_subject_rule['id'], 'retire_subject', {'reason': 'classification_count'})
def configure_for_alice(self):
"""
Configures workflow for ALICE/TOVE.
- This method will add workflow to Caesar
- This method will create Caesar Extractors needed for ALICE with defaults.
- This method will also create Caesar Reducers needed for ALICE with defaults.
(In particular, `minimum_views` = 5, and `low_consensus_threshold` = 3)
- And this method will also create Caesar Subject Rules and Effects needed for ALICE with defaults.
(In particular, Question-based retirement's retirement limit is 3 and Count-based retirement default is 30.)
"""
self.save_to_caesar(public_extracts=True, public_reductions=True)
self.add_alice_extractors()
self.add_alice_reducers()
self.add_alice_rules_and_effects()
@property
def versions(self):
"""
A generator which yields all :py:class:`.WorkflowVersion` instances for
this workflow.
"""
return WorkflowVersion.where(workflow=self)
LinkResolver.register(Workflow)
LinkResolver.register(Workflow, 'active_workflows', readonly=True)
# Keep import WorkflowVersion import on bottom to avoid circular import
from panoptes_client.workflow_version import WorkflowVersion
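# Usage sketch (illustrative only, not part of the library): retire_subjects()
# followed by subject_workflow_status() as described in the docstrings above.
# The ids are placeholders; an authenticated session with permission on the
# project is assumed, and 'consensus' mirrors the reason used by
# add_alice_rules_and_effects().
def _example_retire_and_check(workflow_id=1234, subject_id=5678):
    workflow = Workflow(workflow_id)
    workflow.retire_subjects(subject_id, reason='consensus')
    status = workflow.subject_workflow_status(subject_id)
    return status.retirement_reason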
| 21,737 | 38.667883 | 163 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/panoptes.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
import getpass
import logging
import os
import requests
import threading
import pkg_resources
from datetime import datetime, timedelta
from redo import retrier
import six
from panoptes_client.utils import isiterable, batchable
HTTP_RETRY_LIMIT = 5
RETRY_BACKOFF_INTERVAL = 5
if os.environ.get('PANOPTES_DEBUG'):
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig()
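# Sketch (illustrative only, not part of the library): debug output from the
# client's own logger can also be enabled at runtime, without setting the
# PANOPTES_DEBUG environment variable, via the standard logging module.
def _example_enable_debug_logging():
    logging.getLogger('panoptes_client').setLevel(logging.DEBUG)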
class Panoptes(object):
"""
The low-level Panoptes HTTP client class. Use this class to log into the
API. In most cases you can just call :py:meth:`.Panoptes.connect` once and
all subsequent API requests will be authenticated.
If you want to configure multiple clients, e.g. to perform operations as
multiple users, you should initialise the client as a context manager,
using the `with` statement instead of using :py:meth:`.Panoptes.connect`.
In this example, we modify a project by authenticating as the project
owner, then log in as a regular user to add a subject to a collection,
then switch back to the project owner's account to retire some subjects::
owner_client = Panoptes(username='example-project-owner', password='')
with owner_client:
project = Project(1234)
project.display_name = 'New name'
project.save()
with Panoptes(username='example-user', password=''):
Collection(1234).add(Subject(1234))
with owner_client:
Workflow(1234).retire_subjects([1234, 5678, 9012])
Using the `with` statement in this way ensures it is clear which user will
be used for each action.
"""
_http_headers = {
'default': {
'Accept': 'application/vnd.api+json; version=1',
'User-Agent': 'panoptes-python-client/version=' + pkg_resources.require('panoptes_client')[0].version
},
'GET': {},
'PUT': {
'Content-Type': 'application/json',
},
'POST': {
'Content-Type': 'application/json',
},
'DELETE': {
'Content-Type': 'application/json',
},
}
_endpoint_client_ids = {
'default': (
'ce310d45f951de68c4cc8ef46ca38cc0a008f607a2026680295757bfef99f43c'
),
'https://panoptes-staging.zooniverse.org': (
'e094b63362fdef0548e0bbcc6e6cb5996c422d3a770074ef972432d57d41049c'
),
}
_local = threading.local()
@classmethod
def connect(cls, *args, **kwargs):
"""
connect(username=None, password=None, endpoint=None, admin=False)
Configures the Panoptes client for use.
Note that there is no need to call this unless you need to pass one or
more of the below arguments. By default, the client will connect to
the public Zooniverse.org API as an anonymous user.
All arguments are optional:
- **username** is your Zooniverse.org username.
- **password** is your Zooniverse.org password.
- **endpoint** is the HTTP API endpoint you'd like to connect to.
Defaults to **https://www.zooniverse.org**. Should not include a
trailing slash.
- **admin** is a boolean, switching on admin mode if ``True``. Has no
effect if the given username is not a Zooniverse.org administrator.
Examples::
Panoptes.connect(username='example', password='example')
Panoptes.connect(endpoint='https://panoptes.example.com')
"""
cls._local.panoptes_client = cls(*args, **kwargs)
cls._local.panoptes_client.login()
return cls._local.panoptes_client
@classmethod
def client(cls, *args, **kwargs):
local_client = getattr(cls._local, "panoptes_client", None)
if not local_client:
return cls(*args, **kwargs)
return local_client
def __init__(
self,
endpoint=None,
client_id=None,
client_secret=None,
redirect_url=None,
username=None,
password=None,
login=None,
admin=False
):
self.session = requests.session()
self.endpoint = endpoint or os.environ.get(
'PANOPTES_ENDPOINT',
'https://www.zooniverse.org'
)
self.logged_in = False
self.username = None
self.password = None
self._auth(login, username, password)
self.login()
self.redirect_url = \
redirect_url or os.environ.get('PANOPTES_REDIRECT_URL')
self.client_secret = \
client_secret or os.environ.get('PANOPTES_CLIENT_SECRET')
if client_id:
self.client_id = client_id
elif os.environ.get('PANOPTES_CLIENT_ID'):
self.client_id = os.environ.get('PANOPTES_CLIENT_ID')
else:
self.client_id = self._endpoint_client_ids.get(
self.endpoint,
self._endpoint_client_ids['default']
)
self.logged_in = False
self.bearer_token = None
self.admin = admin
self.logger = logging.getLogger('panoptes_client')
def __enter__(self):
self._local.previous_client = getattr(
self._local,
'panoptes_client',
None,
)
self._local.panoptes_client = self
return self
def __exit__(self, *exc):
self._local.panoptes_client = self._local.previous_client
def http_request(
self,
method,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
_headers = self._http_headers['default'].copy()
_headers.update(self._http_headers[method])
_headers.update(headers)
headers = _headers
token = self.get_bearer_token()
if self.logged_in:
headers.update({
'Authorization': 'Bearer %s' % token,
})
if etag:
headers.update({
'If-Match': etag,
})
if endpoint:
url = endpoint + '/' + path
else:
url = self.endpoint + '/api' + path
# Setting the parameter at all (even False) turns on admin mode
if self.admin:
params.update({'admin': self.admin})
if params:
self.logger.debug(
"params={}".format(params)
)
if json:
self.logger.debug(
"json={}".format(json)
)
if retry:
retry_attempts = HTTP_RETRY_LIMIT
else:
retry_attempts = 1
for _ in retrier(
attempts=retry_attempts,
sleeptime=RETRY_BACKOFF_INTERVAL,
):
response = self.session.request(
method,
url,
params=params,
headers=headers,
json=json,
)
if response.status_code < 500:
break
else:
raise PanoptesAPIException(
'Received HTTP status code {} from API'.format(
response.status_code
)
)
return response
def json_request(
self,
method,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
response = self.http_request(
method=method,
path=path,
params=params,
headers=headers,
json=json,
etag=etag,
endpoint=endpoint,
retry=retry,
)
if (
response.status_code == 204 or
int(response.headers.get('Content-Length', -1)) == 0 or
len(response.text) == 0
):
json_response = None
else:
json_response = response.json()
if 'errors' in json_response:
raise PanoptesAPIException(', '.join(
map(lambda e: e.get('message', ''),
json_response['errors']
)
))
elif 'error' in json_response:
raise PanoptesAPIException(json_response['error'])
return (json_response, response.headers.get('ETag'))
def get_request(
self,
path,
params={},
headers={},
endpoint=None,
retry=False,
):
return self.http_request(
'GET',
path,
params=params,
headers=headers,
endpoint=endpoint,
retry=retry,
)
def get(
self,
path,
params={},
headers={},
endpoint=None,
retry=False,
):
return self.json_request(
'GET',
path,
params=params,
headers=headers,
endpoint=endpoint,
retry=retry,
)
def put_request(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.http_request(
'PUT',
path,
params=params,
headers=headers,
json=json,
etag=etag,
            endpoint=endpoint,
retry=retry,
)
def put(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.json_request(
'PUT',
path,
params=params,
headers=headers,
json=json,
etag=etag,
endpoint=endpoint,
retry=retry,
)
def post_request(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.http_request(
            'POST',
path,
params=params,
headers=headers,
json=json,
etag=etag,
endpoint=endpoint,
retry=retry,
)
def post(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.json_request(
'POST',
path,
params=params,
headers=headers,
json=json,
etag=etag,
endpoint=endpoint,
retry=retry,
)
def delete_request(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.http_request(
            'DELETE',
path,
params=params,
headers=headers,
json=json,
etag=etag,
            endpoint=endpoint,
retry=retry,
)
def delete(
self,
path,
params={},
headers={},
json=None,
etag=None,
endpoint=None,
retry=False,
):
return self.json_request(
'DELETE',
path,
params=params,
headers=headers,
json=json,
etag=etag,
endpoint=endpoint,
retry=retry,
)
def _auth(self, auth_type, username, password):
if username is None or password is None:
if auth_type == 'interactive':
username, password = self.interactive_login()
elif auth_type == 'keyring':
# Get credentials from python keyring
pass
else:
username = os.environ.get('PANOPTES_USERNAME')
password = os.environ.get('PANOPTES_PASSWORD')
self.username = username
self.password = password
def login(self, username=None, password=None):
if self.logged_in:
return
if not username:
username = self.username
else:
self.username = username
if not password:
password = self.password
else:
self.password = password
if not username or not password:
return
login_data = {
'authenticity_token': self.get_csrf_token(),
'user': {
'login': username,
'password': password,
'remember_me': True,
},
}
response = self.session.post(
self.endpoint + '/users/sign_in',
json=login_data,
            headers={
'Accept': 'application/json',
'Content-Type': 'application/json',
}
)
if response.status_code != 200:
raise PanoptesAPIException(
response.json().get('error', 'Login failed')
)
self.logged_in = True
return response
def interactive_login(self):
print('Enter your Zooniverse credentials...')
username = input('Username: ')
password = getpass.getpass()
return username, password
def get_csrf_token(self):
url = self.endpoint + '/users/sign_in'
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
return self.session.get(url, headers=headers).headers['x-csrf-token']
def get_bearer_token(self):
if not self.valid_bearer_token():
grant_type = 'password'
if self.client_secret:
grant_type = 'client_credentials'
if not self.logged_in:
if grant_type == 'password':
if not self.login():
return
if (self.bearer_token and self.refresh_token):
bearer_data = {
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
'client_id': self.client_id,
}
else:
bearer_data = {
'grant_type': grant_type,
'client_id': self.client_id,
}
if grant_type == 'client_credentials':
bearer_data['client_secret'] = self.client_secret
bearer_data['url'] = self.redirect_url
token_response = self.session.post(
self.endpoint + '/oauth/token',
bearer_data
).json()
if 'errors' in token_response:
raise PanoptesAPIException(token_response['errors'])
self.bearer_token = token_response['access_token']
if (self.bearer_token and grant_type == 'client_credentials'):
self.logged_in = True
if 'refresh_token' in token_response:
self.refresh_token = token_response['refresh_token']
else:
self.refresh_token = None
self.bearer_expires = (
datetime.now()
+ timedelta(seconds=token_response['expires_in'])
)
return self.bearer_token
def valid_bearer_token(self):
# Return invalid if there is no token
if not self.has_bearer_token():
return False
now = datetime.now()
expires = self.bearer_expires
# Buffer to allow time for requests
# to fire without expiring in transit
buffer_ = timedelta(minutes=2)
# Add time to now --> pretend time is later
# Effect of making token expire earlier
return now + buffer_ <= expires
def has_bearer_token(self):
return self.bearer_token is not None
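# Example (illustrative sketch only, not part of the client): typical ways of
# obtaining a client and issuing a retried request. The credentials, project id
# and paths below are placeholders; this helper is never called by the library.
def _panoptes_usage_example():  # pragma: no cover - illustration only
    # connect() stores the client thread-locally so Panoptes.client() can find it
    client = Panoptes.connect(username='example', password='example')
    # retry=True retries 5xx responses up to HTTP_RETRY_LIMIT times with backoff
    client.get('/projects/1234', retry=True)
    # A client can also be scoped to a block; Panoptes.client() resolves to it
    # inside the block and reverts to the previous client afterwards
    with Panoptes(username='example', password='example') as scoped_client:
        scoped_client.get('/me')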
class PanoptesObject(object):
"""
The base class of all Panoptes model classes. You should never need to
create instances of this class, but the methods defined here are common to
all the model subclasses.
`PanoptesObject`s support lazy loading of attributes, where data is loaded
from the API only when it is first accessed. You can do this by passing an
    object ID to the constructor::
project = Project(1234)
print(project.display_name)
This will not make any HTTP requests until the `print` statement.
"""
RESERVED_ATTRIBUTES = (
'_loaded',
'etag',
'links',
'modified_attributes',
'raw',
)
@classmethod
def url(cls, *args):
return '/'.join(['', cls._api_slug] + [str(a) for a in args if a])
@classmethod
def http_get(cls, path, params={}, headers={}, retry=True, **kwargs):
return Panoptes.client().get(
cls.url(path),
params,
headers,
retry=retry,
**kwargs
)
@classmethod
def http_post(cls, path, params={}, headers={}, json=None, **kwargs):
return Panoptes.client().post(
cls.url(path),
params,
headers,
json,
**kwargs
)
@classmethod
def http_put(cls, path, params={}, headers={}, json=None, **kwargs):
return Panoptes.client().put(
cls.url(path),
params,
headers,
json,
**kwargs
)
@classmethod
def http_delete(cls, path, params={}, headers={}, json=None, **kwargs):
return Panoptes.client().delete(
cls.url(path),
params,
headers,
json,
**kwargs
)
@classmethod
def where(cls, **kwargs):
"""
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
"""
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs))
@classmethod
def find(cls, _id):
"""
Returns the individual instance with the given ID, if it exists. Raises
:py:class:`PanoptesAPIException` if the object with that ID is not
found.
"""
if not _id:
return None
try:
return next(cls.where(id=_id))
except StopIteration:
raise PanoptesAPIException(
"Could not find {} with id='{}'".format(cls.__name__, _id)
)
@classmethod
def paginated_results(cls, response, etag):
return ResultPaginator(cls, response, etag)
def __init__(self, raw={}, etag=None):
self._loaded = False
self.links = LinkResolver(self)
if type(raw) == dict:
self.set_raw(raw, etag)
else:
self.set_raw({}, loaded=False)
self.raw['id'] = raw
def __getattr__(self, name):
try:
if (
name not in PanoptesObject.RESERVED_ATTRIBUTES
and name != 'id'
and not self._loaded
):
self.reload()
return getattr(self, name)
return self.raw[name]
except KeyError:
if name == 'id':
return None
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__,
name
))
def __setattr__(self, name, value):
if name in PanoptesObject.RESERVED_ATTRIBUTES:
return super(PanoptesObject, self).__setattr__(name, value)
if not self._loaded:
self.reload()
if name not in self.raw:
return super(PanoptesObject, self).__setattr__(name, value)
if name not in self._edit_attributes:
raise ReadOnlyAttributeException(
'{} is read-only'.format(name)
)
self.raw[name] = value
self.modified_attributes.add(name)
def __repr__(self):
return '<{} {}>'.format(
self.__class__.__name__,
self.id
)
def set_raw(self, raw, etag=None, loaded=True):
self.raw = {}
self.raw.update(self._savable_dict(include_none=True))
self.raw.update(raw)
self.etag = etag
self.modified_attributes = set()
self._loaded = loaded
def _savable_dict(
self,
attributes=None,
modified_attributes=None,
include_none=False,
):
if not attributes:
attributes = self._edit_attributes
out = []
for key in attributes:
if type(key) == dict:
for subkey, subattributes in key.items():
if (
subkey == 'links' and
hasattr(self, 'links') and
modified_attributes and
'links' in modified_attributes
):
out.append(
(subkey, self.links._savable_dict(subattributes))
)
else:
links_out = (subkey, self._savable_dict(
attributes=subattributes,
include_none=include_none
))
if links_out[1]:
out.append(links_out)
elif modified_attributes and key not in modified_attributes:
continue
else:
value = self.raw.get(key)
if value is not None or include_none:
out.append((key, value))
return dict(out)
def save(self):
"""
Saves the object. If the object has not been saved before (i.e. it's
new), then a new object is created. Otherwise, any changes are
submitted to the API.
"""
if not self.id:
save_method = Panoptes.client().post
force_reload = False
else:
if not self.modified_attributes:
return
if not self._loaded:
self.reload()
save_method = Panoptes.client().put
force_reload = True
response, response_etag = save_method(
self.url(self.id),
json={self._api_slug: self._savable_dict(
modified_attributes=self.modified_attributes
)},
etag=self.etag
)
raw_resource_response = response[self._api_slug][0]
self.set_raw(raw_resource_response, response_etag)
if force_reload:
self._loaded = False
return response
def reload(self):
"""
Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new.
"""
if not self.id:
return
reloaded_object = self.__class__.find(self.id)
self.set_raw(
reloaded_object.raw,
reloaded_object.etag
)
def delete(self):
"""
Deletes the object. Returns without doing anything if the object is
new.
"""
if not self.id:
return
if not self._loaded:
self.reload()
return self.http_delete(self.id, etag=self.etag)
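# Example (illustrative sketch only): the lazy-load / edit / save cycle described
# in the docstrings above. Project is imported lazily to avoid a circular import;
# the id is a placeholder and this helper is never called by the library.
def _panoptes_object_example():  # pragma: no cover - illustration only
    from panoptes_client import Project
    project = Project(1234)                 # no HTTP request is made yet
    print(project.display_name)             # first attribute access calls reload()
    project.display_name = 'A new name'     # only attributes in _edit_attributes may be set
    project.save()                          # existing objects PUT with the stored ETag; new ones POST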
class ResultPaginator(object):
def __init__(self, object_class, response, etag):
if response is None:
response = {}
self.object_class = object_class
self.set_page(response)
self.etag = etag
def __iter__(self):
return self
def __next__(self):
if self.object_index >= self.object_count:
if self.object_count and self.next_href:
response, _ = Panoptes.client().get(self.next_href)
self.set_page(response)
return next(self)
else:
raise StopIteration
i = self.object_index
self.object_index += 1
return self.object_class(self.object_list[i], etag=self.etag)
next = __next__
def set_page(self, response):
self.meta = response.get('meta', {})
self.meta = self.meta.get(self.object_class._api_slug, {})
self.page = self.meta.get('page', 1)
self.page_count = self.meta.get('page_count', 1)
self.next_href = self.meta.get('next_href')
self.object_list = response.get(self.object_class._api_slug, [])
self.object_count = len(self.object_list)
self.object_index = 0
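# Example (illustrative sketch only): where() wraps its response in a
# ResultPaginator, so plain iteration follows next_href across pages. Project is
# imported lazily here; this helper is never called by the library.
def _pagination_example():  # pragma: no cover - illustration only
    from panoptes_client import Project
    for project in Project.where(launch_approved=True):
        # Each page is fetched on demand once the previous one is exhausted
        print(project.id, project.display_name)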
class LinkResolver(object):
types = {}
readonly = set()
@classmethod
def register(cls, object_class, link_slug=None, readonly=False):
if not link_slug:
link_slug = object_class._link_slug
cls.types[link_slug] = object_class
if readonly:
cls.readonly.add(link_slug)
@classmethod
def isreadonly(cls, link_slug):
return link_slug in cls.readonly
def __init__(self, parent):
self.parent = parent
def __getattr__(self, name):
if not self.parent._loaded:
self.parent.reload()
linked_object = self.parent.raw['links'][name]
object_class = LinkResolver.types.get(name)
if (
not object_class and
            type(linked_object) == dict and
'type' in linked_object
):
object_class = LinkResolver.types.get(linked_object['type'])
if isinstance(linked_object, LinkCollection):
return linked_object
if isinstance(linked_object, list):
lc = getattr(self.parent, '_link_collection', LinkCollection)(
object_class,
name,
self.parent,
linked_object
)
self.parent.raw['links'][name] = lc
return lc
if isinstance(linked_object, dict) and 'id' in linked_object:
return object_class(linked_object['id'])
else:
return object_class(linked_object)
def __setattr__(self, name, value):
reserved_names = ('raw', 'parent')
if name not in reserved_names and name not in dir(self):
if not self.parent._loaded:
self.parent.reload()
if isinstance(value, PanoptesObject):
value = value.id
self.parent.raw['links'][name] = value
self.parent.modified_attributes.add('links')
else:
super(LinkResolver, self).__setattr__(name, value)
def _savable_dict(self, edit_attributes):
out = []
for key, value in self.parent.raw['links'].items():
            if key not in edit_attributes:
continue
if isiterable(value):
out.append((key, [getattr(o, 'id', o) for o in value]))
else:
if value:
out.append((key, value))
return dict(out)
class LinkCollection(object):
"""
A collection of :py:class:`.PanoptesObject` of one class which are linked
to a parent :py:class:`.PanoptesObject`.
Allows indexing, iteration, and membership testing::
project = Project(1234)
print(project.links.workflows[2].display_name)
for workflow in project.links.workflows:
print(workflow.id)
if Workflow(5678) in project.links.workflows:
print('Workflow found')
# Integers, strings, and PanoptesObjects are all OK
if 9012 not in project.links.workflows:
print('Workflow not found')
"""
def __init__(self, cls, slug, parent, linked_objects):
self._linked_object_ids = list(linked_objects)
self._cls = cls
self._slug = slug
self._parent = parent
self.readonly = LinkResolver.isreadonly(slug)
def __contains__(self, obj):
if isinstance(obj, self._cls):
obj_id = str(obj.id)
else:
obj_id = str(obj)
return obj_id in self._linked_object_ids
def __getitem__(self, i):
return self._cls(self._linked_object_ids[i])
def __iter__(self):
for obj_id in self._linked_object_ids:
yield self._cls(obj_id)
def __repr__(self):
return "[{}]".format(", ".join([
"<{} {}>".format(self._cls.__name__, obj)
for obj in self._linked_object_ids
]))
@batchable
def add(self, objs):
"""
Adds the given `objs` to this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.add(1234)
organization.links.projects.add(Project(1234))
workflow.links.subject_sets.add([1,2,3,4])
workflow.links.subject_sets.add([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj not in self]
if not _objs:
return
self._parent.http_post(
'{}/links/{}'.format(self._parent.id, self._slug),
json={self._slug: _objs},
retry=True,
)
self._linked_object_ids.extend(_objs)
@batchable
def remove(self, objs):
"""
Removes the given `objs` from this `LinkCollection`.
- **objs** can be a list of :py:class:`.PanoptesObject` instances, a
list of object IDs, a single :py:class:`.PanoptesObject` instance, or
a single object ID.
Examples::
organization.links.projects.remove(1234)
organization.links.projects.remove(Project(1234))
workflow.links.subject_sets.remove([1,2,3,4])
workflow.links.subject_sets.remove([Project(12), Project(34)])
"""
if self.readonly:
raise NotImplementedError(
'{} links can\'t be modified'.format(self._slug)
)
if not self._parent.id:
raise ObjectNotSavedException(
"Links can not be modified before the object has been saved."
)
_objs = [obj for obj in self._build_obj_list(objs) if obj in self]
if not _objs:
return
_obj_ids = ",".join(_objs)
self._parent.http_delete(
'{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids),
retry=True,
)
self._linked_object_ids = [
obj for obj in self._linked_object_ids if obj not in _objs
]
def _build_obj_list(self, objs):
_objs = []
for obj in objs:
if not (
isinstance(obj, self._cls)
or isinstance(obj, (int, six.string_types,))
):
raise TypeError
if isinstance(obj, self._cls):
_obj_id = str(obj.id)
else:
_obj_id = str(obj)
_objs.append(_obj_id)
return _objs
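# Example (illustrative sketch only): how LinkCollection normalises membership
# tests and indexing. The Dummy* classes are hypothetical stand-ins used purely
# for this demonstration; this helper is never called by the library.
def _link_collection_example():  # pragma: no cover - illustration only
    class DummyLinked(object):
        def __init__(self, raw=None, etag=None):
            self.id = str(raw)
    class DummyParent(object):
        id = '99'
    links = LinkCollection(DummyLinked, 'dummies', DummyParent(), ['1', '2'])
    assert 1 in links                  # ints, strings and objects are all accepted
    assert '2' in links
    assert DummyLinked(1) in links
    assert links[0].id == '1'          # indexing returns instances of the linked class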
class PanoptesAPIException(Exception):
"""
Raised whenever the API returns an error. The exception will contain the
raw error message from the API.
"""
pass
class ReadOnlyAttributeException(Exception):
"""
Raised if an attempt is made to modify an attribute of a
:py:class:`PanoptesObject` which the API does not allow to be modified.
"""
pass
class ObjectNotSavedException(Exception):
"""
Raised if an attempt is made to perform an operation on an unsaved
:py:class:`PanoptesObject` which requires the object to be saved first.
"""
pass
class Talk(object):
def __init__(self, endpoint='https://talk.zooniverse.org/'):
self.endpoint = endpoint
def http_get(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
return Panoptes.client().get(*args, **kwargs)
def http_post(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
return Panoptes.client().post(*args, **kwargs)
def http_put(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
return Panoptes.client().put(*args, **kwargs)
def http_delete(self, *args, **kwargs):
kwargs['endpoint'] = self.endpoint
return Panoptes.client().delete(*args, **kwargs)
def get_data_request(self, section, kind):
return self.http_get(
'data_requests',
params={
'section': section,
'kind': kind,
}
)
def post_data_request(self, section, kind):
return self.http_post(
'data_requests',
json={
'data_requests': {
'section': section,
'kind': kind,
}
}
)
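# Example (illustrative sketch only): Talk routes requests through the shared
# Panoptes client but against the Talk endpoint, so an authenticated connection
# must already exist. The section and kind values are placeholder guesses and
# this helper is never called by the library.
def _talk_example():  # pragma: no cover - illustration only
    Panoptes.connect(username='example', password='example')
    talk = Talk()
    talk.post_data_request('project-1234', 'comments')                # ask Talk to build an export
    data, _etag = talk.get_data_request('project-1234', 'comments')   # poll for the result
    print(data)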
| 33,577 | 27.552721 | 113 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/organization.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import (
LinkResolver,
PanoptesObject,
)
from panoptes_client.project import Project
from panoptes_client.utils import batchable
class Organization(PanoptesObject):
_api_slug = 'organizations'
_link_slug = 'organization'
_edit_attributes = (
'display_name',
'description',
'tags',
'introduction',
'primary_language',
)
def add(self, projects):
"""
A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::
organization.links.add(projects)
"""
return self.links.projects.add(projects)
def remove(self, projects):
"""
A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::
organization.links.remove(projects)
"""
return self.links.projects.remove(projects)
def __contains__(self, project):
"""
A wrapper around :py:meth:`.LinkCollection.__contains__`. Equivalent
to::
            project in organization.links.projects
        """
        return project in self.links.projects
LinkResolver.register(Organization)
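# Example (illustrative sketch only, mirroring the docstrings above): linking and
# unlinking projects. The ids are placeholders and an authenticated connection
# with suitable permissions is assumed; this helper is never called.
def _organization_example():  # pragma: no cover - illustration only
    organization = Organization(1)
    organization.add([1234, Project(5678)])     # ids and Project instances both work
    if 1234 in organization.links.projects:
        organization.remove(1234)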
| 1,212 | 22.326923 | 76 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/subject_set.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
from panoptes_client.panoptes import (
LinkCollection,
LinkResolver,
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.set_member_subject import SetMemberSubject
from panoptes_client.subject import Subject
from panoptes_client.exportable import Exportable
from panoptes_client.utils import batchable
from redo import retry
class SubjectSetLinkCollection(LinkCollection):
def __contains__(self, obj):
if self._cls == Subject:
if isinstance(obj, Subject):
_subject_id = str(obj.id)
else:
_subject_id = str(obj)
linked_subject_count = SetMemberSubject.where(
subject_set_id=self._parent.id,
subject_id=_subject_id
).object_count
return linked_subject_count == 1
return super(SubjectSetLinkCollection, self).__contains__(obj)
def add(self, objs):
from panoptes_client.workflow import Workflow
if self._cls == Workflow:
raise NotImplementedError(
'Workflows and SubjectSets can only be linked via '
'Workflow.links'
)
return super(SubjectSetLinkCollection, self).add(objs)
def remove(self, objs):
from panoptes_client.workflow import Workflow
if self._cls == Workflow:
raise NotImplementedError(
'Workflows and SubjectSets can only be unlinked via '
'Workflow.links'
)
return super(SubjectSetLinkCollection, self).remove(objs)
class SubjectSet(PanoptesObject, Exportable):
_api_slug = 'subject_sets'
_link_slug = 'subject_sets'
_edit_attributes = (
'display_name',
{
'links': (
'project',
),
'metadata': (
'category',
)
},
)
_link_collection = SubjectSetLinkCollection
@property
def subjects(self):
"""
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
"""
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject
def set_raw(self, raw, etag=None, loaded=True):
raw.setdefault('links', {}).setdefault('subjects', [])
return super(SubjectSet, self).set_raw(raw, etag, loaded)
def add(self, subjects):
"""
A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::
subject_set.links.add(subjects)
"""
        # reload the subject set so the local list of linked subjects is not stale
self.reload()
return self.links.subjects.add(subjects)
def remove(self, subjects):
"""
A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::
subject_set.links.remove(subjects)
"""
return self.links.subjects.remove(subjects)
def subject_workflow_statuses(self, workflow_id):
"""
A generator which yields :py:class:`.SubjectWorkflowStatus` objects for subjects in this
subject set and for the supplied workflow id.
Examples::
for status in subject_set.subject_workflow_statuses(1234):
print(status.retirement_reason)
"""
subject_ids = ', '.join((subject.id for subject in self.subjects))
for status in SubjectWorkflowStatus.where(subject_ids=subject_ids, workflow_id=workflow_id):
yield status
def __contains__(self, subject):
"""
A wrapper around :py:meth:`.LinkCollection.__contains__`. Equivalent
to::
subject in subject_set.links.subjects
"""
return subject in self.links.subjects
LinkResolver.register(SubjectSet)
LinkResolver.register(SubjectSet, 'subject_set')
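# Example (illustrative sketch only, based on the docstrings and tests): creating
# a subject set, linking subjects and reading them back. The ids are placeholders
# and an authenticated connection is assumed; this helper is never called.
def _subject_set_example():  # pragma: no cover - illustration only
    subject_set = SubjectSet()
    subject_set.links.project = 1234
    subject_set.display_name = 'My first subject set'
    subject_set.save()
    subject_set.add([Subject(1), Subject(2)])    # wraps links.subjects.add()
    for subject in subject_set.subjects:         # resolved via SetMemberSubject
        print(subject.id)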
| 4,116 | 28.833333 | 100 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_subject_set.py
|
from __future__ import absolute_import, division, print_function
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
from panoptes_client.subject_set import SubjectSet
class TestSubjectSet(unittest.TestCase):
def test_create(self):
with patch('panoptes_client.panoptes.Panoptes') as pc:
pc.client().post = Mock(return_value=(
{
'subject_sets': [{
'id': 0,
'display_name': '',
}],
},
'',
))
subject_set = SubjectSet()
subject_set.links.project = 1234
subject_set.display_name = 'Name'
subject_set.save()
pc.client().post.assert_called_with(
'/subject_sets',
json={
'subject_sets': {
'display_name': 'Name',
'links': {
'project': 1234,
}
}
},
etag=None,
)
| 1,195 | 26.813953 | 64 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_bearer_expiry.py
|
from panoptes_client.panoptes import Panoptes
import datetime
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import patch
else:
from unittest.mock import patch
class MockDate(datetime.datetime):
_fake = None
@classmethod
def fake(cls, time):
cls._fake = time
@classmethod
def now(cls, tz=None):
return cls._fake
@patch('panoptes_client.panoptes.datetime', MockDate)
class TestBearer(unittest.TestCase):
def test_early(self):
target = datetime.datetime(2017, 1, 1, 10, 0, 0)
MockDate.fake(target)
client = Panoptes()
client.bearer_token = True
client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0)
assert client.valid_bearer_token() is True
def test_early_2(self):
target = datetime.datetime(2017, 1, 1, 11, 58, 0)
MockDate.fake(target)
client = Panoptes()
client.bearer_token = True
client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0)
assert client.valid_bearer_token() is True
def test_late(self):
target = datetime.datetime(2017, 1, 1, 14, 0, 0)
MockDate.fake(target)
client = Panoptes()
client.bearer_token = True
client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0)
assert client.valid_bearer_token() is False
def test_late_2(self):
target = datetime.datetime(2017, 1, 1, 12, 0, 1)
MockDate.fake(target)
client = Panoptes()
client.bearer_token = True
client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0)
assert client.valid_bearer_token() is False
def test_in_buffer(self):
target = datetime.datetime(2017, 1, 1, 11, 59, 0)
MockDate.fake(target)
client = Panoptes()
client.bearer_token = True
client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0)
assert client.valid_bearer_token() is False
def test_has_token(self):
client = Panoptes()
client.bearer_token = True
assert client.has_bearer_token() is True
def test_has_no_token(self):
client = Panoptes()
assert client.has_bearer_token() is False
| 2,261 | 22.810526 | 71 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_workflow.py
|
import unittest
import sys
from panoptes_client.panoptes import PanoptesAPIException
from panoptes_client.workflow import Workflow
from panoptes_client.caesar import Caesar
if sys.version_info <= (3, 0):
from mock import patch
else:
from unittest.mock import patch
class TestWorkflow(unittest.TestCase):
def setUp(self):
super().setUp()
caesar_post_patch = patch.object(Caesar, 'http_post')
caesar_put_patch = patch.object(Caesar, 'http_put')
caesar_get_patch = patch.object(Caesar, 'http_get')
self.caesar_post_mock = caesar_post_patch.start()
self.caesar_put_mock = caesar_put_patch.start()
self.caesar_get_mock = caesar_get_patch.start()
self.addCleanup(caesar_post_patch.stop)
self.addCleanup(caesar_get_patch.stop)
self.addCleanup(caesar_put_patch.stop)
def test_save_to_caesar_update(self):
workflow = Workflow(1)
workflow.save_to_caesar()
self.caesar_put_mock.assert_called_once()
self.caesar_put_mock.assert_called_with('workflows/1', json={
'workflow': {
'id': workflow.id,
'public_extracts': False,
'public_reductions': False
}
})
def test_save_to_caesar_create(self):
self.caesar_get_mock.side_effect = PanoptesAPIException("Couldn't find Workflow with 'id'=1")
workflow = Workflow(1)
workflow.save_to_caesar()
self.caesar_post_mock.assert_called_once()
self.caesar_post_mock.assert_called_with('workflows', json={
'workflow': {
'id': workflow.id,
'public_extracts': False,
'public_reductions': False
}
})
def test_save_to_caesar_raises_err(self):
self.caesar_get_mock.side_effect = PanoptesAPIException("Some other error not workflow_id missing error")
with self.assertRaises(PanoptesAPIException):
workflow = Workflow(1)
workflow.save_to_caesar()
self.caesar_post_mock.assert_not_called()
self.caesar_put_mock.assert_not_called()
def test_caesar_subject_extracts(self):
workflow = Workflow(1)
workflow.caesar_subject_extracts(1234)
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/extractors/all/extracts', params={'subject_id': 1234})
def test_caesar_subject_reductions_get_all_reductions(self):
workflow = Workflow(1)
workflow.caesar_subject_reductions(1234)
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/subjects/1234/reductions')
def test_caesar_subject_reductions_filter_by_reducer_key(self):
workflow = Workflow(1)
workflow.caesar_subject_reductions(1234, 'test_reducer_key')
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/subjects/1234/reductions?reducer_key=test_reducer_key')
def test_caesar_extractors(self):
workflow = Workflow(1)
workflow.caesar_extractors()
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/extractors')
def test_caesar_reducers(self):
workflow = Workflow(1)
workflow.caesar_reducers()
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/reducers')
def test_caesar_rules_subject_rules(self):
workflow = Workflow(1)
workflow.caesar_rules('subject')
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/subject_rules')
def test_caesar_rules_user_rules(self):
workflow = Workflow(1)
workflow.caesar_rules('user')
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/user_rules')
def test_caesar_effects_subject_rule_effects(self):
workflow = Workflow(1)
workflow.caesar_effects('subject', 123)
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/subject_rules/123/subject_rule_effects')
def test_caesar_effects_user_rule_effects(self):
workflow = Workflow(1)
workflow.caesar_effects('user', 123)
self.caesar_get_mock.assert_called_with(
f'workflows/{workflow.id}/user_rules/123/user_rule_effects')
def test_add_caesar_extractor_valid_extractor(self):
workflow = Workflow(1)
workflow.add_caesar_extractor('external', 'alice')
self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/extractors', json={
'extractor': {
'type': 'external',
'key': 'alice',
'task_key': 'T0'
}
})
def test_add_caesar_extractor_invalid_extractor(self):
with self.assertRaises(ValueError) as extractor_error:
workflow = Workflow(1)
workflow.add_caesar_extractor('invalid_extractor_type', 'invalid')
self.caesar_post_mock.assert_not_called()
self.assertEqual('Invalid extractor type',
str(extractor_error.exception))
def test_add_caesar_reducer_valid_reducer(self):
workflow = Workflow(1)
workflow.add_caesar_reducer('count', 'count_key')
self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/reducers', json={
'reducer': {
'type': 'count',
'key': 'count_key'
}
})
def test_add_caesar_reducer_invalid_reducer(self):
with self.assertRaises(ValueError) as invalid_reducer_err:
workflow = Workflow(1)
workflow.add_caesar_reducer('invalid_reducer_type', 'key')
self.caesar_post_mock.assert_not_called()
self.assertEqual('Invalid reducer type', str(
invalid_reducer_err.exception))
def test_add_caesar_rule_valid_rule_type(self):
workflow = Workflow(1)
condition_string = '["gte", ["lookup", "complete.0", 0], ["const", 3]]'
workflow.add_caesar_rule(condition_string, 'subject')
self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/subject_rules', json={
'subject_rule': {
'condition_string': condition_string
}
})
def test_add_caesar_rule_invalid_rule_type(self):
with self.assertRaises(ValueError) as invalid_rule_type_err:
workflow = Workflow(1)
condition_string = '["gte", ["lookup", "complete.0", 0], ["const", 3]]'
invalid_rule_type = 'invalid_type'
workflow.add_caesar_rule(condition_string, invalid_rule_type)
self.caesar_post_mock.assert_not_called()
expected_message = f'Invalid rule type: {invalid_rule_type}. Rule types can either be by "subject" or "user"'
self.assertEqual(expected_message, str(invalid_rule_type_err.exception))
def test_add_caesar_rule_effect_valid_effect(self):
workflow = Workflow(1)
retire_reason = {
'reason': 'other'
}
workflow.add_caesar_rule_effect('subject', 12, 'retire_subject', retire_reason)
expected_endpoint = f'workflows/{workflow.id}/subject_rules/{12}/subject_rule_effects'
self.caesar_post_mock.assert_called_with(expected_endpoint, json={
'subject_rule_effect': {
'action': 'retire_subject',
'config': retire_reason
}
})
def test_add_caesar_rule_effect_invalid_effect(self):
with self.assertRaises(ValueError) as invalid_effect_err:
workflow = Workflow(1)
workflow.add_caesar_rule_effect('subject', 12, 'promote_user', {'some': 'config'})
self.caesar_post_mock.assert_not_called()
self.assertEqual('Invalid action for rule type', str(invalid_effect_err.exception))
| 7,882 | 36.36019 | 117 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_linkcollection.py
|
from __future__ import absolute_import, division, print_function
from builtins import str
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import Mock, patch
else:
from unittest.mock import Mock, patch
from panoptes_client.panoptes import LinkCollection, ObjectNotSavedException
LINKED_OBJECT_IDS = ('1', '2', '3', '4')
class MockPanoptesObject(Mock):
def __init__(self, raw=None, etag=None):
r = super(MockPanoptesObject, self).__init__()
self.id = str(raw)
return r
class TestLinkCollection(unittest.TestCase):
def link_collection(self, ids=LINKED_OBJECT_IDS, parent=None):
if parent:
mock_parent = parent
else:
mock_parent = Mock()
mock_slug = Mock()
lc = LinkCollection(
cls=MockPanoptesObject,
slug=mock_slug,
parent=mock_parent,
linked_objects=[str(_id) for _id in ids],
)
return lc, mock_parent, mock_slug
def test_contains_id_int(self):
self.assertTrue(1 in self.link_collection()[0])
def test_contains_id_str(self):
self.assertTrue('1' in self.link_collection()[0])
def test_contains_obj(self):
mock_obj = MockPanoptesObject(1)
self.assertTrue(mock_obj in self.link_collection()[0])
def test_not_contains_id_int(self):
self.assertFalse(9 in self.link_collection()[0])
def test_not_contains_id_str(self):
self.assertFalse('9' in self.link_collection()[0])
def test_not_contains_obj(self):
mock_obj = MockPanoptesObject(9)
self.assertFalse(mock_obj in self.link_collection()[0])
def test_getitem_exists(self):
lc = self.link_collection()[0]
for i, _id in zip(range(len(LINKED_OBJECT_IDS)), LINKED_OBJECT_IDS):
self.assertEqual(lc[i].id, _id)
def test_getitem_doesnt_exist(self):
with self.assertRaises(IndexError):
self.link_collection()[0][len(LINKED_OBJECT_IDS)]
def test_iter_empty(self):
m = Mock()
for _ in self.link_collection([])[0]:
m()
m.assert_not_called()
def test_iter_full(self):
m = Mock()
for _ in self.link_collection()[0]:
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS))
def test_add_empty_noop(self):
m = Mock()
lc, parent, slug = self.link_collection([])
lc.add([])
parent.http_post.assert_not_called()
for _ in lc:
m()
m.assert_not_called()
def test_add_id_single(self):
lc, parent, slug = self.link_collection([])
lc.add(1)
parent.http_post.assert_called_with(
'{}/links/{}'.format(parent.id, slug),
json={slug: ['1']},
retry=True,
)
m = Mock()
for obj in lc:
self.assertEqual(obj.id, '1')
m()
self.assertEqual(m.call_count, 1)
def test_add_id_list(self):
lc, parent, slug = self.link_collection([])
lc.add(LINKED_OBJECT_IDS)
parent.http_post.assert_called_with(
'{}/links/{}'.format(parent.id, slug),
json={slug: list(LINKED_OBJECT_IDS)},
retry=True,
)
m = Mock()
for obj, _id in zip(lc, LINKED_OBJECT_IDS):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS))
def test_add_object_single(self):
lc, parent, slug = self.link_collection([])
lc.add(MockPanoptesObject(1))
parent.http_post.assert_called_with(
'{}/links/{}'.format(parent.id, slug),
json={slug: ['1']},
retry=True,
)
m = Mock()
for obj in lc:
self.assertEqual(obj.id, '1')
m()
self.assertEqual(m.call_count, 1)
def test_add_object_list(self):
lc, parent, slug = self.link_collection([])
lc.add([MockPanoptesObject(_id) for _id in LINKED_OBJECT_IDS])
parent.http_post.assert_called_with(
'{}/links/{}'.format(parent.id, slug),
json={slug: list(LINKED_OBJECT_IDS)},
retry=True,
)
m = Mock()
for obj, _id in zip(lc, LINKED_OBJECT_IDS):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS))
def test_add_readonly(self):
with patch('panoptes_client.panoptes.LinkResolver') as lr:
lr.isreadonly = lambda s: True
lc = self.link_collection()[0]
with self.assertRaises(NotImplementedError):
lc.add(1)
def test_add_not_saved(self):
parent = MockPanoptesObject()
parent.id = None
lc = self.link_collection(parent=parent)[0]
with self.assertRaises(ObjectNotSavedException):
lc.add(1)
def test_remove_empty_noop(self):
m = Mock()
lc, parent, slug = self.link_collection()
lc.remove([])
parent.http_delete.assert_not_called()
for obj, _id in zip(lc, LINKED_OBJECT_IDS):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS))
def test_remove_id_single(self):
m = Mock()
lc, parent, slug = self.link_collection()
lc.remove(LINKED_OBJECT_IDS[0])
parent.http_delete.assert_called_with(
'{}/links/{}/1'.format(parent.id, slug),
retry=True,
)
for obj, _id in zip(lc, LINKED_OBJECT_IDS[1:]):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS) - 1)
def test_remove_id_list(self):
m = Mock()
removed_ids = LINKED_OBJECT_IDS[:-1]
lc, parent, slug = self.link_collection()
lc.remove(removed_ids)
parent.http_delete.assert_called_with(
'{}/links/{}/{}'.format(
parent.id,
slug,
",".join(removed_ids),
),
retry=True,
)
for obj, _id in zip(lc, LINKED_OBJECT_IDS[-1:]):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, 1)
def test_remove_object_single(self):
m = Mock()
lc, parent, slug = self.link_collection()
lc.remove(MockPanoptesObject(LINKED_OBJECT_IDS[0]))
parent.http_delete.assert_called_with(
'{}/links/{}/1'.format(parent.id, slug),
retry=True,
)
for obj, _id in zip(lc, LINKED_OBJECT_IDS[1:]):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS) - 1)
def test_remove_object_list(self):
m = Mock()
removed_ids = LINKED_OBJECT_IDS[:-1]
lc, parent, slug = self.link_collection()
lc.remove([MockPanoptesObject(_id) for _id in removed_ids])
parent.http_delete.assert_called_with(
'{}/links/{}/{}'.format(
parent.id,
slug,
",".join(removed_ids),
),
retry=True,
)
for obj, _id in zip(lc, LINKED_OBJECT_IDS[-1:]):
self.assertEqual(obj.id, _id)
m()
self.assertEqual(m.call_count, 1)
def test_remove_readonly(self):
with patch('panoptes_client.panoptes.LinkResolver') as lr:
lr.isreadonly = lambda s: True
lc = self.link_collection()[0]
with self.assertRaises(NotImplementedError):
lc.remove(1)
def test_remove_not_saved(self):
parent = MockPanoptesObject()
parent.id = None
lc = self.link_collection(parent=parent)[0]
with self.assertRaises(ObjectNotSavedException):
lc.remove(1)
| 7,899 | 31.113821 | 76 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_set_member_subject.py
|
import unittest
from panoptes_client.set_member_subject import SetMemberSubject
class TestSetMemberSubject(unittest.TestCase):
def test_find_id(self):
sms = SetMemberSubject.find(1000)
self.assertEqual(sms.id, '1000')
| 241 | 23.2 | 63 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_inaturalist.py
|
from __future__ import absolute_import, division, print_function
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
from panoptes_client.inaturalist import Inaturalist
class TestInaturalist(unittest.TestCase):
def test_inat_import(self):
with patch('panoptes_client.panoptes.Panoptes.client') as pc:
pc().post = Mock(return_value=200)
Inaturalist.inat_import(16462, 4)
pc().post.assert_called_with(
'/inaturalist/import',
json={
'taxon_id': 16462,
'subject_set_id': 4,
'updated_since': None
}
)
def test_inat_import_updated_since(self):
with patch('panoptes_client.panoptes.Panoptes.client') as pc:
pc().post = Mock(return_value=200)
Inaturalist.inat_import(16462, 4, '2022-10-31')
pc().post.assert_called_with(
'/inaturalist/import',
json={
'taxon_id': 16462,
'subject_set_id': 4,
'updated_since': '2022-10-31'
}
)
| 1,249 | 28.069767 | 69 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_linkresolver.py
|
from __future__ import absolute_import, division, print_function
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import Mock
else:
from unittest.mock import Mock
from panoptes_client.panoptes import LinkResolver
class TestLinkResolver(unittest.TestCase):
def test_set_new_link(self):
parent = Mock()
parent.raw = {'links': {}}
target = Mock()
resolver = LinkResolver(parent)
resolver.newlink = target
self.assertEqual(parent.raw['links'].get('newlink', None), target)
| 555 | 22.166667 | 74 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_project.py
|
from __future__ import absolute_import, division, print_function
import unittest
from panoptes_client import Project
from panoptes_client.panoptes import PanoptesAPIException
class TestProject(unittest.TestCase):
def test_find_id(self):
p = Project.find(1)
self.assertEqual(p.id, '1')
def test_find_slug(self):
p = Project.find(slug='zooniverse/snapshot-supernova')
self.assertEqual(p.id, '1')
def test_find_unknown_id(self):
p = Project.find(0)
self.assertEqual(p, None)
def test_find_unknown_slug(self):
with self.assertRaises(PanoptesAPIException):
Project.find(slug='invalid_slug')
| 677 | 26.12 | 64 |
py
|
panoptes-python-client
|
panoptes-python-client-master/panoptes_client/tests/test_http_retries.py
|
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
from panoptes_client.panoptes import (
HTTP_RETRY_LIMIT,
Panoptes,
PanoptesAPIException,
)
class TestRetries(unittest.TestCase):
def setUp(self):
self.http_result = Mock()
self.client = Panoptes()
self.client.valid_bearer_token = Mock()
self.client.valid_bearer_token.return_value = True
self.client.bearer_token = '1234'
self.client.session = Mock()
self.client.session.request = Mock()
self.client.session.request.return_value = self.http_result
def assert_retry(self, *args, **kwargs):
self.assertTrue(kwargs.get('retry', False))
result = Mock()
result.status_code = 204
return result
def assert_no_retry(self, *args, **kwargs):
self.assertFalse(kwargs.get('retry', True))
result = Mock()
result.status_code = 204
return result
@patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1)
def test_request_retry_success(self):
self.http_result.status_code = 200
self.assertEqual(
self.client.http_request('GET', '', retry=True),
self.http_result,
)
self.assertEqual(
self.client.session.request.call_count,
1,
)
@patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1)
def test_request_retry_no_success(self):
self.http_result.status_code = 500
with self.assertRaises(PanoptesAPIException):
self.assertEqual(
self.client.http_request('GET', '', retry=True),
self.http_result,
)
self.assertEqual(
self.client.session.request.call_count,
HTTP_RETRY_LIMIT,
)
@patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1)
def test_request_no_retry_success(self):
self.http_result.status_code = 200
self.assertEqual(
self.client.http_request('GET', '', retry=False),
self.http_result,
)
self.assertEqual(
self.client.session.request.call_count,
1,
)
@patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1)
def test_request_no_retry_no_success(self):
self.http_result.status_code = 500
with self.assertRaises(PanoptesAPIException):
self.assertEqual(
self.client.http_request('GET', '', retry=False),
self.http_result,
)
self.assertEqual(
self.client.session.request.call_count,
1,
)
def test_json_retry(self):
self.client.http_request = self.assert_retry
self.client.json_request('', '', retry=True)
def test_json_no_retry(self):
self.client.http_request = self.assert_no_retry
self.client.json_request('', '', retry=False)
def test_get_retry(self):
self.client.json_request = self.assert_retry
self.client.get('', retry=True)
def test_get_no_retry(self):
self.client.json_request = self.assert_no_retry
self.client.get('', retry=False)
def test_get_request_retry(self):
self.client.http_request = self.assert_retry
self.client.get_request('', retry=True)
def test_get_request_no_retry(self):
self.client.http_request = self.assert_no_retry
self.client.get_request('', retry=False)
def test_put_retry(self):
self.client.json_request = self.assert_retry
self.client.put('', retry=True)
def test_put_no_retry(self):
self.client.json_request = self.assert_no_retry
self.client.put('', retry=False)
def test_put_request_retry(self):
self.client.http_request = self.assert_retry
self.client.put_request('', retry=True)
def test_put_request_no_retry(self):
self.client.http_request = self.assert_no_retry
self.client.put_request('', retry=False)
def test_post_retry(self):
self.client.json_request = self.assert_retry
self.client.post('', retry=True)
def test_post_no_retry(self):
self.client.json_request = self.assert_no_retry
self.client.post('', retry=False)
def test_post_request_retry(self):
self.client.http_request = self.assert_retry
self.client.post_request('', retry=True)
def test_post_request_no_retry(self):
self.client.http_request = self.assert_no_retry
self.client.post_request('', retry=False)
def test_delete_retry(self):
self.client.json_request = self.assert_retry
self.client.delete('', retry=True)
def test_delete_no_retry(self):
self.client.json_request = self.assert_no_retry
self.client.delete('', retry=False)
def test_delete_request_retry(self):
self.client.http_request = self.assert_retry
self.client.delete_request('', retry=True)
def test_delete_request_no_retry(self):
self.client.http_request = self.assert_no_retry
self.client.delete_request('', retry=False)
| 5,208 | 30.762195 | 67 |
py
|
panoptes-python-client
|
panoptes-python-client-master/docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Panoptes Client documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 6 15:06:45 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import pkg_resources
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Panoptes Client'
copyright = u'2016-{}, Zooniverse'.format(datetime.datetime.now().year)
author = u'Zooniverse'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = pkg_resources.require("panoptes_client")[0].version
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Panoptes Client v0.4.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PanoptesClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PanoptesClient.tex', u'Panoptes Client Documentation',
u'Zooniverse', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'panoptesclient', u'Panoptes Client Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PanoptesClient', u'Panoptes Client Documentation',
author, 'PanoptesClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 10,041 | 28.107246 | 80 |
py
|
SLT-FAI
|
SLT-FAI-main/main.py
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with a softmax loss function. Every 1000 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py --seed 1234
OR
python training_nli.py --seed 1234 --model_name_or_path bert-base-uncased
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
import os
import json
import copy
import gzip
import csv
import random
import torch
import numpy as np
import argparse
import shutil
from tensorboardX import SummaryWriter
from eval import eval_nli_unsup, eval_chinese_unsup
from data_utils import load_datasets, save_samples, load_senteval_binary, load_senteval_sst, load_senteval_trec, load_senteval_mrpc, load_chinese_tsv_data
from correlation_visualization import corr_visualization
logging.basicConfig(format='%(asctime)s - %(filename)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
def parse_args():
"""
Argument settings.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--train_data", type=str,
choices=["sst2", "trec", "mrpc", "mr", "cr", "subj", "mpqa", "nli", "stssick", "stsb"],
default="nli", help="Training data, on NLI or STS dataset")
parser.add_argument("--no_pair", action="store_true", help="If provided, do not pair two training texts")
parser.add_argument("--data_proportion", type=float, default=1.0, help="The proportion of training dataset")
parser.add_argument("--do_upsampling", action="store_true",
help="If provided, do upsampling to original size of training dataset")
parser.add_argument("--no_shuffle", action="store_true", help="If provided, do not shuffle the training data")
parser.add_argument("--seed", type=int, default=1, help="Random seed for reproducing experimental results")
parser.add_argument("--model_name_or_path", type=str, default="bert-base-uncased",
help="The model path or model name of pre-trained model")
parser.add_argument("--continue_training", action="store_true",
help="Whether to continue training or just from BERT")
parser.add_argument("--model_save_path", type=str, default='./output/debug', help="Custom output dir")
parser.add_argument("--tensorboard_log_dir", type=str, default=None, help="Custom tensorboard log dir")
parser.add_argument("--force_del", action="store_true",
help="Delete the existing save_path and do not report an error")
parser.add_argument("--use_apex_amp", action="store_true", help="Use apex amp or not")
parser.add_argument("--apex_amp_opt_level", type=str, default=None, help="The opt_level argument in apex amp")
parser.add_argument("--batch_size", type=int, default=16, help="Training mini-batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=0.0000005, help="The learning rate")
parser.add_argument("--evaluation_steps", type=int, default=200, help="The steps between every evaluations")
parser.add_argument("--max_seq_length", type=int, default=64, help="The max sequence length")
parser.add_argument("--loss_rate_scheduler", type=int, default=0,
help="The loss rate scheduler, default strategy 0 "
"(i.e. do nothing, see AdvCLSoftmaxLoss for more details)")
parser.add_argument("--no_dropout", action="store_true", help="Add no dropout when training")
parser.add_argument("--adv_loss_cof", type=float, default=1.0, help="frequency discriminator!!!")
parser.add_argument("--mask_loss_cof", type=float, default=1.0, help="mask discriminator!!!")
parser.add_argument("--low_rate", type=float, default=0.5, help="low words rate!!!")
parser.add_argument("--warmup_epoch", type=float, default=0.5, help="warmup discriminator!!!")
parser.add_argument("--high_rank", type=int, default=2, help="high-rank discriminator!!!")
parser.add_argument("--mask_high", action="store_true", help="mask high-frequency words!!!")
parser.add_argument("--mask_rate", type=float, default=0.2, help="mask tokens rate!!!")
parser.add_argument("--device", type=str, default='cuda:2', help="device")
parser.add_argument("--concatenation_sent_max_square", action="store_true",
help="Concat max-square features of two text representations when training classification")
parser.add_argument("--normal_loss_stop_grad", action="store_true", help="Use stop gradient to normal loss or not")
parser.add_argument("--adv_training", action="store_true", help="Use adversarial training or not")
parser.add_argument("--adv_loss_rate", type=float, default=1.0, help="The adversarial loss rate")
parser.add_argument("--noise_norm", type=float, default=0.5, help="The perturbation norm")
parser.add_argument("--adv_loss_stop_grad", action="store_true", help="Use stop gradient to adversarial loss or not")
parser.add_argument("--use_simsiam", action="store_true", help="Use simsiam training or not")
parser.add_argument("--use_simclr", action="store_true", help="Use simclr training or not")
parser.add_argument("--add_cl", action="store_true", help="Use contrastive loss or not")
parser.add_argument("--data_augmentation_strategy", type=str, default="adv", choices=["adv", "none", "meanmax", "shuffle", "cutoff", "shuffle-cutoff", "shuffle+cutoff", "shuffle_embeddings"], help="The data augmentation strategy in contrastive learning")
parser.add_argument("--cutoff_direction", type=str, default=None,
help="The direction of cutoff strategy, row, column or random")
parser.add_argument("--cutoff_rate", type=float, default=None, help="The rate of cutoff strategy, in (0.0, 1.0)")
parser.add_argument("--cl_loss_only", action="store_true",
help="Ignore the main task loss (e.g. the CrossEntropy loss) and use the contrastive loss only")
parser.add_argument("--cl_rate", type=float, default=0.01, help="The contrastive loss rate")
parser.add_argument("--regularization_term_rate", type=float, default=0.0,
help="The loss rate of regularization term for contrastive learning")
parser.add_argument("--cl_type", type=str, default="nt_xent", help="The contrastive loss type, nt_xent or cosine")
parser.add_argument("--temperature", type=float, default=0.1, help="The temperature for contrastive loss")
parser.add_argument("--mapping_to_small_space", type=int, default=None,
help="Whether to mapping sentence representations to a low dimension space (similar to SimCLR)"
" and give the dimension")
parser.add_argument("--add_contrastive_predictor", type=str, default=None,
help="Whether to use a predictor on one side (similar to SimSiam) "
"and give the projection added to which side (normal or adv)")
parser.add_argument("--add_projection", action="store_true",
help="Add projection layer before predictor, only be considered "
"when add_contrastive_predictor is not None")
parser.add_argument("--projection_norm_type", type=str, default=None,
help="The norm type used in the projection layer beforn predictor")
parser.add_argument("--projection_hidden_dim", type=int, default=None,
help="The hidden dimension of the projection or predictor MLP")
parser.add_argument("--projection_use_batch_norm", action="store_true",
help="Whether to use batch normalization in the hidden layer of MLP")
parser.add_argument("--contrastive_loss_stop_grad", type=str, default=None,
help="Use stop gradient to contrastive loss (and which mode to apply) or not")
parser.add_argument("--da_final_1", type=str, default=None,
help="The final 5 data augmentation strategies for view1 "
"(none, shuffle, token_cutoff, feature_cutoff, dropout, span)")
parser.add_argument("--da_final_2", type=str, default=None,
help="The final 5 data augmentation strategies for view2 "
"(none, shuffle, token_cutoff, feature_cutoff, dropout, span)")
parser.add_argument("--cutoff_rate_final_1", type=float, default=None,
help="The final cutoff/dropout rate for view1")
parser.add_argument("--cutoff_rate_final_2", type=float, default=None,
help="The final cutoff/dropout rate for view2")
parser.add_argument("--chinese_dataset", default="none",
choices=["none", "atec_ccks", "bq", "lcqmc", "pawsx", "stsb"],
help="Train and evaluate on Chinese STS tasks")
parser.add_argument("--patience", default=10, type=int, help="The patience for early stop")
return parser.parse_args()
def set_seed(seed: int, for_multi_gpu: bool):
"""
    Set the random seed(s) for reproducibility.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if for_multi_gpu:
torch.cuda.manual_seed_all(seed)
def main(args):
logging.info(f"Training arguments: {args.__dict__}")
set_seed(args.seed, for_multi_gpu=False)
    # Check if the datasets exist. If not, download and extract them
nli_dataset_path = 'datasets/AllNLI.tsv.gz'
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(nli_dataset_path):
util.http_get('https://sbert.net/datasets/AllNLI.tsv.gz', nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
# Read the dataset
train_batch_size = args.batch_size
bert_model_type_str = "base" if "base" in args.model_name_or_path else "large"
# time_str = datetime.now().strftime("%Y%m%d%H%M%S")
adv_loss_rate_str = "" if args.adv_loss_rate == 1.0 else f"-rate{args.adv_loss_rate}"
adv_param_str = "" if not args.adv_training else f"adv-{args.noise_norm:.3f}{'-stopgrad' if args.adv_loss_stop_grad else ''}{adv_loss_rate_str}_"
cl_mapping_to_lower_str = "" if args.mapping_to_small_space is None else f"-simclr-{args.projection_hidden_dim}-{args.mapping_to_small_space}-{'bn' if args.projection_use_batch_norm else ''}"
cl_add_predictor_str = "" if args.add_contrastive_predictor is None else f"-simsiam{'p' if args.add_projection else ''}{args.projection_norm_type if args.projection_norm_type is not None else ''}-{args.projection_hidden_dim}-{args.add_contrastive_predictor}-{'bn' if args.projection_use_batch_norm else ''}"
cl_type_str = "" if args.cl_type == "nt_xent" else "-cosine"
cl_param_str = "" if not args.add_cl else f"cl-rate{args.cl_rate}-t{args.temperature}{'-stopgrad'+args.contrastive_loss_stop_grad if args.contrastive_loss_stop_grad else ''}{cl_mapping_to_lower_str}{cl_add_predictor_str}{cl_type_str}_"
model_save_path = args.model_save_path or os.path.join("./output",
f"{args.train_data}_bert-{bert_model_type_str}_{args.batch_size}-{args.num_epochs}_{'maxsqr_' if args.concatenation_sent_max_square else ''}{'stopgrad_' if args.normal_loss_stop_grad else ''}{adv_param_str}{cl_param_str}seed={args.seed}")
if os.path.exists(model_save_path):
if args.force_del:
shutil.rmtree(model_save_path)
os.mkdir(model_save_path)
else:
raise ValueError("Existing output_dir for save model")
else:
os.mkdir(model_save_path)
# Tensorboard writer
tensorboard_writer = SummaryWriter(args.tensorboard_log_dir or os.path.join(model_save_path, "logs"))
with open(os.path.join(model_save_path, "args.json"), "w") as f:
json.dump(args.__dict__, f, indent=4, ensure_ascii=False)
with open(os.path.join(model_save_path, "command.txt"), "w") as f:
CUDA_VISIBLE_DEVICES = os.environ.get("CUDA_VISIBLE_DEVICES")
f.write(f"CUDA_VISIBLE_DEVICES={CUDA_VISIBLE_DEVICES} python3 {' '.join(sys.argv)}")
if args.continue_training:
if args.no_dropout:
sentence_bert_config_path = os.path.join(args.model_name_or_path, "0_Transformer", "sentence_bert_config.json")
sentence_bert_config_dict = json.load(open(sentence_bert_config_path, "r"))
# change config
new_config = copy.deepcopy(sentence_bert_config_dict)
new_config["attention_probs_dropout_prob"] = 0.0
new_config["hidden_dropout_prob"] = 0.0
json.dump(new_config, open(sentence_bert_config_path, "w"), indent=2)
# load model
model = SentenceTransformer(args.model_name_or_path, device=args.device)
# recover config
json.dump(sentence_bert_config_dict, open(sentence_bert_config_path, "w"), indent=2)
else:
model = SentenceTransformer(args.model_name_or_path, device=args.device)
else:
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
if args.no_dropout:
word_embedding_model = models.Transformer(args.model_name_or_path, attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.0)
else:
word_embedding_model = models.Transformer(args.model_name_or_path)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True, pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
if args.use_simsiam:
projection_model = models.MLP3(hidden_dim=args.projection_hidden_dim, norm=args.projection_norm_type)
model = SentenceTransformer(modules=[word_embedding_model, projection_model, pooling_model],
device=args.device)
else:
model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device=args.device)
model.tensorboard_writer = tensorboard_writer
model.max_seq_length = args.max_seq_length
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
if args.chinese_dataset != "none":
train_samples = load_chinese_tsv_data(args.chinese_dataset, "train", 47900)
elif args.train_data == "nli":
# Read the AllNLI.tsv.gz file and create the training dataset
logging.info("Read AllNLI train dataset")
train_samples = []
with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'train':
label_id = label2int[row['label']]
if args.no_pair:
assert args.cl_loss_only, "no pair texts only used when contrastive loss only"
train_samples.append(InputExample(texts=[row['sentence1']]))
train_samples.append(InputExample(texts=[row['sentence2']]))
else:
train_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
elif args.train_data == "stssick":
# Read data/downstream/STS and data/downstream/SICK and create the training dataset
logging.info("Read STS and SICK train dataset")
train_samples = load_datasets(datasets=["sts12", "sts13", "sts14", "sts15", "sts16", "stsb", "sickr"], need_label=False, use_all_unsupervised_texts=True, no_pair=args.no_pair)
elif args.train_data == "stsb":
logging.info("Read STS Benchmark train dataset")
train_samples = load_datasets(datasets=["stsb"], need_label=False, use_all_unsupervised_texts=True, no_pair=args.no_pair)
elif args.train_data in ["mr", "cr", "subj", "mpqa"]:
logging.info(f"Read {args.train_data.upper()} train dataset")
train_samples = load_senteval_binary(args.train_data, need_label=False, use_all_unsupervised_texts=True, no_pair=True)
elif args.train_data == "sst2":
logging.info(f"Read {args.train_data.upper()} train dataset")
train_samples = load_senteval_sst(need_label=False, use_all_unsupervised_texts=True, no_pair=True)
elif args.train_data == "trec":
logging.info(f"Read {args.train_data.upper()} train dataset")
train_samples = load_senteval_trec(need_label=False, use_all_unsupervised_texts=True, no_pair=True)
elif args.train_data == "mrpc":
logging.info(f"Read {args.train_data.upper()} train dataset")
train_samples = load_senteval_mrpc(need_label=False, use_all_unsupervised_texts=True, no_pair=True)
if args.data_proportion != 1.0:
num_sample_used = int(args.data_proportion * len(train_samples))
logging.info(f"Using {100 * args.data_proportion:.0f}% training data - {num_sample_used} (total {len(train_samples)}) samples")
random.shuffle(train_samples)
train_samples = train_samples[:num_sample_used]
if args.do_upsampling:
train_samples = train_samples * int(1.0 / args.data_proportion)
logging.info(f"Do upsampling, final size of training dataset is {len(train_samples)}")
save_samples(train_samples, os.path.join(model_save_path, "train_texts.txt"))
train_dataset = SentencesDataset(train_samples, model=model)
train_dataloader = DataLoader(train_dataset, shuffle=not args.no_shuffle, batch_size=train_batch_size)
if args.adv_training and args.add_cl:
train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int), concatenation_sent_max_square=args.concatenation_sent_max_square,
use_adversarial_training=args.adv_training, noise_norm=args.noise_norm,
adv_loss_stop_grad=args.adv_loss_stop_grad, adversarial_loss_rate=args.adv_loss_rate,
use_contrastive_loss=args.add_cl, contrastive_loss_type=args.cl_type,
contrastive_loss_rate=args.cl_rate, temperature=args.temperature,
contrastive_loss_stop_grad=args.contrastive_loss_stop_grad,
mapping_to_small_space=args.mapping_to_small_space,
add_contrastive_predictor=args.add_contrastive_predictor,
projection_hidden_dim=args.projection_hidden_dim,
projection_use_batch_norm=args.projection_use_batch_norm, add_projection=args.add_projection,
projection_norm_type=args.projection_norm_type, contrastive_loss_only=args.cl_loss_only,
data_augmentation_strategy=args.data_augmentation_strategy, cutoff_direction=args.cutoff_direction,
cutoff_rate=args.cutoff_rate, regularization_term_rate=args.regularization_term_rate,
loss_rate_scheduler=args.loss_rate_scheduler, adv_loss_cof=args.adv_loss_cof, mask_loss_cof=args.mask_loss_cof,
low_rate=args.low_rate, high_rank=args.high_rank)
elif args.adv_training:
        train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
                                             num_labels=len(label2int), concatenation_sent_max_square=args.concatenation_sent_max_square,
                                             use_adversarial_training=args.adv_training, noise_norm=args.noise_norm,
                                             adv_loss_stop_grad=args.adv_loss_stop_grad, adversarial_loss_rate=args.adv_loss_rate,
                                             adv_loss_cof=args.adv_loss_cof, mask_loss_cof=args.mask_loss_cof,
                                             low_rate=args.low_rate, high_rank=args.high_rank)
elif args.add_cl:
train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int),
concatenation_sent_max_square=args.concatenation_sent_max_square,
use_contrastive_loss=args.add_cl, contrastive_loss_type=args.cl_type,
                                             contrastive_loss_rate=args.cl_rate, temperature=args.temperature,
                                             contrastive_loss_stop_grad=args.contrastive_loss_stop_grad,
                                             mapping_to_small_space=args.mapping_to_small_space,
                                             add_contrastive_predictor=args.add_contrastive_predictor,
                                             projection_hidden_dim=args.projection_hidden_dim,
                                             projection_use_batch_norm=args.projection_use_batch_norm,
                                             add_projection=args.add_projection, projection_norm_type=args.projection_norm_type,
                                             contrastive_loss_only=args.cl_loss_only,
                                             data_augmentation_strategy=args.data_augmentation_strategy,
                                             cutoff_direction=args.cutoff_direction, cutoff_rate=args.cutoff_rate,
                                             no_pair=args.no_pair, regularization_term_rate=args.regularization_term_rate,
                                             loss_rate_scheduler=args.loss_rate_scheduler,
                                             data_augmentation_strategy_final_1=args.da_final_1,
                                             data_augmentation_strategy_final_2=args.da_final_2,
                                             cutoff_rate_final_1=args.cutoff_rate_final_1,
                                             cutoff_rate_final_2=args.cutoff_rate_final_2,
                                             adv_loss_cof=args.adv_loss_cof, mask_loss_cof=args.mask_loss_cof,
                                             low_rate=args.low_rate, high_rank=args.high_rank)
elif args.use_simclr:
train_loss = losses.SimCLRLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int), concatenation_sent_max_square=args.concatenation_sent_max_square, data_augmentation_strategy=args.data_augmentation_strategy, temperature=args.temperature)
elif args.use_simsiam:
train_loss = losses.SimSiamLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int), concatenation_sent_max_square=args.concatenation_sent_max_square, data_augmentation_strategy=args.data_augmentation_strategy, temperature=args.temperature)
else:
train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int), concatenation_sent_max_square=args.concatenation_sent_max_square, normal_loss_stop_grad=args.normal_loss_stop_grad, adv_loss_cof=args.adv_loss_cof, mask_loss_cof=args.mask_loss_cof, low_rate=args.low_rate, high_rank=args.high_rank)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'dev':
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
if args.chinese_dataset != "none":
# randomly sample 2000 examples for development
dev_samples = load_chinese_tsv_data(args.chinese_dataset, "dev", 2000)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, batch_size=train_batch_size, name='sts-dev', main_similarity=SimilarityFunction.COSINE)
# Configure the training
num_epochs = args.num_epochs
model.num_steps_total = math.ceil(len(train_dataset) * num_epochs / train_batch_size)
warmup_steps = math.ceil(len(train_dataset) * num_epochs / train_batch_size * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
optimizer_params={'lr': args.learning_rate, 'eps': 1e-6, 'correct_bias': False},
evaluation_steps=args.evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
use_apex_amp=args.use_apex_amp,
apex_amp_opt_level=args.apex_amp_opt_level,
early_stop_patience=args.patience,
warmup_epoch=args.warmup_epoch,
mask_high=args.mask_high,
mask_rate=args.mask_rate)
# Test on STS Benchmark
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'test':
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
test_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
if args.chinese_dataset != "none":
test_samples = load_chinese_tsv_data(args.chinese_dataset, "test")
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=train_batch_size,
name='sts-test',
main_similarity=SimilarityFunction.COSINE)
test_evaluator(model, output_path=model_save_path)
if args.chinese_dataset == "none":
# Test on unsupervised dataset (mainly STS related dataset)
eval_nli_unsup(model_save_path, main_similarity=SimilarityFunction.COSINE)
eval_nli_unsup(model_save_path, main_similarity=SimilarityFunction.COSINE, last2avg=True)
corr_visualization(model_save_path)
else:
for dataset_name in ("atec_ccks", "bq", "lcqmc", "pawsx", "stsb"):
eval_chinese_unsup(model_save_path, dataset_name, batch_size=16, main_similarity=SimilarityFunction.COSINE)
eval_chinese_unsup(model_save_path, dataset_name, batch_size=16, main_similarity=SimilarityFunction.COSINE, last2avg=True)
eval_chinese_unsup(model_save_path, dataset_name, batch_size=16, main_similarity=SimilarityFunction.COSINE, firstlastavg=True)
corr_visualization(model_save_path, chinese_dataset=args.chinese_dataset)
if __name__ == "__main__":
args = parse_args()
main(args)
| 27,312 | 68.498728 | 1,079 |
py
|
SLT-FAI
|
SLT-FAI-main/testAtt.py
|
import torch
import pandas as pd
from transformers import BertModel, BertTokenizer
# model_path = "./output/unsup-consert-base-07260900/0_Transformer/" # baseline 32
# model_path = "./output/unsup-consert-base-07261600/0_Transformer/" # adversarial
model_path = "./output/unsup-consert-base-07271600/0_Transformer/" # ours
# model_path = 'bert-base-uncased'
DEVICE = 'cuda:3'
model = BertModel.from_pretrained(model_path)
tokenizer = BertTokenizer.from_pretrained(model_path)
example = 'a man is playing a bamboo flute.'
inputs = tokenizer(example, return_tensors='pt')
outputs = model(**inputs, output_attentions=True)
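# With output_attentions=True, outputs[-1] is the tuple of per-layer attention tensors, each shaped
# (batch, num_heads, seq_len, seq_len); the slice below keeps the last layer and a single head
# (index -3), presumably the head chosen for visualization in the exported spreadsheet.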
att = outputs[-1][-1][:, -3, :, :].squeeze()
data = pd.DataFrame(att.detach().numpy())
writer = pd.ExcelWriter('attentions.xlsx')
data.to_excel(writer, 'page_0', float_format='%.5f')
writer.save()
writer.close()
| 838 | 35.478261 | 83 |
py
|
SLT-FAI
|
SLT-FAI-main/testGRL.py
|
from torch.autograd import Function
from typing import Any, Optional, Tuple
import torch.nn as nn
import torch
import numpy as np
from tqdm import tqdm
import pandas as pd
import json
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, KernelPCA, IncrementalPCA, TruncatedSVD
from transformers import BertModel, BertTokenizer
from data_utils import load_datasets
path = './data/labels/bookcorpus/labels.json'
# model_path = "./output/unsup-consert-base-06281629/0_Transformer/"
# model_path = "./output/unsup-consert-base-07092000/0_Transformer/" # uniform
# model_path = "./output/unsup-consert-base-07091600/0_Transformer/"
# model_path = 'bert-base-uncased'
# model_path = "./output/unsup-consert-base-07260900/0_Transformer/" # baseline 32
# model_path = "./output/unsup-consert-base-07261600/0_Transformer/" # adversarial
model_path = "./output/unsup-consert-base-07271600/0_Transformer/" # ours
low_rate = 0.5
tokenizer = BertTokenizer.from_pretrained(model_path)
device = 'cuda:2'
with open(path, 'r') as f:
token_dic = json.load(f)
freq_list = [token_dic[i] for i in token_dic]
num = 0
for i in freq_list:
if i == 0:
num += 1
freq_list.sort()
thres = freq_list[num + int((len(freq_list) - num) * low_rate)]
index_dic = {}
freq_label = {}
for k, v in token_dic.items():
index = tokenizer.convert_tokens_to_ids(k)
index_dic[index] = v
freq_label[index] = 1 if v < thres else 0
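# Worked toy example of the thresholding above (hypothetical counts): with
# token_dic = {"a": 0, "b": 3, "c": 10, "d": 20} and low_rate = 0.5, one count is zero (num = 1),
# the sorted list is [0, 3, 10, 20], and thres = freq_list[1 + int(3 * 0.5)] = freq_list[2] = 10,
# so "a" and "b" get freq_label 1 (low-frequency) while "c" and "d" get 0 (high-frequency).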
# tsne = TSNE(n_components=2, init='pca', verbose=1)
pca = TruncatedSVD(n_components=2)
train_samples = load_datasets(datasets=["sts12", "sts13", "sts14", "sts15", "sts16", "stsb", "sickr"],
need_label=False, use_all_unsupervised_texts=True)
model = BertModel.from_pretrained(model_path).to(device)
res = []
label_0 = []
label_1 = []
i = 0
with torch.no_grad():
for sample in tqdm(train_samples[:10000]):
sample = sample.texts[0]
sentence = tokenizer(sample, return_tensors='pt').to(device)
for index in sentence['input_ids'][0]:
if freq_label[index.item()] == 0:
label_0.append(i)
else:
label_1.append(i)
i += 1
# emb = model(**sentence)[0][:, 0, :]
emb = torch.mean(model(**sentence)[0], dim=1)
# emb = model(**sentence)[0].squeeze()
res.append(emb)
res = torch.cat(res, dim=0).to('cpu')
temp = pca.fit_transform(res.numpy())
# data_0 = pd.DataFrame(temp[label_0])
# data_1 = pd.DataFrame(temp[label_1])
data = pd.DataFrame(temp)
writer = pd.ExcelWriter('embedding.xlsx')  # write the results to an Excel file
# data_0.to_excel(writer, 'page_0', float_format='%.5f')  # 'page_1' is the sheet name written into the Excel file
# data_1.to_excel(writer, 'page_1', float_format='%.5f')
data.to_excel(writer, 'page_0', float_format='%.5f')
writer.save()
writer.close()
| 3,001 | 31.630435 | 102 |
py
|
SLT-FAI
|
SLT-FAI-main/eval_pretrain.py
|
import os
import json
import logging
import sys
from sentence_transformers import SentenceTransformer, InputExample, LoggingHandler
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
logging.basicConfig(format='%(asctime)s - %(filename)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
def load_model(model_path: str):
model = SentenceTransformer(model_path)
logging.info("Model successfully loaded")
return model
def load_paired_samples(input_file: str, label_file: str, scale=5.0):
with open(input_file, "r") as f:
input_lines = [line.strip() for line in f.readlines()]
with open(label_file, "r") as f:
label_lines = [line.strip() for line in f.readlines()]
new_input_lines, new_label_lines = [], []
for idx in range(len(label_lines)):
if label_lines[idx]:
new_input_lines.append(input_lines[idx])
new_label_lines.append(label_lines[idx])
input_lines = new_input_lines
label_lines = new_label_lines
samples = []
for input_line, label_line in zip(input_lines, label_lines):
sent1, sent2 = input_line.split("\t")
samples.append(InputExample(texts=[sent1, sent2], label=float(label_line)/scale))
return samples
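# Expected on-disk layout, inferred from the loaders in this file: each STS.input.<name>.txt line
# holds one tab-separated sentence pair, and the matching line of STS.gs.<name>.txt holds a gold
# similarity score in [0, 5] that is rescaled to [0, 1] above. Hypothetical content:
#
#   STS.input.headlines.txt : A man is playing a guitar.<TAB>A man plays a guitar.
#   STS.gs.headlines.txt    : 4.600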
def eval_sts(model, year, dataset_names, batch_size=16, output_path="./", main_similarity=None):
logging.info(f"Evaluation on STS{year} dataset")
sts_data_path = f"./data/downstream/STS/STS{year}-en-test"
all_samples = []
results = {}
sum_score = 0.0
weighted_sum_score = 0.0
for dataset_name in dataset_names:
input_file = os.path.join(sts_data_path, f"STS.input.{dataset_name}.txt")
label_file = os.path.join(sts_data_path, f"STS.gs.{dataset_name}.txt")
sub_samples = load_paired_samples(input_file, label_file)
# sub_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(sub_samples, batch_size=batch_size, name=f"sts-{year}-{dataset_name}", main_similarity=main_similarity)
# sub_best_result = sub_evaluator(model, output_path=output_path)
# results[dataset_name] = {
# "num_samples": len(sub_samples),
# "best_spearman": sub_best_result
# }
# sum_score += sub_best_result
# weighted_sum_score += sub_best_result * len(sub_samples)
all_samples.extend(sub_samples)
logging.info(f"Loaded examples from STS{year} dataset, total {len(all_samples)} examples")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(all_samples, batch_size=batch_size, name=f"sts-{year}", main_similarity=main_similarity)
best_result = evaluator(model, output_path=output_path)
logging.info(f"Results on STS{year}: {best_result:.6f}")
results["all"] = {
"num_samples": len(all_samples),
"best_spearman_joint": best_result,
"best_spearman_mean": sum_score / len(dataset_names),
"best_spearman_wmean": weighted_sum_score / len(all_samples)
}
with open(os.path.join(output_path, f"STS{year}-results.json"), "w") as f:
json.dump(results, f, indent=4, ensure_ascii=False)
return best_result
def eval_sts12(model, batch_size=16, output_path="./", main_similarity=None):
dataset_names = ["MSRpar", "MSRvid", "SMTeuroparl", "surprise.OnWN", "surprise.SMTnews"]
return eval_sts(model, "12", dataset_names, batch_size=batch_size, output_path=output_path, main_similarity=main_similarity)
def eval_sts13(model, batch_size=16, output_path="./", main_similarity=None):
dataset_names = ["headlines", "OnWN", "FNWN"]
return eval_sts(model, "13", dataset_names, batch_size=batch_size, output_path=output_path, main_similarity=main_similarity)
def eval_sts14(model, batch_size=16, output_path="./", main_similarity=None):
dataset_names = ["images", "OnWN", "tweet-news", "deft-news", "deft-forum", "headlines"]
return eval_sts(model, "14", dataset_names, batch_size=batch_size, output_path=output_path, main_similarity=main_similarity)
def eval_sts15(model, batch_size=16, output_path="./", main_similarity=None):
dataset_names = ["answers-forums", "answers-students", "belief", "headlines", "images"]
return eval_sts(model, "15", dataset_names, batch_size=batch_size, output_path=output_path, main_similarity=main_similarity)
def eval_sts16(model, batch_size=16, output_path="./", main_similarity=None):
dataset_names = ["answer-answer", "headlines", "plagiarism", "postediting", "question-question"]
return eval_sts(model, "16", dataset_names, batch_size=batch_size, output_path=output_path, main_similarity=main_similarity)
def eval_stsbenchmark(model, batch_size=16, output_path="./", main_similarity=None):
logging.info("Evaluation on STSBenchmark dataset")
sts_benchmark_data_path = "./data/downstream/STS/STSBenchmark/sts-test.csv"
with open(sts_benchmark_data_path, "r") as f:
lines = [line.strip() for line in f if line.strip()]
samples = []
for line in lines:
_, _, _, _, label, sent1, sent2 = line.split("\t")
samples.append(InputExample(texts=[sent1, sent2], label=float(label) / 5.0))
logging.info(f"Loaded examples from STSBenchmark dataset, total {len(samples)} examples")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(samples, batch_size=batch_size, name="sts-benchmark", main_similarity=main_similarity)
best_result = evaluator(model, output_path=output_path)
logging.info(f"Results on STSBenchmark: {best_result:.6f}")
results = {
"num_samples": len(samples),
"best_spearman": best_result
}
with open(os.path.join(output_path, "STSBenchmark-results.json"), "w") as f:
json.dump(results, f, indent=4, ensure_ascii=False)
return best_result
def eval_sickr(model, batch_size=16, output_path="./", main_similarity=None):
logging.info("Evaluation on SICK (relatedness) dataset")
sick_data_path = "./data/downstream/SICK/SICK_test_annotated.txt"
with open(sick_data_path, "r") as f:
lines = [line.strip() for line in f if line.strip()]
samples = []
for line in lines[1:]:
_, sent1, sent2, label, _ = line.split("\t")
samples.append(InputExample(texts=[sent1, sent2], label=float(label) / 5.0))
logging.info(f"Loaded examples from SICK dataset, total {len(samples)} examples")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(samples, batch_size=batch_size, name="sick-r", main_similarity=main_similarity)
best_result = evaluator(model, output_path=output_path)
logging.info(f"Results on SICK (relatedness): {best_result:.6f}")
results = {
"num_samples": len(samples),
"best_spearman": best_result
}
with open(os.path.join(output_path, "SICK-R-results.json"), "w") as f:
json.dump(results, f, indent=4, ensure_ascii=False)
return best_result
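# Illustrative usage (the model directory below is hypothetical): the script takes a saved
# SentenceTransformer directory as its single positional argument, e.g.
#
#   python eval_pretrain.py ./output/my-finetuned-model/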
if __name__ == "__main__":
model_path = sys.argv[1]
main_similarity = SimilarityFunction.COSINE
model = load_model(model_path)
output_path = os.path.join(model_path, "sts_eval")
if not os.path.exists(output_path):
os.mkdir(output_path)
logging.info(model_path)
score_sum = 0.0
score_sum += eval_stsbenchmark(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sickr(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sts12(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sts13(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sts14(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sts15(model, output_path=output_path, main_similarity=main_similarity)
score_sum += eval_sts16(model, output_path=output_path, main_similarity=main_similarity)
logging.info(f"Average score in unsupervised experiments: {score_sum / 7:.6f}")
| 8,100 | 50.272152 | 178 |
py
|
SLT-FAI
|
SLT-FAI-main/correlation_visualization.py
|
import os
import torch
import matplotlib
import argparse
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from sentence_transformers import SentenceTransformer, util
from data_utils import load_datasets, load_chinese_tsv_data
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, required=True, help="The model path to visualization")
args = parser.parse_args()
return args
def draw_figure(sims, labels, model_path, to_figures_dir=False):
fig = plt.figure(figsize=(6.4, 6.4))
ax = plt.axes((.085, .15, .905, .84))
points = ax.scatter(sims, labels, label='bert-base-nli', s=0.5)
# Add some text for labels, title and custom x-axis tick labels, etc.
plt.xlabel('similarity', fontsize=12)
plt.ylabel('label', fontsize=12)
plt.xticks([-1, 1], fontsize=10)
plt.yticks([0, 1], fontsize=10)
# ax.set_xticklabels(labels)
plt.xlim(-1.2, 1.2)
plt.ylim(-0.1, 1.1)
# ax.legend(fontsize=9, markerscale=1.0, loc=0)
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate(f"{height:.2f}",
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', fontsize=11)
# autolabel(rects1)
# autolabel(rects2)
# autolabel(rects3)
fig.tight_layout()
# save to files in both png and pdf format
from matplotlib.backends.backend_pdf import PdfPages
if to_figures_dir:
plt.savefig(f"./figures/corr_{os.path.basename(model_path.rstrip('/'))}.png", format="png")
plt.savefig(os.path.join(model_path, "stsb_corr.png"), format="png")
if to_figures_dir:
with PdfPages(f"./figures/corr_{os.path.basename(model_path.rstrip('/'))}.pdf") as pdf:
plt.savefig(pdf, format="pdf")
with PdfPages(os.path.join(model_path, "stsb_corr.pdf")) as pdf:
plt.savefig(pdf, format="pdf")
def corr_visualization(model_path, chinese_dataset="none", to_figures_dir=False):
stsb_samples = load_datasets(datasets=["stsb"], need_label=True, use_all_unsupervised_texts=False, no_pair=False)
if chinese_dataset != "none":
stsb_samples = load_chinese_tsv_data(chinese_dataset, "test")
model = SentenceTransformer(model_path)
all_texts = []
for sample in stsb_samples:
all_texts.extend(sample.texts)
all_labels = [sample.label for sample in stsb_samples]
all_reps = model.encode(all_texts)
all_sims = []
for idx in range(0, len(all_reps), 2):
sim = util.pytorch_cos_sim(all_reps[idx], all_reps[idx + 1]).item()
all_sims.append(sim)
assert len(all_sims) == len(all_labels) == len(stsb_samples)
print(f"similarity mean: {torch.tensor(all_sims).mean().item()}")
print(f"similarity std: {torch.tensor(all_sims).std().item()}")
print(f"similarity max: {max(all_sims)}")
print(f"similarity min: {min(all_sims)}")
print(f"labels mean: {torch.tensor(all_labels).mean().item()}")
print(f"labels std: {torch.tensor(all_labels).std().item()}")
print(f"labels max: {max(all_labels)}")
print(f"labels min: {min(all_labels)}")
draw_figure(all_sims, all_labels, model_path, to_figures_dir=to_figures_dir)
if __name__ == "__main__":
args = parse_args()
corr_visualization(args.model_path, to_figures_dir=True)
| 3,587 | 38.428571 | 117 |
py
|
SLT-FAI
|
SLT-FAI-main/testUA.py
|
import torch
import tqdm
import os
import time
import argparse
from torch.utils.data import DataLoader
from torch import nn, Tensor
from torch.nn.functional import normalize
from transformers import BertModel, BertTokenizer
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers import models, losses
from data_utils import load_datasets
from sentence_transformers.util import batch_to_device
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional, Set
parser = argparse.ArgumentParser()
# model_path = 'bert-base-uncased'
# model_path = "./output/unsup-consert-base-06281629/0_Transformer/" # consert
# model_path = "./output/unsup-consert-base-07260900/0_Transformer/" # baseline 32
# model_path = "./output/unsup-consert-base-07261600/0_Transformer/" # adversarial
# model_path = "./output/unsup-consert-base-07271600/0_Transformer/" # ours
parser.add_argument("--model", default='bert-base-uncased')
parser.add_argument("--dataset", default="stsb", help="sts12, sts13, sts14, sts15, sts16, stsb, sickr")
parser.add_argument("--device", default='cuda:3')
parser.add_argument("--p", type=int, default=2)
parser.add_argument("--root", default='au-result')
args = parser.parse_args()
model_path = args.model
# model_path = "./output/unsup-consert-base-07092000/0_Transformer/"
tokenizer = BertTokenizer.from_pretrained(model_path)
device = args.device
def _recover_to_origin_keys(sentence_feature: Dict[str, Tensor], ori_keys: Set[str]):
return {k: v for k, v in sentence_feature.items() if k in ori_keys}
def _data_aug(model, sentence_feature, name, ori_keys, cutoff_rate=0.2):
assert name in ("none", "shuffle", "token_cutoff", "feature_cutoff", "dropout", "span")
sentence_feature = _recover_to_origin_keys(sentence_feature, ori_keys)
if name == "none":
pass # do nothing
elif name == "shuffle":
model[0].auto_model.set_flag("data_aug_shuffle", True)
elif name == "token_cutoff":
model[0].auto_model.set_flag("data_aug_cutoff", True)
model[0].auto_model.set_flag("data_aug_cutoff.direction", "row")
model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
elif name == "span":
model[0].auto_model.set_flag("data_aug_span", True)
model[0].auto_model.set_flag("data_aug_span.rate", cutoff_rate)
elif name == "feature_cutoff":
model[0].auto_model.set_flag("data_aug_cutoff", True)
model[0].auto_model.set_flag("data_aug_cutoff.direction", "column")
model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
elif name == "dropout":
model[0].auto_model.set_flag("data_aug_cutoff", True)
model[0].auto_model.set_flag("data_aug_cutoff.direction", "random")
model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
rep = model(sentence_feature)["sentence_embedding"]
return rep, sentence_feature['token_embeddings']
train_samples = load_datasets(datasets=[args.dataset],
need_label=False, use_all_unsupervised_texts=True, no_pair=True)
word_embedding_model = models.Transformer(model_path)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True, pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model]).to(device)
train_dataset = SentencesDataset(train_samples, model=model)
train_dataloader = DataLoader(train_dataset, shuffle=False, batch_size=1)
train_dataloader.collate_fn = model.smart_batching_collate
align = []
all_rep = []
for data in tqdm.tqdm(train_dataloader):
features, labels = batch_to_device(data, device)
sentence_feature_a = features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep, _ = _data_aug(model, sentence_feature_a, 'none', ori_feature_keys)
rep_a_view1, _ = _data_aug(model, sentence_feature_a, 'feature_cutoff', ori_feature_keys, 0.2)
rep_a_view2, _ = _data_aug(model, sentence_feature_a, 'shuffle', ori_feature_keys)
rep, rep_a_view1, rep_a_view2 = normalize(rep), normalize(rep_a_view1), normalize(rep_a_view2)
align.append(torch.norm(rep_a_view1 - rep_a_view2, p=args.p).item() ** 2)
all_rep.append(rep.detach().to('cpu'))
uniform = []
for i in tqdm.tqdm(range(len(all_rep))):
for j in range(i + 1, len(all_rep)):
uniform.append(torch.exp(torch.norm(all_rep[i] - all_rep[j], p=args.p) ** 2 * (-2)))
alignment = sum(align) / len(align)
uniformv = torch.log(sum(uniform) / len(uniform))
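# What the two numbers computed above measure, as implemented here (p = args.p, default 2):
#   alignment  = mean_i ||z1_i - z2_i||_p ** 2                    over L2-normalized augmented views of the same sentence
#   uniformity = log mean_{i<j} exp(-2 * ||z_i - z_j||_p ** 2)    over the normalized un-augmented embeddings
# These correspond to the alignment/uniformity style metrics commonly used to analyse sentence embeddings.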
print('sample scale is {}'.format(len(align)))
print('alignment: {:.4f}'.format(alignment))
print('uniform: {:.4f}'.format(uniformv))
if not os.path.exists(args.root):
os.makedirs(args.root)
path = os.path.join(args.root, '{}-{}.txt'.format(args.dataset, time.asctime()))
with open(path, 'w') as f:
f.write('dataset: {}, '.format(args.dataset))
f.write('model: {}, '.format(args.model))
f.write('sample scale is {}, '.format(len(align)))
f.write('alignment: {:.4f}, '.format(alignment))
f.write('uniform: {:.4f}'.format(uniformv))
f.close()
| 5,313 | 47.309091 | 107 |
py
|
SLT-FAI
|
SLT-FAI-main/analysis_rep_space.py
|
import gzip
import csv
import argparse
import json
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
from eval import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default="./final_output/bert-base-uncased", help="The saved model path")
parser.add_argument("--output_dir", type=str, default="./tmp/bert-base-uncased", help="The output dir")
parser.add_argument("--filter_by", type=str, default="freq", choices=["freq", "tfidf"], help="Use which metric to filter token ids")
parser.add_argument("--num_filter_freq_rank_leq_than", type=int, default=50)
return parser.parse_args()
def save_stsb_info(output_file, split="test"):
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
batch_size = 96
if split == "test":
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'test':
score = float(row['score']) / 5.0 #Normalize score to range 0 ... 1
test_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
print(f"Number of samples: {len(test_samples)}")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=batch_size, name='nouse')
elif split == "dev":
dev_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'dev':
score = float(row['score']) / 5.0 #Normalize score to range 0 ... 1
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
print(f"Number of samples: {len(dev_samples)}")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, batch_size=batch_size, name='nouse')
model = SentenceTransformer("./final_output/bert-base-uncased/")
model[0].feature_cache = []
evaluator(model, output_path="./final_output/bert-base-uncased/")
print(f"Number of texts: {len(model[0].feature_cache)}")
with open(output_file, "w") as f:
for obj in model[0].feature_cache:
new_lst = [str(item) for item in obj["input_id"] if item not in [0]]
f.write(f"{' '.join(new_lst)}\n")
def load_sample_features(input_file):
with open(input_file, "r") as f:
lines = f.readlines()
features = [[int(item) for item in line.strip().split()] for line in lines]
return features
def compute_token_features(sample_features, method="freq"):
assert method in ["freq", "tfidf"]
if method == "freq":
id2freq = {}
for sample_feature in sample_features:
for token_id in sample_feature:
if token_id not in id2freq:
id2freq[token_id] = 1
else:
id2freq[token_id] += 1
return id2freq
elif method == "tfidf":
raise NotImplementedError
def filter_freq_rank_leq_than(num):
def token_valid(token_id, token2feature, token2rank):
if token_id == 0:
return False
if token_id not in token2rank:
return True
if token2rank[token_id] <= num:
return False
else:
return True
return token_valid
class TokenChecker:
def __init__(self, check_func, token2feature, token2rank):
self.check_func = check_func
self.token2feature = token2feature
self.token2rank = token2rank
def __call__(self, token_id):
return self.check_func(token_id, self.token2feature, self.token2rank)
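# Minimal sketch (hypothetical numbers and token id) of how the pieces above are wired together in
# restrict_eval_nli_unsup below, where the checker is attached to the pooling module and presumably
# used there to skip high-frequency tokens:
#
#   checker = TokenChecker(filter_freq_rank_leq_than(50), token2feature, token2rank)
#   checker(0)      # padding id -> always False
#   checker(2003)   # False if this id ranks within the top ~50 by frequency, True otherwise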
def restrict_eval_nli_unsup(model_path, output_path, main_similarity=SimilarityFunction.COSINE, last2avg=True, restrict_method="freq", num_filter_freq_rank_leq_than=50):
if not os.path.exists(output_path):
os.mkdir(output_path)
sample_features = load_sample_features("./tmp/stsb_test_features.txt")
token2feature = compute_token_features(sample_features, method=restrict_method)
token2feature_tuple = [(k, v) for k, v in token2feature.items()]
sorted_tuple = sorted(token2feature_tuple, key=lambda item: item[1], reverse=True)
with open(os.path.join(output_path, "token_features.txt"), "w") as f:
for token_id, feature in sorted_tuple:
f.write(f"{token_id}\t{feature}\n")
token2rank = {token_id: idx for idx, (token_id, _) in enumerate(sorted_tuple)}
json.dump(token2rank, open(os.path.join(output_path, "token_rank.json"), "w"), indent=4)
model = load_model(model_path, last2avg=last2avg)
model[1].token_checker = TokenChecker(filter_freq_rank_leq_than(num_filter_freq_rank_leq_than), token2feature, token2rank)
score_sts12 = eval_sts12(model, output_path=output_path, main_similarity=main_similarity)
score_sts13 = eval_sts13(model, output_path=output_path, main_similarity=main_similarity)
score_sts14 = eval_sts14(model, output_path=output_path, main_similarity=main_similarity)
score_sts15 = eval_sts15(model, output_path=output_path, main_similarity=main_similarity)
score_sts16 = eval_sts16(model, output_path=output_path, main_similarity=main_similarity)
score_stsb = eval_stsbenchmark(model, output_path=output_path, main_similarity=main_similarity)
score_sickr = eval_sickr(model, output_path=output_path, main_similarity=main_similarity)
score_sum = score_sts12 + score_sts13 + score_sts14 + score_sts15 + score_sts16 + score_stsb + score_sickr
score_avg = score_sum / 7.0
logging.info(f"Average score in unsupervised experiments: {score_avg:.6f}")
json.dump({
"sts12": score_sts12,
"sts13": score_sts13,
"sts14": score_sts14,
"sts15": score_sts15,
"sts16": score_sts16,
"stsb": score_stsb,
"sickr": score_sickr,
"average": score_avg
}, open(os.path.join(output_path, "summary.json"), "w"), indent=4)
return score_avg
if __name__ == "__main__":
# save_stsb_info("tmp/stsb_test_features.txt", split="test")
# save_stsb_info("tmp/stsb_dev_features.txt", split="dev")
args = parse_args()
restrict_eval_nli_unsup(args.model_path, args.output_dir, restrict_method=args.filter_by, num_filter_freq_rank_leq_than=args.num_filter_freq_rank_leq_than)
| 6,737 | 48.544118 | 169 |
py
|
SLT-FAI
|
SLT-FAI-main/data_utils.py
|
import os
import json
import random
import logging
import argparse
import io
from sentence_transformers import InputExample, LoggingHandler
logging.basicConfig(format='%(asctime)s - %(filename)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
def save_samples(samples, output_file):
with open(output_file, "w", encoding="utf-8") as f_out:
for sample in samples:
line = "\t".join(sample.texts)
f_out.write(f"{line}\n")
def load_paired_samples(input_file: str, label_file: str, need_label: bool = False, scale=5.0, no_pair=False):
if need_label:
assert not no_pair, "Only paired texts need label"
with open(input_file, "r") as f:
input_lines = [line.strip() for line in f.readlines()]
label_lines = [0]*len(input_lines) # dummy
if label_file!="":
with open(label_file, "r") as f:
label_lines = [line.strip() for line in f.readlines()]
if need_label:
new_input_lines, new_label_lines = [], []
for idx in range(len(label_lines)):
if label_lines[idx]:
new_input_lines.append(input_lines[idx])
new_label_lines.append(label_lines[idx])
input_lines = new_input_lines
label_lines = new_label_lines
samples = []
for input_line, label_line in zip(input_lines, label_lines):
sentences = input_line.split("\t")
if len(sentences)==2:
sent1, sent2 = sentences
else:
sent1, sent2 = sentences[0], None
if need_label:
samples.append(InputExample(texts=[sent1, sent2], label=float(label_line)/scale))
else:
if no_pair:
samples.append(InputExample(texts=[sent1]))
if sent2:
samples.append(InputExample(texts=[sent2]))
else:
samples.append(InputExample(texts=[sent1, sent2]))
return samples
def load_sts(year, dataset_names, need_label=False, no_pair=False):
logging.info(f"Loading STS{year} dataset")
sts_data_path = f"./data/downstream/STS/STS{year}-en-test"
all_samples = []
for dataset_name in dataset_names:
input_file = os.path.join(sts_data_path, f"STS.input.{dataset_name}.txt")
label_file = os.path.join(sts_data_path, f"STS.gs.{dataset_name}.txt")
sub_samples = load_paired_samples(input_file, label_file, need_label=need_label, no_pair=no_pair)
all_samples.extend(sub_samples)
logging.info(f"Loaded examples from STS{year} dataset, total {len(all_samples)} examples")
return all_samples
def load_senteval_binary(task_name, need_label=False, use_all_unsupervised_texts=True, no_pair=True):
if task_name=="mr":
dataset_names = ['rt-polarity.pos', 'rt-polarity.neg']
data_path = f"./data/downstream/MR"
elif task_name=="cr":
dataset_names = ['custrev.pos', 'custrev.neg']
data_path = f"./data/downstream/CR"
elif task_name=="subj":
dataset_names = ['subj.objective', 'subj.subjective']
data_path = f"./data/downstream/SUBJ"
elif task_name=="mpqa":
dataset_names = ['mpqa.pos', 'mpqa.neg']
data_path = f"./data/downstream/MPQA"
all_samples = []
for name in dataset_names:
input_file = os.path.join(data_path, name)
sub_samples = load_paired_samples(input_file, "", need_label=False, no_pair=True)
all_samples.extend(sub_samples)
logging.info(f"Loaded examples from {task_name.upper()} dataset, total {len(all_samples)} examples")
return all_samples
def load_senteval_sst(need_label=False, use_all_unsupervised_texts=True, no_pair=True):
data_path = f"./data/downstream/SST/binary"
samples = []
for name in ["sentiment-dev","sentiment-test","sentiment-train"]:
input_file = os.path.join(data_path, name)
for ln in open(input_file):
sent = ln.strip().split("\t")[0]
samples.append(InputExample(texts=[sent]))
logging.info(f"Loaded examples from SST dataset, total {len(samples)} examples")
return samples
def load_senteval_trec(need_label=False, use_all_unsupervised_texts=True, no_pair=True):
data_path = f"./data/downstream/TREC"
samples = []
for name in ["train_5500.label","TREC_10.label"]:
input_file = os.path.join(data_path, name)
for ln in io.open(input_file, 'r', encoding='latin-1'):
target, sample = ln.strip().split(':', 1)
sample = sample.split(' ', 1)[1]
samples.append(InputExample(texts=[sample]))
logging.info(f"Loaded examples from TREC dataset, total {len(samples)} examples")
return samples
def load_senteval_mrpc(need_label=False, use_all_unsupervised_texts=True, no_pair=True):
data_path = f"./data/downstream/MRPC"
samples = []
for name in ["msr_paraphrase_test.txt","msr_paraphrase_train.txt"]:
input_file = os.path.join(data_path, name)
for ln in open(input_file):
text = ln.strip().split('\t')
samples.append(InputExample(texts=[text[3]]))
samples.append(InputExample(texts=[text[4]]))
logging.info(f"Loaded examples from MRPC dataset, total {len(samples)} examples")
return samples
def load_sts12(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
dataset_names = ["MSRpar", "MSRvid", "SMTeuroparl", "surprise.OnWN", "surprise.SMTnews"]
return load_sts("12", dataset_names, need_label=need_label, no_pair=no_pair)
def load_sts13(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
dataset_names = ["headlines", "OnWN", "FNWN"]
return load_sts("13", dataset_names, need_label=need_label, no_pair=no_pair)
def load_sts14(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
dataset_names = ["images", "OnWN", "tweet-news", "deft-news", "deft-forum", "headlines"]
return load_sts("14", dataset_names, need_label=need_label, no_pair=no_pair)
def load_sts15(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
dataset_names = ["answers-forums", "answers-students", "belief", "headlines", "images"]
return load_sts("15", dataset_names, need_label=need_label, no_pair=no_pair)
def load_sts16(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
dataset_names = ["answer-answer", "headlines", "plagiarism", "postediting", "question-question"]
return load_sts("16", dataset_names, need_label=need_label, no_pair=no_pair)
def load_stsbenchmark(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
if need_label:
assert not no_pair, "Only paired texts need label"
logging.info("Loading STSBenchmark dataset")
all_samples = []
if use_all_unsupervised_texts:
splits = ["train", "dev", "test"]
else:
splits = ["test"]
for split in splits:
sts_benchmark_data_path = f"./data/downstream/STS/STSBenchmark/sts-{split}.csv"
with open(sts_benchmark_data_path, "r") as f:
lines = [line.strip() for line in f if line.strip()]
samples = []
for line in lines:
_, _, _, _, label, sent1, sent2 = line.split("\t")
if need_label:
samples.append(InputExample(texts=[sent1, sent2], label=float(label) / 5.0))
else:
if no_pair:
samples.append(InputExample(texts=[sent1]))
samples.append(InputExample(texts=[sent2]))
else:
samples.append(InputExample(texts=[sent1, sent2]))
all_samples.extend(samples)
logging.info(f"Loaded examples from STSBenchmark dataset, total {len(all_samples)} examples")
return all_samples
def load_sickr(need_label=False, use_all_unsupervised_texts=True, no_pair=False):
if need_label:
assert not no_pair, "Only paired texts need label"
logging.info("Loading SICK (relatedness) dataset")
all_samples = []
if use_all_unsupervised_texts:
splits = ["train", "trial", "test_annotated"]
else:
splits = ["test_annotated"]
for split in splits:
sick_data_path = f"./data/downstream/SICK/SICK_{split}.txt"
with open(sick_data_path, "r") as f:
lines = [line.strip() for line in f if line.strip()]
samples = []
for line in lines[1:]:
_, sent1, sent2, label, _ = line.split("\t")
if need_label:
samples.append(InputExample(texts=[sent1, sent2], label=float(label) / 5.0))
else:
if no_pair:
samples.append(InputExample(texts=[sent1]))
samples.append(InputExample(texts=[sent2]))
else:
samples.append(InputExample(texts=[sent1, sent2]))
all_samples.extend(samples)
logging.info(f"Loaded examples from SICK dataset, total {len(all_samples)} examples")
return all_samples
def load_datasets(datasets=None, need_label=False, use_all_unsupervised_texts=True, no_pair=False):
load_function_mapping = {
"sts12": load_sts12,
"sts13": load_sts13,
"sts14": load_sts14,
"sts15": load_sts15,
"sts16": load_sts16,
"stsb": load_stsbenchmark,
"sickr": load_sickr
}
datasets = datasets or ["sts12", "sts13", "sts14", "sts15", "sts16", "stsb", "sickr"]
all_samples = []
for dataset in datasets:
func = load_function_mapping[dataset]
all_samples.extend(func(need_label=need_label, use_all_unsupervised_texts=use_all_unsupervised_texts, no_pair=no_pair))
logging.info(f"Loaded data from datasets {datasets}, total number of samples {len(all_samples)}")
return all_samples
def load_chinese_tsv_data(dataset_name, split, max_num_samples=None, need_label=False, no_pair=True):
assert dataset_name in ("atec_ccks", "bq", "lcqmc", "pawsx", "stsb")
assert split in ("train", "dev", "test")
base_data_path = "./data/chinese"
data_file = os.path.join(base_data_path, dataset_name, f"{split}.tsv")
all_samples = []
with open(data_file) as f:
lines = f.readlines()
for line in lines:
sent1, sent2, label = line.strip().split("\t")
if split == "train":
if need_label:
all_samples.append(InputExample(texts=[sent1, sent2], label=int(label)))
elif no_pair:
all_samples.append(InputExample(texts=[sent1]))
all_samples.append(InputExample(texts=[sent2]))
else:
all_samples.append(InputExample(texts=[sent1, sent2]))
else:
all_samples.append(InputExample(texts=[sent1, sent2], label=float(label)))
if max_num_samples is not None and max_num_samples < len(all_samples):
all_samples = random.sample(all_samples, max_num_samples)
return all_samples
if __name__ == "__main__":
samples = load_datasets(need_label=False, use_all_unsupervised_texts=True, no_pair=True)
print(samples[0])
| 11,144 | 43.939516 | 127 |
py
|