Dataset columns: repo (string, 2–99 chars), file (string, 13–225 chars), code (string, 0–18.3M chars), file_length (int64, 0–18.3M), avg_line_length (float64, 0–1.36M), max_line_length (int64, 0–4.26M), extension_type (string, 1 distinct value)
end-to-end-asd
end-to-end-asd-main/experiment_config.py
import torch.nn as nn
import torch.optim as optim

import models.graph_models as g3d


EASEE_R3D_18_inputs = {
    # Input files
    'csv_train_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_train_augmented.csv',
    'csv_val_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_val_augmented.csv',
    'csv_test_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_test_augmented.csv',

    # Data config
    'audio_dir': '/Dataset/ava_active_speaker/instance_wavs_time/',
    'video_dir': '/Dataset/ava_active_speaker/instance_crops_time/',
    'models_out': '/home/alcazajl/Models/ASC2/tan3d/',  # save directory

    # Pretrained weights
    'video_pretrain_weights': '/home/alcazajl/Models/Pretrained/R3D/r3d18_K_200ep.pth'
}

EASEE_R3D_50_inputs = {
    # Input files
    'csv_train_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_train_augmented.csv',
    'csv_val_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_val_augmented.csv',
    'csv_test_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_test_augmented.csv',

    # Data config
    'audio_dir': '/Dataset/ava_active_speaker/instance_wavs_time/',
    'video_dir': '/Dataset/ava_active_speaker/instance_crops_time/',
    'models_out': '/home/alcazajl/Models/ASC2/tan3d/',  # save directory

    # Pretrained weights
    'video_pretrain_weights': '/home/alcazajl/Models/Pretrained/R3D/r3d50_K_200ep.pth'
}

EASEE_R3D_18_4lvl_params = {
    # Net arch
    'backbone': g3d.R3D18_4lvlGCN,

    # Optimization config
    'optimizer': optim.Adam,
    'criterion': nn.CrossEntropyLoss(),
    'learning_rate': 3e-4,
    'epochs': 15,
    'gamma': 0.1,

    # Batch config
    'batch_size': 17,
    'threads': 8
}

EASEE_R3D_50_4lvl_params = {
    # Net arch
    'backbone': g3d.R3D50_4lvlGCN,

    # Optimization config
    'optimizer': optim.Adam,
    'criterion': nn.CrossEntropyLoss(),
    'learning_rate': 3e-4,
    'epochs': 15,
    'gamma': 0.1,

    # Batch config
    'batch_size': 17,
    'threads': 8
}
2,020
27.871429
94
py
end-to-end-asd
end-to-end-asd-main/easee_R3D50.py
import os

import torch
from torchvision import transforms
from torch_geometric.loader import DataLoader
from torch.optim.lr_scheduler import MultiStepLR

import experiment_config as exp_conf
import util.custom_transforms as ct
from ez_io.logging import setup_optim_outputs
from datasets.graph_datasets import IndependentGraphDatasetETE3D
from models.graph_layouts import get_spatial_connection_pattern
from models.graph_layouts import get_temporal_connection_pattern
from optimization.optimization_amp import optimize_easee
from util.command_line import unpack_command_line_args, get_default_arg_parser


if __name__ == '__main__':
    # Parse command line args
    command_line_args = get_default_arg_parser().parse_args()
    lr_arg, frames_per_clip, ctx_size, n_clips, strd, img_size = unpack_command_line_args(command_line_args)

    # Connection patterns
    scp = get_spatial_connection_pattern(ctx_size, n_clips)
    tcp = get_temporal_connection_pattern(ctx_size, n_clips)

    # R3D-50 experiment configs
    opt_config = exp_conf.EASEE_R3D_50_4lvl_params
    easee_config = exp_conf.EASEE_R3D_50_inputs

    # Data transforms
    image_size = (img_size, img_size)
    video_train_transform = transforms.Compose([transforms.Resize(image_size), ct.video_train])
    video_val_transform = transforms.Compose([transforms.Resize(image_size), ct.video_val])

    # Output config
    model_name = 'easee_R3D_50' + \
                 '_clip' + str(frames_per_clip) + \
                 '_ctx' + str(ctx_size) + \
                 '_len' + str(n_clips) + \
                 '_str' + str(strd)
    log, target_models = setup_optim_outputs(easee_config['models_out'],
                                             opt_config, model_name)

    # Create network and offload to GPU
    pretrain_weights_path = easee_config['video_pretrain_weights']
    ez_net = opt_config['backbone'](pretrain_weights_path)

    has_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if has_cuda else 'cpu')
    print('Cuda info', has_cuda, device)
    ez_net.to(device)

    # Optimization config
    criterion = opt_config['criterion']
    optimizer = opt_config['optimizer'](ez_net.parameters(),
                                        lr=opt_config['learning_rate'])
    scheduler = MultiStepLR(optimizer, milestones=[6, 8], gamma=0.1)

    # Data paths
    video_train_path = os.path.join(easee_config['video_dir'], 'train')
    audio_train_path = os.path.join(easee_config['audio_dir'], 'train')
    video_val_path = os.path.join(easee_config['video_dir'], 'val')
    audio_val_path = os.path.join(easee_config['audio_dir'], 'val')

    # Datasets and dataloaders
    d_train = IndependentGraphDatasetETE3D(audio_train_path, video_train_path,
                                           easee_config['csv_train_full'],
                                           n_clips, strd, ctx_size, frames_per_clip,
                                           scp, tcp, video_train_transform,
                                           do_video_augment=True, crop_ratio=0.95)
    d_val = IndependentGraphDatasetETE3D(audio_val_path, video_val_path,
                                         easee_config['csv_val_full'],
                                         n_clips, strd, ctx_size, frames_per_clip,
                                         scp, tcp, video_val_transform,
                                         do_video_augment=False)
    dl_train = DataLoader(d_train, batch_size=opt_config['batch_size'],
                          shuffle=True, num_workers=opt_config['threads'],
                          pin_memory=True)
    dl_val = DataLoader(d_val, batch_size=opt_config['batch_size'],
                        shuffle=True, num_workers=opt_config['threads'],
                        pin_memory=True)

    # Optimization loop
    model = optimize_easee(ez_net, dl_train, dl_val, device, criterion,
                           optimizer, scheduler,
                           num_epochs=opt_config['epochs'],
                           spatial_ctx_size=ctx_size, time_len=n_clips,
                           models_out=target_models, log=log)
4,566
43.77451
108
py
end-to-end-asd
end-to-end-asd-main/easee_R3D18.py
import os

import torch
from torchvision import transforms
from torch_geometric.loader import DataLoader
from torch.optim.lr_scheduler import MultiStepLR

import experiment_config as exp_conf
import util.custom_transforms as ct
from ez_io.logging import setup_optim_outputs
from datasets.graph_datasets import IndependentGraphDatasetETE3D
from models.graph_layouts import get_spatial_connection_pattern
from models.graph_layouts import get_temporal_connection_pattern
from optimization.optimization_amp import optimize_easee
from util.command_line import unpack_command_line_args, get_default_arg_parser


if __name__ == '__main__':
    # Parse command line args
    command_line_args = get_default_arg_parser().parse_args()
    lr_arg, frames_per_clip, ctx_size, n_clips, strd, img_size = unpack_command_line_args(command_line_args)

    # Graph head connection patterns
    scp = get_spatial_connection_pattern(ctx_size, n_clips)
    tcp = get_temporal_connection_pattern(ctx_size, n_clips)

    opt_config = exp_conf.EASEE_R3D_18_4lvl_params
    easee_config = exp_conf.EASEE_R3D_18_inputs

    # Data transforms
    image_size = (img_size, img_size)
    video_train_transform = transforms.Compose([transforms.Resize(image_size), ct.video_train])
    video_val_transform = transforms.Compose([transforms.Resize(image_size), ct.video_val])

    # Output config
    model_name = 'easee_R3D_18' + \
                 '_clip' + str(frames_per_clip) + \
                 '_ctx' + str(ctx_size) + \
                 '_len' + str(n_clips) + \
                 '_str' + str(strd)
    log, target_models = setup_optim_outputs(easee_config['models_out'],
                                             opt_config, model_name)

    # Create network and offload to GPU
    pretrain_weights_path = easee_config['video_pretrain_weights']
    ez_net = opt_config['backbone'](pretrain_weights_path)

    has_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if has_cuda else 'cpu')
    print('Cuda info', has_cuda, device)
    ez_net.to(device)

    # Optimization config
    criterion = opt_config['criterion']
    optimizer = opt_config['optimizer'](ez_net.parameters(),
                                        lr=opt_config['learning_rate'])
    scheduler = MultiStepLR(optimizer, milestones=[6, 8], gamma=0.1)

    # Data paths
    video_train_path = os.path.join(easee_config['video_dir'], 'train')
    audio_train_path = os.path.join(easee_config['audio_dir'], 'train')
    video_val_path = os.path.join(easee_config['video_dir'], 'val')
    audio_val_path = os.path.join(easee_config['audio_dir'], 'val')

    # Datasets and dataloaders
    d_train = IndependentGraphDatasetETE3D(audio_train_path, video_train_path,
                                           easee_config['csv_train_full'],
                                           n_clips, strd, ctx_size, frames_per_clip,
                                           scp, tcp, video_train_transform,
                                           do_video_augment=True, crop_ratio=0.95)
    d_val = IndependentGraphDatasetETE3D(audio_val_path, video_val_path,
                                         easee_config['csv_val_full'],
                                         n_clips, strd, ctx_size, frames_per_clip,
                                         scp, tcp, video_val_transform,
                                         do_video_augment=False)
    dl_train = DataLoader(d_train, batch_size=opt_config['batch_size'],
                          shuffle=True, num_workers=opt_config['threads'],
                          pin_memory=True)
    dl_val = DataLoader(d_val, batch_size=opt_config['batch_size'],
                        shuffle=True, num_workers=opt_config['threads'],
                        pin_memory=True)

    # Optimization loop
    model = optimize_easee(ez_net, dl_train, dl_val, device, criterion,
                           optimizer, scheduler,
                           num_epochs=opt_config['epochs'],
                           spatial_ctx_size=ctx_size, time_len=n_clips,
                           models_out=target_models, log=log)
4,585
43.524272
108
py
end-to-end-asd
end-to-end-asd-main/ez_io/file_util.py
import os
import csv


def postprocess_speech_label(speech_label):
    speech_label = int(speech_label)
    if speech_label == 2:  # 2 = SPEAKING_NOT_AUDIBLE, mapped to not speaking
        speech_label = 0
    return speech_label


def postprocess_entity_label(entity_label):
    entity_label = int(entity_label)
    if entity_label == 2:  # 2 = SPEAKING_NOT_AUDIBLE, mapped to not speaking
        entity_label = 0
    return entity_label


def csv_to_list(csv_path):
    with open(csv_path, 'r') as f:
        reader = csv.reader(f)
        as_list = list(reader)
    return as_list


def load_val_video_set():
    files = os.listdir('/ibex/ai/home/alcazajl/ava_active_speakers/csv/gt/ava_activespeaker_test_v1.0')
    videos = [f[:-18] for f in files]
    videos.sort()
    return videos


def load_train_video_set():
    files = os.listdir('/ibex/ai/home/alcazajl/ava_active_speakers/csv/gt/ava_activespeaker_train_v1.0')
    videos = [f[:-18] for f in files]
    videos.sort()
    return videos


def generate_av_mask(ctx_size, total_len):
    stride = ctx_size + 1
    audio_mask = []
    video_mask = []
    for i in range(0, total_len):
        if i % stride == 0:
            audio_mask.append(i)
        else:
            video_mask.append(i)
    return audio_mask, video_mask


def generate_avs_mask(ctx_size, total_len):
    stride = ctx_size + 2
    audio_mask = []
    sync_mask = []
    video_mask = []
    for i in range(0, total_len):
        if i % stride == 0:
            audio_mask.append(i)
        elif i % stride == 1:
            sync_mask.append(i)
        else:
            video_mask.append(i)
    return audio_mask, sync_mask, video_mask
1,673
24.363636
89
py
end-to-end-asd
end-to-end-asd-main/ez_io/logging.py
import os
import json


class Logger():
    def __init__(self, targetFile, separator=';'):
        self.targetFile = targetFile
        self.separator = separator

    def writeHeaders(self, headers):
        with open(self.targetFile, 'a') as fh:
            for aHeader in headers:
                fh.write(aHeader + self.separator)
            fh.write('\n')

    def writeDataLog(self, dataArray):
        with open(self.targetFile, 'a') as fh:
            for dataItem in dataArray:
                fh.write(str(dataItem) + self.separator)
            fh.write('\n')


def setup_optim_outputs(models_out, opt_config, experiment_name, headers=None):
    target_logs = os.path.join(models_out, experiment_name + '/logs.csv')
    target_models = os.path.join(models_out, experiment_name)
    print('target_models', target_models)
    if not os.path.isdir(target_models):
        os.makedirs(target_models)
    log = Logger(target_logs, ';')

    if headers is None:
        log.writeHeaders(['epoch', 'train_loss', 'train_audio_loss',
                          'train_video_loss', 'train_map', 'val_loss',
                          'val_audio_loss', 'val_video_loss', 'val_map'])
    else:
        log.writeHeaders(headers)

    # Dump cfg to json, replacing non-serializable values with their names
    dump_cfg = opt_config.copy()
    for key, value in dump_cfg.items():
        if callable(value):
            try:
                dump_cfg[key] = value.__name__
            except AttributeError:
                # Instances (e.g. nn.CrossEntropyLoss()) have no __name__
                dump_cfg[key] = type(value).__name__
    json_cfg = os.path.join(models_out, experiment_name + '/cfg.json')
    with open(json_cfg, 'w') as json_file:
        json.dump(dump_cfg, json_file)

    models_out = os.path.join(models_out, experiment_name)
    return log, models_out
1,723
32.803922
79
py
end-to-end-asd
end-to-end-asd-main/ez_io/io_e4d.py
1
0
0
py
end-to-end-asd
end-to-end-asd-main/ez_io/io_ava.py
import os

import numpy as np
from PIL import Image
from scipy.io import wavfile

from util.audio_processing import generate_mel_spectrogram


def _pil_loader(path):
    with Image.open(path) as img:
        return img.convert('RGB')


def _cached_pil_loader(path, cache):
    if path in cache.keys():
        return cache[path]

    with Image.open(path) as img:
        rgb = img.convert('RGB')
        cache[path] = rgb
        return rgb


def _cached_pil_loader_silent_fail(path, cache):
    # Assumed helper: referenced below but missing from the dump. Behaves like
    # _cached_pil_loader, except a missing/corrupt frame yields a blank image.
    if path in cache.keys():
        return cache[path]
    try:
        with Image.open(path) as img:
            rgb = img.convert('RGB')
    except OSError:
        rgb = Image.new('RGB', (112, 112))  # fallback size is an assumption
    cache[path] = rgb
    return rgb


def _fit_audio_clip(audio_clip, sample_rate, video_clip_lenght):
    target_audio_length = int((1.0 / 27.0) * sample_rate * video_clip_lenght)
    pad_required = int((target_audio_length - len(audio_clip)) / 2)
    if pad_required > 0:
        audio_clip = np.pad(audio_clip, pad_width=(pad_required, pad_required),
                            mode='reflect')
    if pad_required < 0:
        audio_clip = audio_clip[-1 * pad_required:pad_required]

    # TODO there is a +-1 offset here that has not been verified
    return audio_clip[0:target_audio_length - 1]


def load_v_clip_from_metadata(clip_meta_data, frames_source):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    entity_id = clip_meta_data[0][0]

    # Video frames
    selected_frames = [os.path.join(frames_source, entity_id, ts + '.jpg')
                       for ts in ts_sequence]
    video_data = [_pil_loader(sf) for sf in selected_frames]
    return video_data


def load_v_clip_from_metadata_cache(clip_meta_data, frames_source, cache,
                                    silent_fail=False):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    entity_id = clip_meta_data[0][0]

    # Video frames
    selected_frames = [os.path.join(frames_source, entity_id, ts + '.jpg')
                       for ts in ts_sequence]
    if silent_fail:
        video_data = [_cached_pil_loader_silent_fail(sf, cache)
                      for sf in selected_frames]
    else:
        video_data = [_cached_pil_loader(sf, cache) for sf in selected_frames]
    return video_data


def load_a_clip_from_metadata(clip_meta_data, frames_source, audio_source,
                              audio_offset, fail_silent=False):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    min_ts = float(clip_meta_data[0][1])
    max_ts = float(clip_meta_data[-1][1])
    entity_id = clip_meta_data[0][0]

    # Audio file
    audio_file = os.path.join(audio_source, entity_id + '.wav')
    sample_rate, audio_data = wavfile.read(audio_file)
    audio_start = int((min_ts - audio_offset) * sample_rate)
    audio_end = int((max_ts - audio_offset) * sample_rate)

    audio_clip = audio_data[audio_start:audio_end]
    audio_clip = _fit_audio_clip(audio_clip, sample_rate, len(ts_sequence))
    audio_features = generate_mel_spectrogram(audio_clip, sample_rate)
    return audio_features


def load_a_clip_from_metadata_sinc(clip_meta_data, frames_source, audio_source,
                                   audio_offset):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    min_ts = float(clip_meta_data[0][1])
    max_ts = float(clip_meta_data[-1][1])
    entity_id = clip_meta_data[0][0]

    selected_frames = [os.path.join(frames_source, entity_id, ts + '.jpg')
                       for ts in ts_sequence]

    # Audio file
    audio_file = os.path.join(audio_source, entity_id + '.wav')
    sample_rate, audio_data = wavfile.read(audio_file)
    audio_start = int((min_ts - audio_offset) * sample_rate)
    audio_end = int((max_ts - audio_offset) * sample_rate)

    audio_clip = audio_data[audio_start:audio_end]
    audio_clip = _fit_audio_clip(audio_clip, sample_rate, len(selected_frames))
    return audio_clip
3,524
33.558824
95
py
end-to-end-asd
end-to-end-asd-main/ez_io/io_talkies.py
import os

import numpy as np
from scipy.io import wavfile

# Assumed imports: the original file used these helpers without importing them;
# they are defined in the sibling module ez_io.io_ava.
from ez_io.io_ava import (_pil_loader, _cached_pil_loader,
                          _cached_pil_loader_silent_fail, _fit_audio_clip)
from util.audio_processing import generate_mel_spectrogram


def load_av_clip_from_metadata_talkies(speaker_data, mid_index, half_clip_lenght,
                                       video_root, audio_root):
    idx_sequence = [i for i in range(mid_index - half_clip_lenght,
                                     mid_index + half_clip_lenght + 1)]
    # Clamp indices to the valid range
    idx_sequence = [idx if idx >= 0 else 0 for idx in idx_sequence]
    idx_sequence = [idx if idx < len(speaker_data) else len(speaker_data) - 1
                    for idx in idx_sequence]

    frame_sequence = [speaker_data[idx] for idx in idx_sequence]
    frame_sequence = [os.path.join(video_root, str(f) + '.jpg')
                      for f in frame_sequence]
    min_ts = float(os.path.basename(frame_sequence[0])[:-4])
    max_ts = float(os.path.basename(frame_sequence[-1])[:-4])

    # Video frames
    video_data = [_pil_loader(sf) for sf in frame_sequence]

    # Audio file
    audio_file = audio_root + '.wav'
    sample_rate, audio_data = wavfile.read(audio_file)
    audio_start = int(min_ts * sample_rate)
    audio_end = int(max_ts * sample_rate)

    audio_clip = audio_data[audio_start:audio_end]
    if len(audio_clip) == 0:
        audio_clip = np.zeros((int(0.3 * sample_rate)))
    audio_clip = _fit_audio_clip(audio_clip, sample_rate, len(frame_sequence))
    audio_features = generate_mel_spectrogram(audio_clip, sample_rate)
    return video_data, audio_features


def load_a_clip_from_metadata_talkies(video_id, clip_meta_data, audio_source,
                                      audio_offset, fail_silent=False):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    min_ts = float(clip_meta_data[0][1])
    max_ts = float(clip_meta_data[-1][1])
    entity_id = clip_meta_data[0][0]

    # Audio file
    audio_file = os.path.join(audio_source, video_id + '.wav')
    try:
        sample_rate, audio_data = wavfile.read(audio_file)
    except (OSError, ValueError):
        sample_rate = 16000
        audio_data = np.zeros((int(2 * sample_rate)))
    audio_start = int((min_ts - audio_offset) * sample_rate)
    audio_end = int((max_ts - audio_offset) * sample_rate)

    audio_clip = audio_data[audio_start:audio_end]
    # TODO FIX: empty clips should be handled upstream
    if len(audio_clip) == 0:
        print('S0', video_id, min_ts, max_ts, len(audio_data))
        audio_clip = np.zeros((int(0.3 * sample_rate)))
    audio_clip = _fit_audio_clip(audio_clip, sample_rate, len(ts_sequence))
    audio_features = generate_mel_spectrogram(audio_clip, sample_rate)
    return audio_features


def load_v_clip_from_metadata_cache_talkies(video_id, clip_meta_data,
                                            frames_source, cache,
                                            silent_fail=False):
    ts_sequence = [str(meta[1]) for meta in clip_meta_data]
    entity_id = clip_meta_data[0][0]
    entity_id = entity_id.replace(' ', '_')

    # Video frames
    selected_frames = [os.path.join(frames_source, video_id, entity_id, ts + '.jpg')
                       for ts in ts_sequence]
    if silent_fail:
        video_data = [_cached_pil_loader_silent_fail(sf, cache)
                      for sf in selected_frames]
    else:
        video_data = [_cached_pil_loader(sf, cache) for sf in selected_frames]
    return video_data


def load_talkies_clip_from_metadata(clip_meta_data, frames_source, audio_file):
    selected_frames = [os.path.join(frames_source, str(ts) + '.jpg')
                       for ts in clip_meta_data]
    video_data = [_pil_loader(sf) for sf in selected_frames]

    # Audio data
    sample_rate, audio_data = wavfile.read(audio_file)
    audio_start = int(clip_meta_data[0] * sample_rate)
    audio_end = int(clip_meta_data[-1] * sample_rate)
    audio_clip = audio_data[audio_start:audio_end + 1]

    # Count how many times the edge timestamps were replicated; both counters
    # start at -1 on purpose so the genuine first/last occurrence itself does
    # not count as padding.
    l_pad_size = -1
    r_pad_size = -1
    for cmd in clip_meta_data:
        if cmd == clip_meta_data[0]:
            l_pad_size = l_pad_size + 1
        if cmd == clip_meta_data[-1]:
            r_pad_size = r_pad_size + 1

    l_pad_size = int(l_pad_size * (1 / 30) * sample_rate)
    r_pad_size = int(r_pad_size * (1 / 30) * sample_rate)
    audio_clip = np.pad(audio_clip, (l_pad_size, r_pad_size), mode='reflect')
    audio_features = generate_mel_spectrogram(audio_clip, sample_rate)
    return video_data, audio_features
4,338
36.08547
111
py
end-to-end-asd
end-to-end-asd-main/models/graph_layouts.py
import math


def get_spatial_connection_pattern(ctx_size, num_graphs):
    cp = {}
    cp['src'] = []
    cp['dst'] = []

    # Self connections
    for g in range(num_graphs):
        graph_offset = g * (ctx_size + 1)
        for s in range(ctx_size + 1):
            cp['src'].append(graph_offset + s)
            cp['dst'].append(graph_offset + s)

    # Spatial AV connections
    for g in range(num_graphs):
        graph_offset = g * (ctx_size + 1)
        for s in range(1, ctx_size + 1):
            cp['src'].append(graph_offset + 0)
            cp['dst'].append(graph_offset + s)
            cp['src'].append(graph_offset + s)
            cp['dst'].append(graph_offset + 0)

    # Spatial VV connections
    for g in range(num_graphs):
        graph_offset = g * (ctx_size + 1)
        for s in range(1, ctx_size + 1):
            for d in range(1, ctx_size + 1):
                if d != s:
                    cp['src'].append(graph_offset + s)
                    cp['dst'].append(graph_offset + d)

    return cp


def get_temporal_connection_pattern(ctx_size, num_graphs):
    cp = {}
    cp['src'] = []
    cp['dst'] = []

    # Self connections
    for g in range(num_graphs):
        graph_offset = g * (ctx_size + 1)
        for s in range(ctx_size + 1):
            cp['src'].append(graph_offset + s)
            cp['dst'].append(graph_offset + s)

    # Temporal VV connections
    for g in range(num_graphs):
        graph_offset = g * (ctx_size + 1)
        for s in range(0, ctx_size + 1):
            if g > 0:
                left_graph_offset = (g - 1) * (ctx_size + 1)
                cp['src'].append(graph_offset + s)
                cp['dst'].append(left_graph_offset + s)
            if g < num_graphs - 1:
                right_graph_offset = (g + 1) * (ctx_size + 1)
                cp['src'].append(graph_offset + s)
                cp['dst'].append(right_graph_offset + s)

    return cp


def generate_av_mask(ctx_size, total_len):
    stride = ctx_size + 1
    audio_mask = []
    video_mask = []
    for i in range(0, total_len):
        if i % stride == 0:
            audio_mask.append(i)
        else:
            video_mask.append(i)
    return audio_mask, video_mask


def generate_temporal_video_mask(ctx_size, total_len):
    stride = ctx_size + 1
    video_mask = [i for i in range(1, total_len, stride)]
    return video_mask


def generate_temporal_video_center_mask(ctx_size, total_len, time_len):
    stride = ctx_size + 1
    video_mask = [i + stride * math.floor(time_len / 2)
                  for i in range(1, total_len, stride * time_len)]
    return video_mask
2,575
27
71
py
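The connection-pattern helpers above assume an interleaved node layout: node 0 of each timestep is the audio node and nodes 1..ctx_size are the per-speaker video nodes. A minimal sketch (assuming the repo modules are importable) makes the layout concrete for ctx_size=2 and num_graphs=2:

# Layout per timestep: [audio, video_1, video_2] -> 6 nodes for two timesteps
from models.graph_layouts import get_spatial_connection_pattern, generate_av_mask

scp = get_spatial_connection_pattern(ctx_size=2, num_graphs=2)
print(list(zip(scp['src'], scp['dst'])))
# Self loops (0,0)..(5,5); AV edges such as (0,1)/(1,0); VV edges such as
# (1,2)/(2,1). No spatial edge crosses the timestep boundary between nodes 2 and 3.

audio_mask, video_mask = generate_av_mask(ctx_size=2, total_len=6)
print(audio_mask, video_mask)   # [0, 3] [1, 2, 4, 5]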
end-to-end-asd
end-to-end-asd-main/models/shared_3d.py
import torch.nn as nn


def get_inplanes():
    return [64, 128, 256, 512]


def conv3x3x3(in_planes, out_planes, stride=1):
    return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


def conv1x1x1(in_planes, out_planes, stride=1):
    return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)


class BasicBlock3D(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv3x3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out


class Bottleneck3D(nn.Module):
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv1x1x1(in_planes, planes)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = conv3x3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = conv1x1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out
2,293
23.666667
73
py
end-to-end-asd
end-to-end-asd-main/models/shared_2d.py
import torch.nn as nn
from torch import Tensor
from typing import Callable, Optional


def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1,
            dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False,
                     dilation=dilation)


def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)


class BasicBlock2D(nn.Module):
    expansion: int = 1

    def __init__(self, inplanes: int, planes: int, stride: int = 1,
                 downsample: Optional[nn.Module] = None, groups: int = 1,
                 base_width: int = 64, dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super(BasicBlock2D, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out


class Bottleneck2D(nn.Module):
    expansion: int = 4

    def __init__(self, inplanes: int, planes: int, stride: int = 1,
                 downsample: Optional[nn.Module] = None, groups: int = 1,
                 base_width: int = 64, dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super(Bottleneck2D, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out
3,514
33.80198
111
py
end-to-end-asd
end-to-end-asd-main/models/graph_models.py
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed by _downsample_basic_block
from functools import partial

from torch_geometric.nn import EdgeConv

from models.graph_layouts import generate_av_mask
from models.shared_2d import BasicBlock2D, conv1x1
from models.shared_3d import BasicBlock3D, Bottleneck3D, conv1x1x1, get_inplanes

try:
    from torch.hub import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url


model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}


class LinearPathPreact(nn.Module):
    def __init__(self, in_channels, hidden_channels):
        super(LinearPathPreact, self).__init__()
        # Layer 1
        self.fc1 = nn.Linear(in_channels, hidden_channels, bias=False)
        self.bn1 = nn.BatchNorm1d(in_channels)
        # Layer 2
        self.fc2 = nn.Linear(hidden_channels, hidden_channels, bias=False)
        self.bn2 = nn.BatchNorm1d(hidden_channels)
        # Shared
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x1 = self.bn1(x)
        x1 = self.relu(x1)
        x1 = self.fc1(x1)

        x2 = self.bn2(x1)
        x2 = self.relu(x2)
        x2 = self.fc2(x2)
        return x2


class GraphTwoStreamResNet3D(nn.Module):
    def __init__(self, args_2d, args_3d):
        super().__init__()
        block_2d, layers_2d, zero_init_residual, groups_2d, width_per_group, replace_stride_with_dilation, norm_layer_2d = args_2d
        block_3d, layers_3d, block_inplanes_3d, n_input_channels, conv1_t_size, conv1_t_stride, no_max_pool, shortcut_type, widen_factor = args_3d

        # Global args
        if norm_layer_2d is None:
            norm_layer_2d = nn.BatchNorm2d
        self._norm_layer_2d = norm_layer_2d

        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))

        # Audio stream
        self.inplanes_2d = 64
        self.dilation_2d = 1
        self.groups_2d = groups_2d
        self.base_width = width_per_group
        self.audio_conv1 = nn.Conv2d(1, self.inplanes_2d, kernel_size=7,
                                     stride=2, padding=3, bias=False)
        self.a_bn1 = norm_layer_2d(self.inplanes_2d)
        self.a_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.a_layer1 = self._make_layer_2D(block_2d, 64, layers_2d[0])
        self.a_layer2 = self._make_layer_2D(block_2d, 128, layers_2d[1], stride=2,
                                            dilate=replace_stride_with_dilation[0])
        self.a_layer3 = self._make_layer_2D(block_2d, 256, layers_2d[2], stride=2,
                                            dilate=replace_stride_with_dilation[1])
        self.a_layer4 = self._make_layer_2D(block_2d, 512, layers_2d[3], stride=2,
                                            dilate=replace_stride_with_dilation[2])
        self.a_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc_128_a = nn.Linear(512 * block_2d.expansion, 128)

        # Video stream
        block_inplanes = [int(x * widen_factor) for x in block_inplanes_3d]
        self.in_planes_3d = block_inplanes[0]
        self.no_max_pool = no_max_pool

        self.v_conv1 = nn.Conv3d(n_input_channels, self.in_planes_3d,
                                 kernel_size=(conv1_t_size, 7, 7),
                                 stride=(conv1_t_stride, 2, 2),
                                 padding=(conv1_t_size // 2, 3, 3),
                                 bias=False)
        self.v_bn1 = nn.BatchNorm3d(self.in_planes_3d)
        self.v_maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(2, 2, 2),
                                      padding=(0, 1, 1))
        self.v_layer1 = self._make_layer_3D(block_3d, block_inplanes[0],
                                            layers_3d[0], shortcut_type)
        self.v_layer2 = self._make_layer_3D(block_3d, block_inplanes[1],
                                            layers_3d[1], shortcut_type, stride=2)
        self.v_layer3 = self._make_layer_3D(block_3d, block_inplanes[2],
                                            layers_3d[2], shortcut_type, stride=2)
        self.v_layer4 = self._make_layer_3D(block_3d, block_inplanes[3],
                                            layers_3d[3], shortcut_type, stride=2)
        self.v_avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc_128_v = nn.Linear(512 * block_3d.expansion, 128)

        # Shared
        self.relu = nn.ReLU(inplace=True)

        # Dim reduction
        self.reduction_a = nn.Linear(512 * block_2d.expansion, 128)
        self.reduction_v = nn.Linear(512 * block_3d.expansion, 128)
        self.fc_aux_a = nn.Linear(128, 2)
        self.fc_aux_v = nn.Linear(128, 2)

        # Graph net
        self.edge1 = EdgeConv(LinearPathPreact(128 * 2, 64))
        self.edge2 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.edge3 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.edge4 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.fc = nn.Linear(64, 2)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer_2D(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer_2d
        downsample = None
        previous_dilation = self.dilation_2d
        if dilate:
            self.dilation_2d *= stride
            stride = 1
        if stride != 1 or self.inplanes_2d != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes_2d, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes_2d, planes, stride, downsample,
                            self.groups_2d, self.base_width, previous_dilation,
                            norm_layer))
        self.inplanes_2d = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes_2d, planes, groups=self.groups_2d,
                                base_width=self.base_width,
                                dilation=self.dilation_2d,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _downsample_basic_block(self, x, planes, stride):
        out = F.avg_pool3d(x, kernel_size=1, stride=stride)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
                                out.size(3), out.size(4))
        if isinstance(out.data, torch.cuda.FloatTensor):
            zero_pads = zero_pads.cuda()
        out = torch.cat([out.data, zero_pads], dim=1)
        return out

    def _make_layer_3D(self, block, planes, blocks, shortcut_type, stride=1):
        downsample = None
        if stride != 1 or self.in_planes_3d != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(self._downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    conv1x1x1(self.in_planes_3d, planes * block.expansion, stride),
                    nn.BatchNorm3d(planes * block.expansion))

        layers = []
        layers.append(block(in_planes=self.in_planes_3d, planes=planes,
                            stride=stride, downsample=downsample))
        self.in_planes_3d = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.in_planes_3d, planes))
        return nn.Sequential(*layers)

    def forward_audio(self, a, audio_size):
        a = torch.unsqueeze(a[:, 0, 0, :audio_size[1], :audio_size[2]], dim=1)
        a = self.audio_conv1(a)
        a = self.a_bn1(a)
        a = self.relu(a)
        a = self.a_maxpool(a)

        a = self.a_layer1(a)
        a = self.a_layer2(a)
        a = self.a_layer3(a)
        a = self.a_layer4(a)

        a = self.a_avgpool(a)
        a = a.reshape(a.size(0), -1)
        return a

    def forward_video(self, v):
        v = self.v_conv1(v)
        v = self.v_bn1(v)
        v = self.relu(v)
        if not self.no_max_pool:
            v = self.v_maxpool(v)

        v = self.v_layer1(v)
        v = self.v_layer2(v)
        v = self.v_layer3(v)
        v = self.v_layer4(v)

        v = self.v_avgpool(v)
        v = v.reshape(v.size(0), -1)
        return v

    def forward(self, data, ctx_size, audio_size):
        x, edge_index, _ = data.x, data.edge_index, data.batch

        # Indexing masks
        audio_mask, video_mask = generate_av_mask(ctx_size, x.size(0))

        # Initial conv forward
        audio_feats = self.forward_audio(x[audio_mask], audio_size)
        video_feats = self.forward_video(x[video_mask])

        # Dim reduction
        audio_feats = self.relu(self.reduction_a(audio_feats))
        video_feats = self.relu(self.reduction_v(video_feats))

        # Rebuild interleaved tensor
        graph_feats = torch.zeros((x.size(0), 128),
                                  device=audio_feats.get_device(),
                                  dtype=audio_feats.dtype)
        graph_feats[audio_mask] = audio_feats
        graph_feats[video_mask] = video_feats

        # Aux supervision
        audio_out = self.fc_aux_a(graph_feats[audio_mask])
        video_out = self.fc_aux_v(graph_feats[video_mask])

        # Graph stream
        graph_feats = self.edge1(graph_feats, edge_index)
        graph_feats = self.edge2(graph_feats, edge_index)
        graph_feats = self.edge3(graph_feats, edge_index)
        graph_feats = self.edge4(graph_feats, edge_index)

        return self.fc(graph_feats), audio_out, video_out


class GraphTwoStreamResNet3DTwoGraphs4LVLRes(GraphTwoStreamResNet3D):
    def __init__(self, args_2d, args_3d, filter_size):
        super().__init__(args_2d, args_3d)

        self.edge_spatial_1 = EdgeConv(LinearPathPreact(128 * 2, filter_size))
        self.edge_spatial_2 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_spatial_3 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_spatial_4 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))

        self.edge_temporal_1 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_2 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_3 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_4 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))

        self.fc = nn.Linear(filter_size, 2)

        # Drop the single-stream graph layers from the parent class
        self.edge1 = None
        self.edge2 = None
        self.edge3 = None
        self.edge4 = None

    def forward(self, data, ctx_size, audio_size):
        x, joint_edge_index, _ = data.x, data.edge_index, data.batch
        spatial_edge_index = joint_edge_index[0]
        temporal_edge_index = joint_edge_index[1]

        # Indexing masks
        audio_mask, video_mask = generate_av_mask(ctx_size, x.size(0))

        # Initial conv forward
        audio_feats = self.forward_audio(x[audio_mask], audio_size)
        video_feats = self.forward_video(x[video_mask])

        # Dim reduction
        audio_feats = self.relu(self.reduction_a(audio_feats))
        video_feats = self.relu(self.reduction_v(video_feats))

        # Rebuild interleaved tensor
        graph_feats = torch.zeros((x.size(0), 128),
                                  device=audio_feats.get_device(),
                                  dtype=audio_feats.dtype)
        graph_feats[audio_mask] = audio_feats
        graph_feats[video_mask] = video_feats

        # Aux supervision
        audio_out = self.fc_aux_a(graph_feats[audio_mask])
        video_out = self.fc_aux_v(graph_feats[video_mask])

        # Interleaved spatial/temporal stream with residual connections
        graph_feats_1s = self.edge_spatial_1(graph_feats, spatial_edge_index)
        graph_feats_1st = self.edge_temporal_1(graph_feats_1s, temporal_edge_index)

        graph_feats_2s = self.edge_spatial_2(graph_feats_1st, spatial_edge_index)
        graph_feats_2st = self.edge_temporal_2(graph_feats_2s, temporal_edge_index)
        graph_feats_2st = graph_feats_2st + graph_feats_1st

        graph_feats_3s = self.edge_spatial_3(graph_feats_2st, spatial_edge_index)
        graph_feats_3st = self.edge_temporal_3(graph_feats_3s, temporal_edge_index)
        graph_feats_3st = graph_feats_3st + graph_feats_2st

        graph_feats_4s = self.edge_spatial_4(graph_feats_3st, spatial_edge_index)
        graph_feats_4st = self.edge_temporal_4(graph_feats_4s, temporal_edge_index)
        graph_feats_4st = graph_feats_4st + graph_feats_3st

        return self.fc(graph_feats_4st), audio_out, video_out


def _load_video_weights_into_model(model, ws_file):
    resnet_state_dict = torch.load(ws_file)['state_dict']

    own_state = model.state_dict()
    for name, param in resnet_state_dict.items():
        if 'v_' + name in own_state:
            own_state['v_' + name].copy_(param)
        else:
            print('No video assignation for ', name)
    print('loaded video ws')
    return


def _load_audio_weights_into_model(model, arch2d, progress):
    resnet_state_dict = load_state_dict_from_url(model_urls[arch2d],
                                                 progress=progress)

    own_state = model.state_dict()
    for name, param in resnet_state_dict.items():
        if 'a_' + name in own_state:
            own_state['a_' + name].copy_(param)
        else:
            print('No audio assignation for ', name)

    # The audio stream has a single input channel: average the RGB conv1 kernels
    conv1_weights = resnet_state_dict['conv1.weight']
    avgWs = torch.mean(conv1_weights, dim=1, keepdim=True)
    own_state['audio_conv1.weight'].copy_(avgWs)
    print('loaded audio ws')
    return


def R3D18_4lvlGCN(pretrained_weights, filter_size=128):
    args_2d = BasicBlock2D, [2, 2, 2, 2], False, 1, 64, None, None
    args_3d = BasicBlock3D, [2, 2, 2, 2], get_inplanes(), 3, 7, 1, False, 'B', 1.0
    model = GraphTwoStreamResNet3DTwoGraphs4LVLRes(args_2d, args_3d, filter_size)
    _load_audio_weights_into_model(model, 'resnet18', True)
    _load_video_weights_into_model(model, pretrained_weights)
    return model


def R3D50_4lvlGCN(pretrained_weights, filter_size=128):
    args_2d = BasicBlock2D, [2, 2, 2, 2], False, 1, 64, None, None
    args_3d = Bottleneck3D, [3, 4, 6, 3], get_inplanes(), 3, 7, 1, False, 'B', 1.0
    model = GraphTwoStreamResNet3DTwoGraphs4LVLRes(args_2d, args_3d, filter_size)
    _load_audio_weights_into_model(model, 'resnet18', True)
    _load_video_weights_into_model(model, pretrained_weights)
    return model
15,486
39.225974
146
py
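The graph head stacks EdgeConv layers whose message network is LinearPathPreact, a two-layer pre-activation MLP over the concatenated pair features [x_i, x_j - x_i] (hence the in_channels of twice the node width). A small smoke test of that building block, assuming torch and torch_geometric are installed:

import torch
from torch_geometric.nn import EdgeConv
from models.graph_models import LinearPathPreact

x = torch.randn(3, 128)                          # 3 nodes with 128-d features
edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],   # directed triangle, both ways
                           [1, 0, 2, 1, 0, 2]])
conv = EdgeConv(LinearPathPreact(128 * 2, 64))
conv.eval()                                      # BatchNorm1d uses running stats
out = conv(x, edge_index)
print(out.shape)                                 # torch.Size([3, 64])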
end-to-end-asd
end-to-end-asd-main/util/custom_transforms.py
from torchvision import transforms

# Normalization statistics for the face-crop inputs
video_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.3729, 0.2850, 0.2439), (0.2286, 0.2008, 0.1911))
])

video_val = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.3729, 0.2850, 0.2439), (0.2286, 0.2008, 0.1911))
])
318
28
76
py
end-to-end-asd
end-to-end-asd-main/util/clip_utils.py
def generate_clip_meta(entity_meta_data, midone, half_clip_size):
    # Collect up to half_clip_size entries on each side of the center index
    max_span_left = _get_clip_max_span(entity_meta_data, midone, -1,
                                       half_clip_size + 1)
    max_span_right = _get_clip_max_span(entity_meta_data, midone, 1,
                                        half_clip_size + 1)

    clip_data = entity_meta_data[midone - max_span_left:midone + max_span_right + 1]
    clip_data = _extend_clip_data(clip_data, max_span_left, max_span_right,
                                  half_clip_size)
    return clip_data


def _get_clip_max_span(csv_data, midone, direction, max):
    idx = 0
    for idx in range(0, max):
        if midone + (idx * direction) < 0:
            return idx - 1
        if midone + (idx * direction) >= len(csv_data):
            return idx - 1
    return idx


def _extend_clip_data(clip_data, max_span_left, max_span_right, half_clip_size):
    # Replicate boundary entries so every clip has 2*half_clip_size+1 items
    if max_span_left < half_clip_size:
        for i in range(half_clip_size - max_span_left):
            clip_data.insert(0, clip_data[0])

    if max_span_right < half_clip_size:
        for i in range(half_clip_size - max_span_right):
            clip_data.insert(-1, clip_data[-1])

    return clip_data
1,193
34.117647
80
py
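generate_clip_meta pads clips at sequence boundaries by replicating the edge entries, so a clip centered on the very first frame still has 2*half_clip_size+1 items. A small illustration; the metadata tuples below are hypothetical (entity_id, timestamp, label) rows:

import util.clip_utils as cu

meta = [('e1', str(ts), 0) for ts in range(5)]   # frames at ts 0..4
clip = cu.generate_clip_meta(meta, midone=0, half_clip_size=2)
print([m[1] for m in clip])   # ['0', '0', '0', '1', '2']: left edge replicated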
end-to-end-asd
end-to-end-asd-main/util/augmentations.py
import random

from PIL import Image
from torchvision.transforms import RandomCrop
from torchvision.transforms.functional import hflip


def video_temporal_crop(video_data, crop_ratio):
    # Random horizontal flip, applied consistently across the whole clip
    if bool(random.getrandbits(1)):
        video_data = [s.transpose(Image.FLIP_LEFT_RIGHT) for s in video_data]

    # Random crop, parameterized on the middle frame and shared by all frames
    mid = int(len(video_data) / 2)
    width, height = video_data[mid].size
    f = random.uniform(crop_ratio, 1)
    i, j, h, w = RandomCrop.get_params(video_data[mid],
                                       output_size=(int(height * f), int(width * f)))
    video_data = [s.crop(box=(j, i, j + w, i + h)) for s in video_data]
    return video_data


def video_flip(video_data, crop_ratio):
    # Random horizontal flip only; crop_ratio is unused
    if bool(random.getrandbits(1)):
        video_data = [hflip(vd) for vd in video_data]
    return video_data
814
27.103448
98
py
end-to-end-asd
end-to-end-asd-main/util/audio_processing.py
import numpy as np
import python_speech_features


def generate_mel_spectrogram(audio_clip, sample_rate):
    # Despite the name, this computes MFCC features with shape (1, n_cep, n_frames)
    mfcc = zip(*python_speech_features.mfcc(audio_clip, sample_rate))
    audio_features = np.stack([np.array(i) for i in mfcc])
    audio_features = np.expand_dims(audio_features, axis=0)
    return audio_features
321
31.2
69
py
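As a sanity check of the feature shape: with python_speech_features' defaults (25 ms window, 10 ms step, 13 cepstral coefficients), a half-second clip at 16 kHz should yield 49 frames. A quick assumed check:

import numpy as np
from util.audio_processing import generate_mel_spectrogram

sample_rate = 16000
audio_clip = np.random.randn(sample_rate // 2)   # 0.5 s of noise
feats = generate_mel_spectrogram(audio_clip, sample_rate)
print(feats.shape)   # (1, 13, 49): channel, coefficients, frames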
end-to-end-asd
end-to-end-asd-main/util/command_line.py
import argparse


def get_default_arg_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', default='5e-4')
    parser.add_argument('--frmc', default='13')
    parser.add_argument('--ctx', default='2')
    parser.add_argument('--nclp', default='7')
    parser.add_argument('--strd', default='3')
    parser.add_argument('--size', default='160')
    return parser


def unpack_command_line_args(args):
    lr_arg = float(args.lr)
    frames_per_clip = int(args.frmc)  # frame counts are integral
    ctx_size = int(args.ctx)
    n_clips = int(args.nclp)
    strd = int(args.strd)
    img_size = int(args.size)
    return lr_arg, frames_per_clip, ctx_size, n_clips, strd, img_size
681
26.28
69
py
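All flags are strings with defaults, so the training scripts run without arguments; a typical invocation would look like python easee_R3D18.py --frmc 13 --ctx 2 --nclp 7 --strd 3 --size 160 (hypothetical values matching the defaults). A minimal sketch of the parse/unpack round trip:

from util.command_line import get_default_arg_parser, unpack_command_line_args

args = get_default_arg_parser().parse_args([])   # empty argv: take the defaults
print(unpack_command_line_args(args))            # (0.0005, 13, 2, 7, 3, 160)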
end-to-end-asd
end-to-end-asd-main/datasets/graph_datasets.py
import os
import math
import random

import torch
import numpy as np
from torch_geometric.data import Data, Dataset

import ez_io.io_ava as io
import util.clip_utils as cu
from ez_io.file_util import csv_to_list, postprocess_speech_label, postprocess_entity_label
from util.augmentations import video_temporal_crop, video_corner_crop


class GraphContextualDataset(Dataset):
    def __init__(self):
        # In-memory data
        self.entity_data = {}
        self.speech_data = {}
        self.ts_to_entity = {}
        self.entity_list = []

    def get_speaker_context(self, video_id, target_entity_id, center_ts, ctx_len):
        # Get the context and exclude the target entity itself...
        context_entities = list(self.ts_to_entity[video_id][center_ts])
        random.shuffle(context_entities)
        context_entities.remove(target_entity_id)

        # ...but remember the target itself must be included, at index 0
        if not context_entities:  # nobody else around, self is the whole context
            context_entities.insert(0, target_entity_id)
            while len(context_entities) < ctx_len:
                context_entities.append(random.choice(context_entities))
        elif len(context_entities) < ctx_len:
            context_entities.insert(0, target_entity_id)  # make sure it is at 0
            while len(context_entities) < ctx_len:
                context_entities.append(random.choice(context_entities[1:]))
        else:
            context_entities.insert(0, target_entity_id)  # make sure it is at 0
            context_entities = context_entities[:ctx_len]

        return context_entities

    def search_ts_in_meta_data(self, entity_metadata, ts):
        for idx, em in enumerate(entity_metadata):
            if em[1] == ts:
                return idx
        raise Exception('Bad Context')

    def _cache_entity_data(self, csv_file_path):
        entity_set = set()

        csv_data = csv_to_list(csv_file_path)
        csv_data.pop(0)  # CSV header
        for csv_row in csv_data:
            video_id = csv_row[0]
            entity_id = csv_row[-3]
            timestamp = csv_row[1]

            speech_label = postprocess_speech_label(csv_row[-2])
            entity_label = postprocess_entity_label(csv_row[-2])
            minimal_entity_data = (entity_id, timestamp, entity_label)

            # Store minimal entity data
            if video_id not in self.entity_data.keys():
                self.entity_data[video_id] = {}
            if entity_id not in self.entity_data[video_id].keys():
                self.entity_data[video_id][entity_id] = []
                entity_set.add((video_id, entity_id))
            self.entity_data[video_id][entity_id].append(minimal_entity_data)

            # Store speech metadata
            if video_id not in self.speech_data.keys():
                self.speech_data[video_id] = {}
            if timestamp not in self.speech_data[video_id].keys():
                self.speech_data[video_id][timestamp] = speech_label

            # Max operation yields 1 if anyone in the scene is speaking
            new_speech_label = max(self.speech_data[video_id][timestamp],
                                   speech_label)
            self.speech_data[video_id][timestamp] = new_speech_label

        return entity_set

    def _entity_list_postprocessing(self, entity_set):
        print('Initial', len(entity_set))

        # Filter out entities whose data is missing on disk
        print('video_root', self.video_root)
        all_disk_data = set(os.listdir(self.video_root))
        for video_id, entity_id in entity_set.copy():
            if entity_id not in all_disk_data:
                entity_set.remove((video_id, entity_id))
        print('Pruned not in disk', len(entity_set))

        for video_id, entity_id in entity_set.copy():
            dir = os.path.join(self.video_root, entity_id)
            if len(os.listdir(dir)) != len(self.entity_data[video_id][entity_id]):
                entity_set.remove((video_id, entity_id))
        print('Pruned not complete', len(entity_set))

        self.entity_list = sorted(list(entity_set))

        # Allocate simultaneous entities per timestamp
        for video_id, entity_id in entity_set:
            if video_id not in self.ts_to_entity.keys():
                self.ts_to_entity[video_id] = {}
            ent_min_data = self.entity_data[video_id][entity_id]
            for ed in ent_min_data:
                timestamp = ed[1]
                if timestamp not in self.ts_to_entity[video_id].keys():
                    self.ts_to_entity[video_id][timestamp] = []
                self.ts_to_entity[video_id][timestamp].append(entity_id)


class GraphDatasetETE(GraphContextualDataset):
    def __init__(self, audio_root, video_root, csv_file_path, context_size,
                 clip_lenght, connection_pattern, video_transform=None,
                 do_video_augment=False, crop_ratio=0.8, norm_audio=False):
        super().__init__()
        # Data directories
        self.audio_root = audio_root
        self.video_root = video_root

        # Post-processing
        self.crop_ratio = crop_ratio
        self.video_transform = video_transform
        self.do_video_augment = do_video_augment

        # Graph layout
        self.context_size = context_size

        # Node config
        self.norm_audio = norm_audio
        self.half_clip_length = math.floor(clip_lenght / 2)

        # Cache data
        entity_set = self._cache_entity_data(csv_file_path)
        self._entity_list_postprocessing(entity_set)

        # Edge config
        src_edges = connection_pattern['src']
        dst_edges = connection_pattern['dst']
        self.batch_edges = torch.tensor([src_edges, dst_edges], dtype=torch.long)

        # Replicate the entity list (doubles the epoch length)
        self.entity_list.extend(self.entity_list)
        self.avg_time = []

    def __len__(self):
        return len(self.entity_list)

    def get_audio_size(self):
        video_id, entity_id = self.entity_list[0]
        entity_metadata = self.entity_data[video_id][entity_id]
        audio_offset = float(entity_metadata[0][1])
        mid_index = random.randint(0, len(entity_metadata) - 1)

        clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
                                               self.half_clip_length)
        audio_data = io.load_a_clip_from_metadata(clip_meta_data, self.video_root,
                                                  self.audio_root, audio_offset)
        return np.float32(audio_data).shape

    def _get_scene_video_data(self, video_id, entity_id, mid_index):
        original_entity_metadata = self.entity_data[video_id][entity_id]
        time_ent = original_entity_metadata[mid_index][1]
        context = self.get_speaker_context(video_id, entity_id, time_ent,
                                           self.context_size)

        video_data = []
        targets = []
        for ctx_entity in context:
            entity_metadata = self.entity_data[video_id][ctx_entity]
            ts_idx = self.search_ts_in_meta_data(entity_metadata, time_ent)
            target_ctx = int(entity_metadata[ts_idx][-1])

            clip_meta_data = cu.generate_clip_meta(entity_metadata, ts_idx,
                                                   self.half_clip_length)
            video_data.append(io.load_v_clip_from_metadata(clip_meta_data,
                                                           self.video_root))
            targets.append(target_ctx)

        if self.do_video_augment:
            video_data = [video_temporal_crop(vd, self.crop_ratio)
                          for vd in video_data]

        if self.video_transform is not None:
            for vd_idx, vd in enumerate(video_data):
                tensor_vd = [self.video_transform(f) for f in vd]
                video_data[vd_idx] = tensor_vd

        video_data = [torch.cat(vd, dim=0) for vd in video_data]
        return video_data, targets

    def _get_audio_data(self, video_id, entity_id, mid_index):
        entity_metadata = self.entity_data[video_id][entity_id]
        audio_offset = float(entity_metadata[0][1])
        midone = entity_metadata[mid_index]
        target_audio = self.speech_data[video_id][midone[1]]

        clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
                                               self.half_clip_length)
        audio_data = io.load_a_clip_from_metadata(clip_meta_data, self.video_root,
                                                  self.audio_root, audio_offset)
        return np.float32(audio_data), target_audio

    def __getitem__(self, index):
        video_id, entity_id = self.entity_list[index]
        target_entity_metadata = self.entity_data[video_id][entity_id]
        target_index = random.randint(0, len(target_entity_metadata) - 1)

        # Get AV data
        video_data, target_v = self._get_scene_video_data(video_id, entity_id,
                                                          target_index)
        audio_data, target_a = self._get_audio_data(video_id, entity_id,
                                                    target_index)
        if self.norm_audio:
            audio_data = (audio_data + 3.777757875102366) / 186.4988690376491

        # Fill targets: audio node first, then the video nodes
        target_set = [target_a]
        for tv in target_v:
            target_set.append(tv)

        # Feature data: node 0 holds the audio features, nodes 1..ctx the video
        feature_set = torch.zeros((len(video_data) + 1, video_data[0].size(0),
                                   video_data[0].size(1), video_data[0].size(2)))
        audio_data = torch.from_numpy(audio_data)
        feature_set[0, 0, :audio_data.size(1), :audio_data.size(2)] = audio_data
        for i in range(self.context_size):
            feature_set[i + 1, ...] = video_data[i]

        return Data(x=feature_set, edge_index=self.batch_edges,
                    y=torch.tensor(target_set))


class IndependentGraphDatasetETE3D(GraphDatasetETE):
    def __init__(self, audio_root, video_root, csv_file_path, graph_time_steps,
                 stride, context_size, clip_lenght, spatial_connection_pattern,
                 temporal_connection_pattern, video_transform=None,
                 do_video_augment=False, crop_ratio=0.95, norm_audio=False):
        super().__init__(audio_root, video_root, csv_file_path, context_size,
                         clip_lenght, spatial_connection_pattern,
                         video_transform, do_video_augment, crop_ratio,
                         norm_audio)

        # The superclass edge config is replaced by spatial + temporal edge sets
        self.batch_edges = None
        spatial_src_edges = spatial_connection_pattern['src']
        spatial_dst_edges = spatial_connection_pattern['dst']
        self.spatial_batch_edges = torch.tensor(
            [spatial_src_edges, spatial_dst_edges], dtype=torch.long)

        temporal_src_edges = temporal_connection_pattern['src']
        temporal_dst_edges = temporal_connection_pattern['dst']
        self.temporal_batch_edges = torch.tensor(
            [temporal_src_edges, temporal_dst_edges], dtype=torch.long)

        # Temporal graph layout
        self.graph_time_steps = graph_time_steps
        self.stride = stride

    def _get_scene_video_data(self, video_id, entity_id, mid_index, cache):
        original_entity_metadata = self.entity_data[video_id][entity_id]
        time_ent = original_entity_metadata[mid_index][1]
        context = self.get_speaker_context(video_id, entity_id, time_ent,
                                           self.context_size)

        video_data = []
        targets = []
        for ctx_entity in context:
            entity_metadata = self.entity_data[video_id][ctx_entity]
            ts_idx = self.search_ts_in_meta_data(entity_metadata, time_ent)
            target_ctx = int(entity_metadata[ts_idx][-1])

            clip_meta_data = cu.generate_clip_meta(entity_metadata, ts_idx,
                                                   self.half_clip_length)
            video_data.append(io.load_v_clip_from_metadata_cache(
                clip_meta_data, self.video_root, cache))
            targets.append(target_ctx)

        if self.video_transform is not None:
            for vd_idx, vd in enumerate(video_data):
                tensor_vd = [self.video_transform(f) for f in vd]
                video_data[vd_idx] = tensor_vd

        if self.do_video_augment:
            video_data = [video_corner_crop(vd, self.crop_ratio)
                          for vd in video_data]

        video_data = [torch.stack(vd, dim=1) for vd in video_data]
        return video_data, targets

    def _get_time_context(self, entity_data, target_index):
        all_ts = [ed[1] for ed in entity_data]
        center_ts = entity_data[target_index][1]
        center_ts_idx = all_ts.index(str(center_ts))

        half_time_steps = int(self.graph_time_steps / 2)
        start = center_ts_idx - (half_time_steps * self.stride)
        end = center_ts_idx + ((half_time_steps + 1) * self.stride)
        selected_ts_idx = list(range(start, end, self.stride))

        # Clamp the strided window to the valid timestamp range
        selected_ts = []
        for i, idx in enumerate(selected_ts_idx):
            if idx < 0:
                idx = 0
            if idx >= len(all_ts):
                idx = len(all_ts) - 1
            selected_ts.append(all_ts[idx])

        return selected_ts

    def __getitem__(self, index):
        video_id, entity_id = self.entity_list[index]
        target_entity_metadata = self.entity_data[video_id][entity_id]
        center_index = random.randint(0, len(target_entity_metadata) - 1)
        time_context = self._get_time_context(target_entity_metadata,
                                              center_index)

        feature_set = None
        target_set = []

        all_ts = [ted[1] for ted in target_entity_metadata]
        nodes_per_graph = self.context_size + 1
        cache = {}
        for graph_idx, tc in enumerate(time_context):
            target_index = all_ts.index(tc)

            # Get AV data
            video_data, target_v = self._get_scene_video_data(video_id, entity_id,
                                                              target_index, cache)
            audio_data, target_a = self._get_audio_data(video_id, entity_id,
                                                        target_index)

            # Fill targets: audio node first, then the video nodes
            target_set.append(target_a)
            for tv in target_v:
                target_set.append(tv)

            # Allocate the feature tensor once the clip size is known
            if feature_set is None:
                feature_set = torch.zeros(nodes_per_graph * self.graph_time_steps,
                                          video_data[0].size(0),
                                          video_data[0].size(1),
                                          video_data[0].size(2),
                                          video_data[0].size(3))

            # Fill in
            graph_offset = graph_idx * nodes_per_graph
            audio_data = torch.from_numpy(audio_data)
            feature_set[graph_offset, 0, 0, :audio_data.size(1), :audio_data.size(2)] = audio_data
            for i in range(self.context_size):
                feature_set[graph_offset + (i + 1), ...] = video_data[i]

        return Data(x=feature_set,
                    edge_index=(self.spatial_batch_edges, self.temporal_batch_edges),
                    y=torch.tensor(target_set))
14,316
40.259366
176
py
end-to-end-asd
end-to-end-asd-main/optimization/losses.py
import torch


class assignation_loss_audio(torch.nn.Module):
    def __init__(self, graph_size):
        super(assignation_loss_audio, self).__init__()
        self.graph_size = graph_size
        self.softmax_layer = torch.nn.Softmax(dim=1)

    def forward(self, outputs, audio_targets):
        pred = self.softmax_layer(outputs)[:, 1]  # positive-class probabilities
        pred = pred.view((-1, self.graph_size))
        pred = pred[:, 1:]  # drop the audio node, keep the video nodes
        max_pred, _ = torch.max(pred, dim=1)

        audio_gt = audio_targets
        # Penalize graphs where speech is audible but no video node scores high,
        # and graphs where a video node scores high with no audible speech
        no_assig_penalty = audio_gt * (audio_gt - max_pred)
        bad_assig_penalty = (1 - audio_gt) * max_pred
        total_penalty = no_assig_penalty + bad_assig_penalty
        return torch.mean(total_penalty)
749
33.090909
71
py
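The assignation loss reads one probability per node, keeps only the video nodes of each graph (column 0 is the audio node), and compares the best video score against the audio ground truth: if someone is audibly speaking, at least one video node should score high; if not, all should score low. A tiny worked example, assuming a graph_size of 3 (one audio plus two video nodes):

import torch
from optimization.losses import assignation_loss_audio

loss_fn = assignation_loss_audio(graph_size=3)
logits = torch.tensor([[0.0, 0.0],     # audio node (ignored by the loss)
                       [-2.0, 2.0],    # video node 1: p(speaking) ~ 0.98
                       [2.0, -2.0]])   # video node 2: p(speaking) ~ 0.02
audio_gt = torch.tensor([1.0])         # the audio says someone is speaking
print(loss_fn(logits, audio_gt))       # ~0.018: best video prob nearly matches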
end-to-end-asd
end-to-end-asd-main/optimization/optimization_amp.py
import os

import torch
from torch.cuda.amp import autocast
from sklearn.metrics import average_precision_score

from models.graph_layouts import generate_av_mask
from models.graph_layouts import generate_temporal_video_center_mask, generate_temporal_video_mask


def optimize_easee(model, dataloader_train, data_loader_val, device,
                   criterion, optimizer, scheduler, num_epochs,
                   spatial_ctx_size, time_len, a_weight=0.2, v_weight=0.5,
                   models_out=None, log=None):
    for epoch in range(num_epochs):
        print()
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)

        outs_train = _train_model_amp_avl(model, dataloader_train, optimizer,
                                          criterion, device, spatial_ctx_size,
                                          time_len, a_weight, v_weight)
        outs_val = _test_model_graph_losses(model, data_loader_val, criterion,
                                            device, spatial_ctx_size, time_len)
        scheduler.step()

        train_loss, ta_loss, tv_loss, train_ap = outs_train
        val_loss, va_loss, vv_loss, val_ap, val_tap, val_cap = outs_val

        if models_out is not None and epoch > num_epochs - 10:  # save only the last 10 epochs
            model_target = os.path.join(models_out, str(epoch + 1) + '.pth')
            print('save model to ', model_target)
            torch.save(model.state_dict(), model_target)

        if log is not None:
            log.writeDataLog([epoch + 1, train_loss, ta_loss, tv_loss, train_ap,
                              val_loss, va_loss, vv_loss, val_ap, val_tap, val_cap])

    return model


def _train_model_amp_avl(model, dataloader, optimizer, criterion, device,
                         ctx_size, time_len, a_weight, v_weight):
    model.train()
    softmax_layer = torch.nn.Softmax(dim=1)

    pred_lst = []
    label_lst = []
    pred_time_lst = []
    label_time_lst = []
    pred_center_lst = []
    label_center_lst = []

    running_loss_g = 0.0
    running_loss_a = 0.0
    running_loss_v = 0.0

    audio_size = dataloader.dataset.get_audio_size()
    scaler = torch.cuda.amp.GradScaler(enabled=True)

    # Iterate over data
    for idx, dl in enumerate(dataloader):
        print('\t Train iter {:d}/{:d} {:.4f}'.format(
            idx, len(dataloader), running_loss_g / (idx + 1)), end='\r')

        graph_data = dl.to(device)
        targets = graph_data.y
        optimizer.zero_grad()

        with torch.set_grad_enabled(True):
            # TODO: inefficient, the masks could be computed once per epoch
            audio_mask, video_mask = generate_av_mask(ctx_size, graph_data.x.size(0))
            temporal_video_mask = generate_temporal_video_mask(ctx_size, graph_data.x.size(0))
            center_mask = generate_temporal_video_center_mask(ctx_size, graph_data.x.size(0), time_len)

            with autocast(True):
                outputs, audio_out, video_out = model(graph_data, ctx_size, audio_size)
                aux_loss_a = criterion(audio_out, targets[audio_mask])
                aux_loss_v = criterion(video_out, targets[video_mask])
                loss_graph = criterion(outputs, targets)
                loss = a_weight * aux_loss_a + v_weight * aux_loss_v + loss_graph

            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

        with torch.set_grad_enabled(False):
            label_lst.extend(targets[video_mask].cpu().numpy().tolist())
            pred_lst.extend(softmax_layer(outputs[video_mask]).cpu().numpy()[:, 1].tolist())
            label_time_lst.extend(targets[temporal_video_mask].cpu().numpy().tolist())
            pred_time_lst.extend(softmax_layer(outputs[temporal_video_mask]).cpu().numpy()[:, 1].tolist())
            label_center_lst.extend(targets[center_mask].cpu().numpy().tolist())
            pred_center_lst.extend(softmax_layer(outputs[center_mask]).cpu().numpy()[:, 1].tolist())

        # Statistics
        running_loss_g += loss_graph.item()
        running_loss_a += aux_loss_a.item()
        running_loss_v += aux_loss_v.item()
        if idx == len(dataloader) - 2:
            break

    epoch_loss_g = running_loss_g / len(dataloader)
    epoch_loss_a = running_loss_a / len(dataloader)
    epoch_loss_v = running_loss_v / len(dataloader)
    epoch_ap = average_precision_score(label_lst, pred_lst)
    epoch_time_ap = average_precision_score(label_time_lst, pred_time_lst)
    epoch_center_ap = average_precision_score(label_center_lst, pred_center_lst)
    print('Train Graph Loss: {:.4f}, Audio Loss: {:.4f}, Video Loss: {:.4f}, VmAP: {:.4f}, TVmAP: {:.4f}, CVmAP: {:.4f}'.format(
        epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap))
    return epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap


def _test_model_graph_losses(model, dataloader, criterion, device, ctx_size,
                             time_len):
    model.eval()
    softmax_layer = torch.nn.Softmax(dim=1)

    pred_lst = []
    label_lst = []
    pred_time_lst = []
    label_time_lst = []
    pred_center_lst = []
    label_center_lst = []

    running_loss_g = 0.0
    running_loss_a = 0.0
    running_loss_v = 0.0

    audio_size = dataloader.dataset.get_audio_size()

    # Iterate over data
    for idx, dl in enumerate(dataloader):
        print('\t Val iter {:d}/{:d} {:.4f}'.format(
            idx, len(dataloader), running_loss_g / (idx + 1)), end='\r')

        graph_data = dl.to(device)
        targets = graph_data.y

        with torch.set_grad_enabled(False):
            # TODO: inefficient, the masks could be computed once per epoch
            audio_mask, video_mask = generate_av_mask(ctx_size, graph_data.x.size(0))
            temporal_video_mask = generate_temporal_video_mask(ctx_size, graph_data.x.size(0))
            center_mask = generate_temporal_video_center_mask(ctx_size, graph_data.x.size(0), time_len)

            outputs, audio_out, video_out = model(graph_data, ctx_size, audio_size)
            loss_graph = criterion(outputs, targets)
            aux_loss_a = criterion(audio_out, targets[audio_mask])
            aux_loss_v = criterion(video_out, targets[video_mask])

            label_lst.extend(targets[video_mask].cpu().numpy().tolist())
            pred_lst.extend(softmax_layer(outputs[video_mask]).cpu().numpy()[:, 1].tolist())
            label_time_lst.extend(targets[temporal_video_mask].cpu().numpy().tolist())
            pred_time_lst.extend(softmax_layer(outputs[temporal_video_mask]).cpu().numpy()[:, 1].tolist())
            label_center_lst.extend(targets[center_mask].cpu().numpy().tolist())
            pred_center_lst.extend(softmax_layer(outputs[center_mask]).cpu().numpy()[:, 1].tolist())

        # Statistics
        running_loss_g += loss_graph.item()
        running_loss_a += aux_loss_a.item()
        running_loss_v += aux_loss_v.item()
        if idx == len(dataloader) - 2:
            break

    epoch_loss_g = running_loss_g / len(dataloader)
    epoch_loss_a = running_loss_a / len(dataloader)
    epoch_loss_v = running_loss_v / len(dataloader)
    epoch_ap = average_precision_score(label_lst, pred_lst)
    epoch_time_ap = average_precision_score(label_time_lst, pred_time_lst)
    epoch_center_ap = average_precision_score(label_center_lst, pred_center_lst)
    print('Val Graph Loss: {:.4f}, Audio Loss: {:.4f}, Video Loss: {:.4f}, VmAP: {:.4f}, TVmAP: {:.4f}, CVmAP: {:.4f}'.format(
        epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap))
    return epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap
7,919
39.615385
128
py
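A note on the file above: _train_model_amp_avl follows the standard torch.cuda.amp recipe (autocast forward pass, scaled backward pass, scaler-driven optimizer step). A minimal, self-contained sketch of that recipe, using a hypothetical toy linear model and random tensors in place of the EASEE graph network and loader (assumes a CUDA device is available):

import torch

# Hypothetical stand-ins for the real model/criterion/optimizer above
model = torch.nn.Linear(16, 2).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
criterion = torch.nn.CrossEntropyLoss()
scaler = torch.cuda.amp.GradScaler(enabled=True)

x = torch.randn(8, 16).cuda()
y = torch.randint(0, 2, (8,)).cuda()

optimizer.zero_grad()
with torch.cuda.amp.autocast(True):   # forward pass runs in mixed precision
    loss = criterion(model(x), y)
scaler.scale(loss).backward()         # scale the loss so fp16 gradients don't underflow
scaler.step(optimizer)                # unscales gradients; skips the step if any are inf/nan
scaler.update()                       # adapt the loss scale for the next iteration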
JEMPP
JEMPP-master/eval_jempp.py
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import utils import torch as t, torch.nn as nn from torch.utils.data import DataLoader, Dataset import torchvision as tv, torchvision.transforms as tr import sys import argparse import numpy as np from ExpUtils import * from models.jem_models import F, CCF from utils import plot, Hamiltonian # Sampling from tqdm import tqdm t.backends.cudnn.benchmark = True t.backends.cudnn.enabled = True seed = 1 im_sz = 32 n_ch = 3 n_classes = 10 correct = 0 print = wlog def init_random(bs): return t.FloatTensor(bs, 3, 32, 32).uniform_(-1, 1) conditionals = [] def init_from_centers(args): global conditionals from torch.distributions.multivariate_normal import MultivariateNormal bs = args.buffer_size if args.dataset == 'svhn': size = [3, 28, 28] else: size = [3, 32, 32] if args.dataset == 'cifar_test': args.dataset = 'cifar10' centers = t.load('%s_mean.pt' % args.dataset) covs = t.load('%s_cov.pt' % args.dataset) buffer = [] for i in range(args.n_classes): mean = centers[i].to(args.device) cov = covs[i].to(args.device) dist = MultivariateNormal(mean, covariance_matrix=cov + 1e-4 * t.eye(int(np.prod(size))).to(args.device)) buffer.append(dist.sample((bs // args.n_classes, )).view([bs // args.n_classes] + size).cpu()) conditionals.append(dist) return t.clamp(t.cat(buffer), -1, 1) def init_inform(args, bs): global conditionals n_ch = 3 size = [3, 32, 32] im_sz = 32 new = t.zeros(bs, n_ch, im_sz, im_sz) for i in range(bs): index = np.random.randint(args.n_classes) dist = conditionals[index] new[i] = dist.sample().view(size) return t.clamp(new, -1, 1).cpu() def sample_p_0(device, replay_buffer, bs, y=None): if len(replay_buffer) == 0: return init_random(bs), [] buffer_size = len(replay_buffer) if y is None else len(replay_buffer) // n_classes if buffer_size > bs: inds = t.randint(0, buffer_size, (bs,)) else: inds = t.arange(0, bs) # if cond, convert inds to class conditional inds if y is not None: inds = y.cpu() * buffer_size + inds assert not args.uncond, "Can't drawn conditional samples without giving me y" buffer_samples = replay_buffer[inds] if args.init == 'i': random_samples = init_inform(args, bs) else: random_samples = init_random(bs) choose_random = (t.rand(bs) < args.reinit_freq).float()[:, None, None, None] samples = choose_random * random_samples + (1 - choose_random) * buffer_samples return samples.to(device), inds def sample_q(f, replay_buffer, y=None, n_steps=10, in_steps=10, args=None): """this func takes in replay_buffer now so we have the option to sample from scratch (i.e. replay_buffer==[]). See test_wrn_ebm.py for example. 
""" # f.eval() # get batch size bs = args.batch_size if y is None else y.size(0) # generate initial samples and buffer inds of those samples (if buffer is used) init_sample, buffer_inds = sample_p_0(args.device, replay_buffer, bs=bs, y=y) x_k = t.autograd.Variable(init_sample, requires_grad=True).to(args.device) # sgld if args.in_steps > 0: Hamiltonian_func = Hamiltonian(f.f.layer_one) eps = 1 for it in range(n_steps): energies = f(x_k, y=y) e_x = energies.sum() # wgrad = f.f.conv1.weight.grad eta = t.autograd.grad(e_x, [x_k], retain_graph=True)[0] # e_x.backward(retain_graph=True) # eta = x_k.grad.detach() # f.f.conv1.weight.grad = wgrad if in_steps > 0: p = 1.0 * f.f.layer_one_out.grad p = p.detach() tmp_inp = x_k.data tmp_inp.requires_grad_() if args.sgld_lr > 0: # if in_steps == 0: use SGLD other than PYLD # if in_steps != 0: combine outter and inner gradients # default 0 if eps > 0: eta = t.clamp(eta, -eps, eps) tmp_inp = x_k + eta * args.sgld_lr if eps > 0: tmp_inp = t.clamp(tmp_inp, -1, 1) for i in range(in_steps): H = Hamiltonian_func(tmp_inp, p) eta_grad = t.autograd.grad(H, [tmp_inp], only_inputs=True, retain_graph=True)[0] if eps > 0: eta_step = t.clamp(eta_grad, -eps, eps) else: eta_step = eta_grad * args.pyld_lr tmp_inp.data = tmp_inp.data + eta_step if eps > 0: tmp_inp = t.clamp(tmp_inp, -1, 1) x_k.data = tmp_inp.data if args.sgld_std > 0.0: x_k.data += args.sgld_std * t.randn_like(x_k) f.train() final_samples = x_k.detach() # update replay buffer if len(replay_buffer) > 0: replay_buffer[buffer_inds] = final_samples.cpu() return final_samples def uncond_samples(f, args, device, save=True): if args.init == 'i': init_from_centers(args) replay_buffer = init_from_centers(args) else: replay_buffer = t.FloatTensor(args.buffer_size, 3, 32, 32).uniform_(-1, 1) for i in range(args.n_sample_steps): samples = sample_q(f, replay_buffer, y=None, n_steps=args.n_steps, in_steps=args.in_steps, args=args) if i % args.print_every == 0 and save: plot('{}/samples_{}.png'.format(args.save_dir, i), samples) print(i) return replay_buffer def cond_samples(f, replay_buffer, args, device, fresh=False): if fresh: replay_buffer = uncond_samples(f, args, device, save=True) n_it = replay_buffer.size(0) // 100 all_y = [] for i in range(n_it): x = replay_buffer[i * 100: (i + 1) * 100].to(device) y = f.classify(x).max(1)[1] all_y.append(y) all_y = t.cat(all_y, 0) each_class = [replay_buffer[all_y == l] for l in range(10)] print([len(c) for c in each_class]) for i in range(100): this_im = [] for l in range(10): this_l = each_class[l][i * 10: (i + 1) * 10] this_im.append(this_l) this_im = t.cat(this_im, 0) if this_im.size(0) > 0: plot('{}/samples_{}.png'.format(args.save_dir, i), this_im) print(i) def best_samples(f, replay_buffer, args, device, fresh=False): sqrt = lambda x: int(t.sqrt(t.Tensor([x]))) plot = lambda p, x: tv.utils.save_image(t.clamp(x, -1, 1), p, normalize=True, nrow=sqrt(x.size(0))) if fresh: replay_buffer = uncond_samples(f, args, device, save=True) n_it = replay_buffer.size(0) // 100 all_y = [] all_px = [] probs = [] with t.no_grad(): for i in tqdm(range(n_it)): x = replay_buffer[i * 100: (i + 1) * 100].to(device) logits = f.classify(x) y = logits.max(1)[1] px = logits.logsumexp(1) prob = nn.Softmax(dim=1)(logits).max(1)[0] all_y.append(y) probs.append(prob) all_px.append(px) all_y = t.cat(all_y, 0) probs = t.cat(probs, 0) print(probs.min().item()) print((probs < 0).sum().item()) all_px = t.cat(all_px, 0) print("%f %f %f" % (probs.mean().item(), probs.max().item(), probs.min().item())) 
each_class = [replay_buffer[all_y == l] for l in range(10)] each_class_probs = [probs[all_y == l] for l in range(10)] each_class_px = [all_px[all_y == l] for l in range(10)] print([len(c) for c in each_class]) new_buffer = [] ratio = abs(args.ratio) for c in range(10): each_probs = each_class_probs[c] # select each_metric = each_class_px[c] # each_metric = each_class_probs[c] if ratio < 1: topk = int(len(each_probs) * ratio) else: topk = int(ratio) topk = min(topk, len(each_probs)) if args.ratio > 0: topks = t.topk(each_metric, topk, largest=args.ratio > 0) index_list = topks[1] else: topks = t.topk(each_metric, topk, largest=args.ratio > 0) index_list = topks[1] print('P(x) min %.3f max %.3f' % (-each_metric[index_list].max().item(), -each_metric[index_list].min().item())) print('Prob(y|x) max %.3f min %.3f' % (each_probs[index_list].max().item(), each_probs[index_list].min().item())) images = each_class[c][index_list] new_buffer.append(images) plot('{}/topk_{}.png'.format(args.save_dir, c), images) replay_buffer = t.cat(new_buffer, 0) print(replay_buffer.shape) def cond_fid(f, replay_buffer, args, device, ratio=0.1): n_it = replay_buffer.size(0) // 100 all_y = [] probs = [] with t.no_grad(): for i in tqdm(range(n_it)): x = replay_buffer[i * 100: (i + 1) * 100].to(device) logits = f.classify(x) y = logits.max(1)[1] prob = nn.Softmax(dim=1)(logits).max(1)[0] all_y.append(y) probs.append(prob) all_y = t.cat(all_y, 0) probs = t.cat(probs, 0) each_class = [replay_buffer[all_y == l] for l in range(args.n_classes)] each_class_probs = [probs[all_y == l] for l in range(args.n_classes)] print([len(c) for c in each_class]) new_buffer = [] for c in range(args.n_classes): each_probs = each_class_probs[c] if ratio < 1: topk = int(len(each_probs) * ratio) else: topk = int(ratio) topk = min(topk, len(each_probs)) topks = t.topk(each_probs, topk) index_list = topks[1] images = each_class[c][index_list] new_buffer.append(images) replay_buffer = t.cat(new_buffer, 0) print(replay_buffer.shape) from Task.eval_buffer import eval_fid fid = eval_fid(f, replay_buffer, args) return fid def logp_hist(f, args, device): import matplotlib.pyplot as plt import seaborn as sns sns.set() plt.switch_backend('agg') def sample(x, n_steps=args.n_steps): x_k = t.autograd.Variable(x.clone(), requires_grad=True) # sgld for k in range(n_steps): f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0] x_k.data += f_prime + 1e-2 * t.randn_like(x_k) final_samples = x_k.detach() return final_samples def grad_norm(x): x_k = t.autograd.Variable(x, requires_grad=True) f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0] grad = f_prime.view(x.size(0), -1) return grad.norm(p=2, dim=1) def score_fn(x): if args.score_fn == "px": return f(x).detach().cpu() elif args.score_fn == "py": return nn.Softmax()(f.classify(x)).max(1)[0].detach().cpu() else: return f.classify(x).max(1)[0].detach().cpu() transform_test = tr.Compose( [tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + args.sigma * t.randn_like(x)] ) datasets = { "cifar10": tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False), "svhn": tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test"), "cifar100":tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False), "celeba": tv.datasets.CelebA(root="../data", download=True, split="test", transform=tr.Compose([tr.Resize(32), tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + 
args.sigma * t.randn_like(x)])) } score_dict = {} num_workers = 0 if args.debug else 4 for dataset_name in args.datasets: print(dataset_name) dataset = datasets[dataset_name] dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=num_workers, drop_last=False) this_scores = [] for x, _ in dataloader: x = x.to(device) scores = score_fn(x) this_scores.extend(scores.numpy()) score_dict[dataset_name] = this_scores colors = ['green', 'red'] for i, (name, scores) in enumerate(score_dict.items()): plt.hist(scores, label=name, bins=100, alpha=.5, color=colors[i]) plt.legend(loc='upper left') plt.xticks([]) plt.yticks([]) plt.savefig(args.save_dir + "/jem_%s_logp.pdf" % args.datasets[1], bbox_inches='tight', pad_inches=0.0) def OODAUC(f, args, device): print("OOD Evaluation") def grad_norm(x): x_k = t.autograd.Variable(x, requires_grad=True) f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0] grad = f_prime.view(x.size(0), -1) return grad.norm(p=2, dim=1) transform_test = tr.Compose( [tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + args.sigma * t.randn_like(x)] ) num_workers = 0 if args.debug else 4 dset_real = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False) dload_real = DataLoader(dset_real, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False) if args.ood_dataset == "svhn": dset_fake = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test") elif args.ood_dataset == "cifar_100": dset_fake = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False) elif args.ood_dataset == "celeba": dset_fake = tv.datasets.ImageFolder(root="/scratch/gobi1/gwohl/CelebA/splits", transform=tr.Compose([tr.Resize(32), tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + args.sigma * t.randn_like(x)])) else: dset_fake = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False) dload_fake = DataLoader(dset_fake, batch_size=100, shuffle=True, num_workers=num_workers, drop_last=False) print(len(dload_real), len(dload_fake)) real_scores = [] print("Real scores...") def score_fn(x): if args.score_fn == "px": return f(x).detach().cpu() elif args.score_fn == "py": return nn.Softmax()(f.classify(x)).max(1)[0].detach().cpu() else: return -grad_norm(x).detach().cpu() for x, _ in dload_real: x = x.to(device) scores = score_fn(x) real_scores.append(scores.numpy()) fake_scores = [] print("Fake scores...") if args.ood_dataset == "cifar_interp": last_batch = None for i, (x, _) in enumerate(dload_fake): x = x.to(device) if i > 0: x_mix = (x + last_batch) / 2 + args.sigma * t.randn_like(x) scores = score_fn(x_mix) fake_scores.append(scores.numpy()) last_batch = x else: for i, (x, _) in enumerate(dload_fake): x = x.to(device) scores = score_fn(x) fake_scores.append(scores.numpy()) real_scores = np.concatenate(real_scores) fake_scores = np.concatenate(fake_scores) real_labels = np.ones_like(real_scores) fake_labels = np.zeros_like(fake_scores) import sklearn.metrics scores = np.concatenate([real_scores, fake_scores]) labels = np.concatenate([real_labels, fake_labels]) score = sklearn.metrics.roc_auc_score(labels, scores) print(score) def test_clf(f, args, device): transform_test = tr.Compose( [tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + t.randn_like(x) * args.sigma] ) def sample(x, n_steps=args.n_steps): x_k = t.autograd.Variable(x.clone(), requires_grad=True) # sgld for k in 
range(n_steps): f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0] x_k.data += f_prime + 1e-2 * t.randn_like(x_k) final_samples = x_k.detach() return final_samples if args.dataset == "cifar_train": dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=True) elif args.dataset == "cifar_test": dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False) elif args.dataset == "cifar100_train": dset = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=True) elif args.dataset == "cifar100_test": dset = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False) elif args.dataset == "svhn_train": dset = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="train") elif args.dataset == "svhn_test": dset = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test") else: dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False) num_workers = 0 if args.debug else 4 dload = DataLoader(dset, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False) corrects, losses, pys, preds = [], [], [], [] for x_p_d, y_p_d in tqdm(dload): x_p_d, y_p_d = x_p_d.to(device), y_p_d.to(device) if args.n_steps > 0: x_p_d = sample(x_p_d) logits = f.classify(x_p_d) py = nn.Softmax(dim=1)(f.classify(x_p_d)).max(1)[0].detach().cpu().numpy() loss = nn.CrossEntropyLoss(reduction='none')(logits, y_p_d).cpu().detach().numpy() losses.extend(loss) correct = (logits.max(1)[1] == y_p_d).float().cpu().numpy() corrects.extend(correct) pys.extend(py) preds.extend(logits.max(1)[1].cpu().numpy()) loss = np.mean(losses) correct = np.mean(corrects) t.save({"losses": losses, "corrects": corrects, "pys": pys}, os.path.join(args.save_dir, "vals.pt")) print('loss %.5g, accuracy: %g%%' % (loss, correct * 100)) return correct def calibration(f, args, device): from Task.calibration import reliability_diagrams from Task.calibration import ECELoss transform_test = tr.Compose( [tr.ToTensor(), tr.Normalize((.5, .5, .5), (.5, .5, .5)), lambda x: x + args.sigma * t.randn_like(x)] ) num_workers = 0 if args.debug else 4 dset_real = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False) dload_real = DataLoader(dset_real, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False) f.eval() real_scores = [] labels = [] pred = [] ece_com = ECELoss(20) ece = 0 c = 0 logits_l = [] for x, y in dload_real: x = x.to(device) labels.append(y.numpy()) logits = f.classify(x) logits_l.append(logits.detach()) scores = nn.Softmax(dim=1)(logits).max(dim=1)[0].detach().cpu() preds = nn.Softmax(dim=1)(logits).argmax(dim=1).detach().cpu() real_scores.append(scores.numpy()) pred.append(preds.numpy()) logits_l = t.cat(logits_l) temps = t.LongTensor(np.concatenate(labels)) ece = ece_com(logits_l, temps.to(device)).item() print("On Calibration of Modern Neural Networks code result:", ece) real_scores = np.concatenate(real_scores) labels = np.concatenate(np.array(labels)) pred = np.concatenate(pred) print(len(real_scores)) # print(pred.shape) reliability_diagrams(list(pred), list(labels), list(real_scores), bin_size=0.05, title="Accuracy: %.2f%%" % (100.0 * correct), args=args) def main(args): global correct set_file_logger(logger, args) args.save_dir = args.dir_path print(args.dir_path) t.manual_seed(seed) if t.cuda.is_available(): t.cuda.manual_seed_all(seed)
device = t.device('cuda' if t.cuda.is_available() else 'cpu') args.device = device model_cls = F if args.uncond else CCF f = model_cls(args.depth, args.width, args.norm, n_classes=args.n_classes, model=args.model) print(f"loading model from {args.load_path}") # load em up ckpt_dict = t.load(args.load_path) f.load_state_dict(ckpt_dict["model_state_dict"]) replay_buffer = ckpt_dict["replay_buffer"] f = f.to(device) f.eval() if args.eval == "OOD": OODAUC(f, args, device) if args.eval == "cali": correct = test_clf(f, args, device) calibration(f, args, device) if args.eval == "test_clf": test_clf(f, args, device) if args.eval == "cond_samples": cond_samples(f, replay_buffer, args, device, args.fresh_samples) if args.eval == "fid": cond_fid(f, replay_buffer, args, device, ratio=args.ratio) if args.eval == "uncond_samples": uncond_samples(f, args, device) if args.eval == "logp_hist": logp_hist(f, args, device) if __name__ == "__main__": parser = argparse.ArgumentParser("LDA Energy Based Models") parser.add_argument("--eval", default="OOD", type=str, choices=["uncond_samples", "cond_samples", "best_samples", "logp_hist", "OOD", "test_clf", "fid", "cali"]) parser.add_argument("--score_fn", default="px", type=str, choices=["px", "py", "pxgrad"], help="For OODAUC, chooses what score function we use.") parser.add_argument("--ood_dataset", default="svhn", type=str, choices=["svhn", "cifar_interp", "cifar_100", "celeba"], help="Chooses which dataset to compare against for OOD") parser.add_argument("--dataset", default="cifar_test", type=str, choices=["cifar_train", "cifar_test", "svhn_test", "svhn_train", "cifar100_test"], help="Dataset to use when running test_clf for classification accuracy") parser.add_argument("--datasets", nargs="+", type=str, default=[], help="The datasets you wanna use to generate a log p(x) histogram") # optimization parser.add_argument("--batch_size", type=int, default=64) # regularization parser.add_argument("--sigma", type=float, default=3e-2) # network parser.add_argument("--norm", type=str, default="batch", choices=[None, "none", "norm", "batch", "instance", "layer", "act"]) parser.add_argument("--init", type=str, default='i', help='r random, i inform') # EBM specific parser.add_argument("--n_steps", type=int, default=0) parser.add_argument("--in_steps", type=int, default=5, help="number of steps of SGLD per iteration, 100 works for short-run, 20 works for PCD") parser.add_argument("--in_lr", type=float, default=0.01) parser.add_argument("--width", type=int, default=10) parser.add_argument("--depth", type=int, default=28) parser.add_argument("--uncond", action="store_true") parser.add_argument("--buffer_size", type=int, default=0) parser.add_argument("--reinit_freq", type=float, default=.0) parser.add_argument("--sgld_lr", type=float, default=1.0) parser.add_argument("--sgld_std", type=float, default=1e-2) parser.add_argument("--model", type=str, default='yopo') parser.add_argument("--ratio", type=int, default=100) # logging + evaluation parser.add_argument("--save_dir", type=str, default='jem_eval') parser.add_argument("--print_every", type=int, default=100) parser.add_argument("--n_sample_steps", type=int, default=100) parser.add_argument("--n_images", type=int, default=60000) parser.add_argument("--load_path", type=str, default=None) parser.add_argument("--print_to_log", action="store_true") parser.add_argument("--fresh_samples", action="store_true", help="If set, then we generate a new replay buffer from scratch for conditional sampling," "Will be much slower.") 
parser.add_argument("--gpu-id", type=str, default="") args = parser.parse_args() auto_select_gpu(args) init_debug(args) run_time = time.strftime('%m%d%H%M%S', time.localtime(time.time())) if args.save_dir == 'jem_eval': # by default to eval the model args.dir_path = args.load_path + "_eval_%s_%s" % (args.eval, run_time) os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id args.n_classes = 100 if "cifar100" in args.dataset else 10 main(args) print(args.save_dir)
25,406
37.730183
147
py
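The OODAUC routine in the file above reduces to scoring two datasets and computing an AUROC over the concatenated scores. A minimal sketch of that final computation with random stand-in scores (in the real code the scores come from f(x), max-softmax, or a gradient norm, depending on --score_fn):

import numpy as np
import sklearn.metrics

real_scores = np.random.randn(1000) + 1.0   # stand-in for in-distribution scores (label 1)
fake_scores = np.random.randn(1000)         # stand-in for OOD scores (label 0)

scores = np.concatenate([real_scores, fake_scores])
labels = np.concatenate([np.ones_like(real_scores), np.zeros_like(fake_scores)])
print(sklearn.metrics.roc_auc_score(labels, scores))  # ~0.76 for unit Gaussians shifted by 1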
JEMPP
JEMPP-master/train_jempp.py
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch as t import torch.nn as nn import os import argparse import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm from ExpUtils import * from utils import eval_classification, Hamiltonian, checkpoint, get_data, set_bn_train, set_bn_eval, plot from models.jem_models import get_model_and_buffer t.set_num_threads(2) t.backends.cudnn.benchmark = True t.backends.cudnn.enabled = True seed = 1 inner_his = [] conditionals = [] def init_random(args, bs): global conditionals n_ch = 3 size = [3, 32, 32] im_sz = 32 new = t.zeros(bs, n_ch, im_sz, im_sz) for i in range(bs): index = np.random.randint(args.n_classes) dist = conditionals[index] new[i] = dist.sample().view(size) return t.clamp(new, -1, 1).cpu() def sample_p_0(replay_buffer, bs, y=None): if len(replay_buffer) == 0: return init_random(args, bs), [] buffer_size = len(replay_buffer) if y is None else len(replay_buffer) // args.n_classes inds = t.randint(0, buffer_size, (bs,)) # if cond, convert inds to class conditional inds if y is not None: inds = y.cpu() * buffer_size + inds buffer_samples = replay_buffer[inds] random_samples = init_random(args, bs) choose_random = (t.rand(bs) < args.reinit_freq).float()[:, None, None, None] samples = choose_random * random_samples + (1 - choose_random) * buffer_samples return samples.to(args.device), inds def sample_q(f, replay_buffer, y=None, n_steps=10, in_steps=10, args=None, save=True): """this func takes in replay_buffer now so we have the option to sample from scratch (i.e. replay_buffer==[]). See test_wrn_ebm.py for example.
""" global inner_his inner_his = [] # Batch norm uses train status # f.eval() # get batch size bs = args.batch_size if y is None else y.size(0) # generate initial samples and buffer inds of those samples (if buffer is used) init_sample, buffer_inds = sample_p_0(replay_buffer, bs=bs, y=y) x_k = t.autograd.Variable(init_sample, requires_grad=True) # sgld if in_steps > 0: Hamiltonian_func = Hamiltonian(f.f.layer_one) eps = args.eps if args.pyld_lr <= 0: in_steps = 0 for it in range(n_steps): energies = f(x_k, y=y) e_x = energies.sum() # wgrad = f.f.conv1.weight.grad eta = t.autograd.grad(e_x, [x_k], retain_graph=True)[0] # e_x.backward(retain_graph=True) # eta = x_k.grad.detach() # f.f.conv1.weight.grad = wgrad if in_steps > 0: p = 1.0 * f.f.layer_one_out.grad p = p.detach() tmp_inp = x_k.data tmp_inp.requires_grad_() if args.sgld_lr > 0: # if in_steps == 0: use SGLD other than PYLD # if in_steps != 0: combine outer and inner gradients # default 0 tmp_inp = x_k + t.clamp(eta, -eps, eps) * args.sgld_lr tmp_inp = t.clamp(tmp_inp, -1, 1) for i in range(in_steps): H = Hamiltonian_func(tmp_inp, p) eta_grad = t.autograd.grad(H, [tmp_inp], only_inputs=True, retain_graph=True)[0] eta_step = t.clamp(eta_grad, -eps, eps) * args.pyld_lr tmp_inp.data = tmp_inp.data + eta_step tmp_inp = t.clamp(tmp_inp, -1, 1) x_k.data = tmp_inp.data if args.sgld_std > 0.0: x_k.data += args.sgld_std * t.randn_like(x_k) if in_steps > 0: loss = -1.0 * Hamiltonian_func(x_k.data, p) loss.backward() f.train() final_samples = x_k.detach() # update replay buffer if len(replay_buffer) > 0 and save: replay_buffer[buffer_inds] = final_samples.cpu() return final_samples def category_mean(dload_train, args): import time start = time.time() if args.dataset == 'svhn': size = [3, 32, 32] else: size = [3, 32, 32] centers = t.zeros([args.n_classes, int(np.prod(size))]) covs = t.zeros([args.n_classes, int(np.prod(size)), int(np.prod(size))]) im_test, targ_test = [], [] for im, targ in dload_train: im_test.append(im) targ_test.append(targ) im_test, targ_test = t.cat(im_test), t.cat(targ_test) # conditionals = [] for i in range(args.n_classes): imc = im_test[targ_test == i] imc = imc.view(len(imc), -1) mean = imc.mean(dim=0) sub = imc - mean.unsqueeze(dim=0) cov = sub.t() @ sub / len(imc) centers[i] = mean covs[i] = cov print(time.time() - start) t.save(centers, '%s_mean.pt' % args.dataset) t.save(covs, '%s_cov.pt' % args.dataset) def init_from_centers(args): global conditionals from torch.distributions.multivariate_normal import MultivariateNormal bs = args.buffer_size if args.dataset == 'tinyimagenet': size = [3, 64, 64] elif args.dataset == 'svhn': size = [3, 32, 32] else: size = [3, 32, 32] centers = t.load('%s_mean.pt' % args.dataset) covs = t.load('%s_cov.pt' % args.dataset) buffer = [] for i in range(args.n_classes): mean = centers[i].to(args.device) cov = covs[i].to(args.device) dist = MultivariateNormal(mean, covariance_matrix=cov + 1e-4 * t.eye(int(np.prod(size))).to(args.device)) buffer.append(dist.sample((bs // args.n_classes, )).view([bs // args.n_classes] + size).cpu()) conditionals.append(dist) return t.clamp(t.cat(buffer), -1, 1) def main(args): np.random.seed(args.seed) t.manual_seed(args.seed) if t.cuda.is_available(): t.cuda.manual_seed_all(args.seed) device = t.device('cuda' if t.cuda.is_available() else 'cpu') args.device = device # datasets dload_train, dload_train_labeled, dload_valid, dload_test = get_data(args) # for dataset centers # if not os.path.isfile('%s_cov.pt' % args.dataset): # category_mean(dload_train,
args) f, replay_buffer = get_model_and_buffer(args, device) if args.p_x_weight > 0: replay_buffer = init_from_centers(args) # optimizer params = f.class_output.parameters() if args.clf_only else f.parameters() if args.optimizer == "adam": optim = t.optim.Adam(params, lr=args.lr, betas=[.9, .999], weight_decay=args.weight_decay) else: optim = t.optim.SGD(params, lr=args.lr, momentum=.9, weight_decay=args.weight_decay) best_valid_acc = 0.0 cur_iter = 0 # trace learning rate new_lr = args.lr n_steps = args.n_steps in_steps = args.in_steps for epoch in range(args.n_epochs): if epoch in args.decay_epochs: for param_group in optim.param_groups: new_lr = param_group['lr'] * args.decay_rate param_group['lr'] = new_lr print("Decaying lr to {}".format(new_lr)) for i, (x_p_d, _) in tqdm(enumerate(dload_train)): if cur_iter <= args.warmup_iters: lr = args.lr * cur_iter / float(args.warmup_iters) for param_group in optim.param_groups: param_group['lr'] = lr x_p_d = x_p_d.to(device) x_lab, y_lab = dload_train_labeled.__next__() x_lab, y_lab = x_lab.to(device), y_lab.to(device) L = 0. if args.p_x_weight > 0: # maximize log p(x) if args.plc == 'alltrain1': fp_all = f(x_p_d) fp = fp_all.mean() if args.class_cond_p_x_sample: assert not args.uncond, "can only draw class-conditional samples if EBM is class-cond" y_q = t.randint(0, args.n_classes, (args.batch_size,)).to(device) x_q = sample_q(f, replay_buffer, y=y_q, n_steps=n_steps, in_steps=in_steps, args=args) else: x_q = sample_q(f, replay_buffer, n_steps=n_steps, in_steps=in_steps, args=args) # sample from log-sumexp if args.plc == 'eval': f.apply(set_bn_eval) fp_all = f(x_p_d) fp = fp_all.mean() if args.plc == 'alltrain2': fp_all = f(x_p_d) fp = fp_all.mean() fq_all = f(x_q) fq = fq_all.mean() l_p_x = -(fp - fq) if args.plc == 'eval': f.apply(set_bn_train) if cur_iter % args.print_every == 0: print('{} P(x) | {}:{:>d} f(x_p_d)={:>9.4f} f(x_q)={:>9.4f} d={:>9.4f}'.format(args.pid, epoch, i, fp, fq, fp - fq)) L += args.p_x_weight * l_p_x if args.p_y_given_x_weight > 0: # maximize log p(y | x) logits = f.classify(x_lab) l_p_y_given_x = nn.CrossEntropyLoss()(logits, y_lab) if cur_iter % args.print_every == 0: acc = (logits.max(1)[1] == y_lab).float().mean() print('{} P(y|x) {}:{:>d} loss={:>9.4f}, acc={:>9.4f}'.format(args.pid, epoch, cur_iter, l_p_y_given_x.item(), acc.item())) L += args.p_y_given_x_weight * l_p_y_given_x if args.p_x_y_weight > 0: # maximize log p(x, y) assert not args.uncond, "this objective can only be trained for class-conditional EBM DUUUUUUUUHHHH!!!" 
x_q_lab = sample_q(f, replay_buffer, y=y_lab, n_steps=n_steps, in_steps=in_steps, args=args) fp, fq = f(x_lab, y_lab).mean(), f(x_q_lab, y_lab).mean() l_p_x_y = -(fp - fq) if cur_iter % args.print_every == 0: print('P(x, y) | {}:{:>d} f(x_p_d)={:>9.4f} f(x_q)={:>9.4f} d={:>9.4f}'.format(epoch, i, fp, fq, fp - fq)) L += args.p_x_y_weight * l_p_x_y # break if the loss diverged...easier for poppa to run experiments this way if L.abs().item() > 1e8: print("BAD BOIIIIIIIIII") print("min {:>4.3f} max {:>5.3f}".format(x_q.min().item(), x_q.max().item())) plot('{}/diverge_{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q) return optim.zero_grad() L.backward() optim.step() cur_iter += 1 if cur_iter % args.print_every == 0 and args.p_x_weight > 0: if not args.plot_cond: if args.class_cond_p_x_sample: assert not args.uncond, "can only draw class-conditional samples if EBM is class-cond" y_q = t.randint(0, args.n_classes, (args.batch_size,)).to(device) x_q = sample_q(f, replay_buffer, y=y_q, n_steps=n_steps, in_steps=in_steps, args=args) else: x_q = sample_q(f, replay_buffer, n_steps=n_steps, in_steps=in_steps, args=args) plot('{}/samples/x_q_{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q) if args.plot_cond: # generate class-conditional samples y = t.arange(0, args.n_classes)[None].repeat(args.n_classes, 1).transpose(1, 0).contiguous().view(-1).to(device) x_q_y = sample_q(f, replay_buffer, y=y, n_steps=n_steps, in_steps=in_steps, args=args) plot('{}/samples/x_q_y{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q_y) if epoch % args.ckpt_every == 0 and args.p_x_weight > 0: checkpoint(f, replay_buffer, f'ckpt_{epoch}.pt', args, device) if epoch % args.eval_every == 0 and (args.p_y_given_x_weight > 0 or args.p_x_y_weight > 0): f.eval() with t.no_grad(): correct, loss = eval_classification(f, dload_valid, 'Valid', epoch, args, wlog) if args.dataset != 'tinyimagenet': t_c, _ = eval_classification(f, dload_test, 'Test', epoch, args, wlog) if correct > best_valid_acc: best_valid_acc = correct print("Epoch {} Best Valid!: {}".format(epoch, correct)) checkpoint(f, replay_buffer, "best_valid_ckpt.pt", args, device) f.train() checkpoint(f, replay_buffer, "last_ckpt.pt", args, device) if __name__ == "__main__": parser = argparse.ArgumentParser("LDA Energy Based Models") parser.add_argument("--dataset", type=str, default="cifar10", choices=["cifar10", "svhn", "cifar100", 'tinyimagenet']) parser.add_argument("--data_root", type=str, default="../data") # optimization parser.add_argument("--lr", type=float, default=1e-4) parser.add_argument("--decay_epochs", nargs="+", type=int, default=[60, 90, 120, 135], help="decay learning rate by decay_rate at these epochs") parser.add_argument("--decay_rate", type=float, default=.2, help="learning rate decay multiplier") parser.add_argument("--clf_only", action="store_true", help="If set, then only train the classifier") parser.add_argument("--labels_per_class", type=int, default=-1, help="number of labeled examples per class, if zero then use all labels") parser.add_argument("--optimizer", choices=["adam", "sgd"], default="adam") parser.add_argument("--batch_size", type=int, default=64) parser.add_argument("--n_epochs", type=int, default=150) parser.add_argument("--warmup_iters", type=int, default=-1, help="number of iters to linearly increase learning rate, if -1 then no warmmup") # loss weighting parser.add_argument("--p_x_weight", type=float, default=1.) parser.add_argument("--p_y_given_x_weight", type=float, default=1.) 
parser.add_argument("--p_x_y_weight", type=float, default=0.) # regularization parser.add_argument("--dropout_rate", type=float, default=0.0) parser.add_argument("--sigma", type=float, default=3e-2, help="stddev of gaussian noise to add to input, .03 works but .1 is more stable") parser.add_argument("--weight_decay", type=float, default=4e-4) # network parser.add_argument("--norm", type=str, default=None, choices=[None, "none", "batch", "instance", "layer", "act"], help="norm to add to weights, none works fine") # EBM specific parser.add_argument("--n_steps", type=int, default=10, help="number of steps of SGLD per iteration, 20 works for PCD") parser.add_argument("--in_steps", type=int, default=5, help="number of steps of SGLD per iteration, 100 works for short-run, 20 works for PCD") parser.add_argument("--width", type=int, default=10, help="WRN width parameter") parser.add_argument("--depth", type=int, default=28, help="WRN depth parameter") parser.add_argument("--uncond", action="store_true", help="If set, then the EBM is unconditional") parser.add_argument("--class_cond_p_x_sample", action="store_true", help="If set we sample from p(y)p(x|y), othewise sample from p(x)," "Sample quality higher if set, but classification accuracy better if not.") parser.add_argument("--buffer_size", type=int, default=10000) parser.add_argument("--reinit_freq", type=float, default=0.05) # SGLD or PYLD parser.add_argument("--sgld_lr", type=float, default=0.0) parser.add_argument("--sgld_std", type=float, default=0) parser.add_argument("--pyld_lr", type=float, default=0.2) # logging + evaluation parser.add_argument("--save_dir", type=str, default='./experiment') parser.add_argument("--dir_path", type=str, default='./experiment') parser.add_argument("--log_dir", type=str, default='./runs') parser.add_argument("--log_arg", type=str, default='JEMPP-n_steps-in_steps-pyld_lr-norm-plc') parser.add_argument("--ckpt_every", type=int, default=10, help="Epochs between checkpoint save") parser.add_argument("--eval_every", type=int, default=1, help="Epochs between evaluation") parser.add_argument("--print_every", type=int, default=100, help="Iterations between print") parser.add_argument("--load_path", type=str, default=None) parser.add_argument("--print_to_log", action="store_true", help="If true, directs std-out to log file") parser.add_argument("--plot_cond", action="store_true", help="If set, save class-conditional samples") parser.add_argument("--plot_uncond", action="store_true", help="If set, save unconditional samples") parser.add_argument("--n_valid", type=int, default=5000) parser.add_argument("--plc", type=str, default="alltrain1", help="alltrain1, alltrain2, eval") parser.add_argument("--eps", type=float, default=1, help="eps bound") parser.add_argument("--model", type=str, default='yopo') parser.add_argument("--novis", action="store_true", help="") parser.add_argument("--debug", action="store_true", help="") parser.add_argument("--exp_name", type=str, default="JEMPP", help="exp name, for description") parser.add_argument("--seed", type=int, default=1) parser.add_argument("--gpu-id", type=str, default="0") args = parser.parse_args() init_env(args, logger) args.save_dir = args.dir_path os.makedirs('{}/samples'.format(args.dir_path)) print = wlog print(args.dir_path) main(args) print(args.dir_path)
17,931
43.606965
166
py
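sample_q in the file above interleaves SGLD with a YOPO-style inner loop on the network's first layer. A minimal sketch of the plain SGLD special case only (the in_steps == 0, sgld_lr > 0 path); sgld_sample and the toy quadratic energy are hypothetical stand-ins, not the trained EBM:

import torch

def sgld_sample(energy_fn, x_init, n_steps=20, lr=1.0, std=0.01):
    # Ascend the energy (log-density) with Langevin noise, clamping to the pixel range
    x = x_init.clone().requires_grad_(True)
    for _ in range(n_steps):
        grad = torch.autograd.grad(energy_fn(x).sum(), [x])[0]
        x = (x + lr * grad + std * torch.randn_like(x)).clamp(-1, 1)
        x = x.detach().requires_grad_(True)
    return x.detach()

x0 = torch.rand(4, 3, 32, 32) * 2 - 1                         # uniform init, as in init_random
samples = sgld_sample(lambda x: -(x ** 2).flatten(1).sum(1), x0)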
JEMPP
JEMPP-master/utils.py
import os
import torch
import torch as t
import torch.nn as nn
import torchvision as tv
import torchvision.transforms as tr
from torch.utils.data import DataLoader, Dataset
import numpy as np
from torch.nn.modules.loss import _Loss
from ExpUtils import AverageMeter


class Hamiltonian(_Loss):

    def __init__(self, layer, reg_cof=1e-4):
        super(Hamiltonian, self).__init__()
        self.layer = layer
        self.reg_cof = 0

    def forward(self, x, p):
        y = self.layer(x)
        H = torch.sum(y * p)
        # H = H - self.reg_cof * l2
        return H


def sqrt(x):
    return int(t.sqrt(t.Tensor([x])))


def plot(p, x):
    return tv.utils.save_image(t.clamp(x, -1, 1), p, normalize=True, nrow=sqrt(x.size(0)))


def makedirs(dirname):
    if not os.path.exists(dirname):
        os.makedirs(dirname)


def save_checkpoint(state, save, epoch):
    if not os.path.exists(save):
        os.makedirs(save)
    filename = os.path.join(save, 'checkpt-%04d.pth' % epoch)
    torch.save(state, filename)


class DataSubset(Dataset):
    def __init__(self, base_dataset, inds=None, size=-1):
        self.base_dataset = base_dataset
        if inds is None:
            inds = np.random.choice(list(range(len(base_dataset))), size, replace=False)
        self.inds = inds

    def __getitem__(self, index):
        base_ind = self.inds[index]
        return self.base_dataset[base_ind]

    def __len__(self):
        return len(self.inds)


def cycle(loader):
    while True:
        for data in loader:
            yield data


def init_random(args, bs, im_sz=32, n_ch=3):
    return t.FloatTensor(bs, n_ch, im_sz, im_sz).uniform_(-1, 1)


def get_data(args):
    if args.dataset == "svhn":
        transform_train = tr.Compose(
            [tr.Pad(4, padding_mode="reflect"),
             tr.RandomCrop(32),
             tr.ToTensor(),
             tr.Normalize((.5, .5, .5), (.5, .5, .5)),
             lambda x: x + args.sigma * t.randn_like(x)]
        )
    else:
        transform_train = tr.Compose(
            [tr.Pad(4, padding_mode="reflect"),
             tr.RandomCrop(32),
             tr.RandomHorizontalFlip(),
             tr.ToTensor(),
             tr.Normalize((.5, .5, .5), (.5, .5, .5)),
             lambda x: x + args.sigma * t.randn_like(x)]
        )
    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5))]
    )

    def dataset_fn(train, transform):
        if args.dataset == "cifar10":
            return tv.datasets.CIFAR10(root=args.data_root, transform=transform, download=True, train=train)
        elif args.dataset == "cifar100":
            return tv.datasets.CIFAR100(root=args.data_root, transform=transform, download=True, train=train)
        else:
            return tv.datasets.SVHN(root=args.data_root, transform=transform, download=True,
                                    split="train" if train else "test")

    # get all training inds
    full_train = dataset_fn(True, transform_train)
    all_inds = list(range(len(full_train)))
    # set seed
    np.random.seed(1234)
    # shuffle
    np.random.shuffle(all_inds)
    # separate out validation set
    if args.n_valid > args.n_classes:
        valid_inds, train_inds = all_inds[:args.n_valid], all_inds[args.n_valid:]
    else:
        valid_inds, train_inds = [], all_inds
    train_inds = np.array(train_inds)
    train_labeled_inds = []
    other_inds = []
    if args.labels_per_class > 0:
        train_labels = np.array([full_train[ind][1] for ind in train_inds])  # to speed up
        for i in range(args.n_classes):
            print(i)
            train_labeled_inds.extend(train_inds[train_labels == i][:args.labels_per_class])
            other_inds.extend(train_inds[train_labels == i][args.labels_per_class:])
    else:
        train_labeled_inds = train_inds

    dset_train = DataSubset(dataset_fn(True, transform_train), inds=train_inds)
    dset_train_labeled = DataSubset(dataset_fn(True, transform_train), inds=train_labeled_inds)
    dset_valid = DataSubset(dataset_fn(True, transform_test), inds=valid_inds)
    num_workers = 0 if args.debug else 4
    dload_train = DataLoader(dset_train, batch_size=args.batch_size, shuffle=True,
                             num_workers=num_workers, drop_last=True)
    dload_train_labeled = DataLoader(dset_train_labeled, batch_size=args.batch_size, shuffle=True,
                                     num_workers=num_workers, drop_last=True)
    dload_train_labeled = cycle(dload_train_labeled)
    dset_test = dataset_fn(False, transform_test)
    dload_valid = DataLoader(dset_valid, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    dload_test = DataLoader(dset_test, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    return dload_train, dload_train_labeled, dload_valid, dload_test


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


def checkpoint(f, buffer, tag, args, device):
    f.cpu()
    ckpt_dict = {
        "model_state_dict": f.state_dict(),
        "replay_buffer": buffer,
    }
    t.save(ckpt_dict, os.path.join(args.save_dir, tag))
    f.to(device)


def set_bn_eval(m):
    if isinstance(m, nn.modules.batchnorm._BatchNorm):
        m.eval()


def set_bn_train(m):
    if isinstance(m, nn.modules.batchnorm._BatchNorm):
        m.train()


def eval_classification(f, dload, set_name, epoch, args=None, wlog=None):
    corrects, losses = [], []
    if args.n_classes >= 200:
        top1 = AverageMeter('Acc@1', ':6.2f')
        top5 = AverageMeter('Acc@5', ':6.2f')
    for x, y in dload:
        x, y = x.to(args.device), y.to(args.device)
        logits = f.classify(x)
        loss = nn.CrossEntropyLoss(reduction='none')(logits, y).detach().cpu().numpy()
        losses.extend(loss)
        if args.n_classes >= 200:
            acc1, acc5 = accuracy(logits, y, topk=(1, 5))
            top1.update(acc1[0], x.size(0))
            top5.update(acc5[0], x.size(0))
        else:
            correct = (logits.max(1)[1] == y).float().cpu().numpy()
            corrects.extend(correct)
    loss = np.mean(losses)
    if wlog:
        my_print = wlog
    else:
        my_print = print
    if args.n_classes >= 200:
        correct = top1.avg
        my_print("Epoch %d, %s loss %.5f, top1 acc %.4f, top5 acc %.4f" % (epoch, set_name, loss, top1.avg, top5.avg))
    else:
        correct = np.mean(corrects)
        my_print("Epoch %d, %s loss %.5f, acc %.4f" % (epoch, set_name, loss, correct))
    if args.vis:
        args.writer.add_scalar('%s/Loss' % set_name, loss, epoch)
        if args.n_classes >= 200:
            args.writer.add_scalar('%s/Acc_1' % set_name, top1.avg, epoch)
            args.writer.add_scalar('%s/Acc_5' % set_name, top5.avg, epoch)
        else:
            args.writer.add_scalar('%s/Accuracy' % set_name, correct, epoch)
    return correct, loss
7,385
32.572727
139
py
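get_data in the file above wraps the labeled loader in cycle() so the training loop can keep calling __next__() indefinitely. A small usage sketch of that pattern with a hypothetical toy dataset:

import torch
from torch.utils.data import DataLoader, TensorDataset

def cycle(loader):                     # same helper as in utils.py above
    while True:
        for data in loader:
            yield data

ds = TensorDataset(torch.randn(10, 3), torch.randint(0, 2, (10,)))
labeled_iter = cycle(DataLoader(ds, batch_size=4, shuffle=True, drop_last=True))
for step in range(7):                  # more steps than the loader has batches
    x, y = next(labeled_iter)          # silently restarts the underlying loader when exhausted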
JEMPP
JEMPP-master/ExpUtils.py
import os import sys import json import time import socket import shutil import signal import logging from functools import partial import torch import numpy as np import tensorboardX as tbX import matplotlib.pyplot as plt logging.basicConfig(level=logging.INFO, format="%(asctime)s: %(filename)s[%(lineno)d]: %(message)s", datefmt="%m-%d %H:%M:%S") logger = logging.getLogger() logger.setLevel(logging.INFO) wlog = logger.info def init_env(args, exp_logger): # 1. debug -> num_workers init_debug(args) args.vis = not args.novis args.hostname = socket.gethostname().split('.')[0] # 2. select gpu auto_select_gpu(args) os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id args.dir_path = form_dir_path(args.exp_name, args) set_file_logger(exp_logger, args) init_logger_board(args) args.n_classes = 10 if args.dataset == "cifar100": args.n_classes = 100 if args.dataset == "tinyimagenet": args.n_classes = 200 def init_debug(args): # verify the debug mode # pytorch loader has a parameter num_workers # in debug mode, it should be 0 # so set args.debug gettrace = getattr(sys, 'gettrace', None) if gettrace is None: print('No sys.gettrace') args.debug = False elif gettrace(): print('Hmm, Big Debugger is watching me') args.debug = True else: args.debug = False def auto_select_gpu(args): if args.gpu_id: return try: import GPUtil except ImportError: wlog("please install GPUtil for automatically selecting GPU") args.gpu_id = '1' return if len(GPUtil.getGPUs()) == 0: return id_list = GPUtil.getAvailable(order="load", maxLoad=0.7, maxMemory=0.9, limit=8) if len(id_list) == 0: print("GPU memory is not enough for predicted usage") raise NotImplementedError args.gpu_id = str(id_list[0]) def init_logger_board(args): if 'vis' in vars(args) and args.vis: args.writer = tbX.SummaryWriter(log_dir=args.dir_path) def vlog(writer, cur_iter, set_name, wlog=None, verbose=True, **kwargs): for k in kwargs: v = kwargs[k] writer.add_scalar('%s/%s' % (set_name, k.capitalize()), v, cur_iter) if wlog: my_print = wlog else: my_print = print if not verbose: prompt = "%d " % cur_iter prompt += ','.join("%s: %.4f" % (k, kwargs[k]) for k in ['loss', 'acc', 'acc1', 'acc5'] if k in kwargs) my_print(prompt) def set_file_logger(exp_logger, args): # Just use "logger" above # use tensorboard + this function to substitute ExpSaver args_dict = vars(args) dir_path = args.dir_path if not os.path.isdir(dir_path): os.makedirs(dir_path) with open(os.path.join(dir_path, "para.json"), "w") as fp: json.dump(args_dict, fp, indent=4, sort_keys=True) logfile = os.path.join(dir_path, "exp.log") fh = logging.FileHandler(logfile, mode='w') fh.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d]: %(message)s") fh.setFormatter(formatter) exp_logger.addHandler(fh) copy_script_to_folder(sys.argv[0], args.dir_path) if os.name != 'nt': signal.signal(signal.SIGQUIT, partial(rename_quit_handler, args)) signal.signal(signal.SIGTERM, partial(delete_quit_handler, args)) def list_args(args): for e in sorted(vars(args).items()): print("args.%s = %s" % (e[0], e[1] if not isinstance(e[1], str) else '"%s"' % e[1])) def form_dir_path(task, args): """ Params: task: the name of your experiment/research args: the namespace of argparse requires: --dataset: always need a dataset. --log-arg: the details shown in the name of your directory where logs are. --log-dir: the directory to save logs, default is ~/projecct/runs. 
""" args.pid = os.getpid() args_dict = vars(args) if "log_dir" not in args_dict: args.log_dir = "" if "log_arg" not in args_dict: args.log_arg = "" run_time = time.strftime('%m%d%H%M%S', time.localtime(time.time())) log_arg_list = [] if args.debug: task += '-debug' for e in args.log_arg.split("-"): v = args_dict.get(e, None) if v is None: log_arg_list.append(str(e)) elif isinstance(v, str): log_arg_list.append(str(v)) else: log_arg_list.append("%s=%s" % (e, str(v))) args.exp_marker = exp_marker = "-".join(log_arg_list) exp_marker = "%s/%s/%s@%s@%d" % (task, args.dataset, run_time, exp_marker, os.getpid()) base_dir = os.path.join(os.environ['HOME'], 'project/runs') if not args.log_dir else args.log_dir dir_path = os.path.join(base_dir, exp_marker) return dir_path def summary(data): assert isinstance(data, np.ndarray) or isinstance(data, torch.Tensor) wlog("shape: %s, num of points: %d, pixels: %d" % (str(data.shape), data.shape[0], np.prod(data.shape[1:]))) wlog("max: %g, min %g" % (data.max(), data.min())) wlog("mean: %g" % data.mean()) wlog("mean of abs: %g" % np.abs(data).mean()) wlog("mean of square sum: %g" % (data ** 2).mean()) def remove_outliers(x, outlier_constant=1.5): a = np.array(x) upper_quartile = np.percentile(a, 75) lower_quartile = np.percentile(a, 25) iqr = (upper_quartile - lower_quartile) * outlier_constant quartile_set = (lower_quartile - iqr, upper_quartile + iqr) result = a[np.where((a >= quartile_set[0]) & (a <= quartile_set[1]))] return result def vis_step(writer, step, dicts): """ Add several curves. """ for k in dicts: writer.add_scalar(k, dicts[k], step) def copy_script_to_folder(caller_path, folder): '''copy script''' script_filename = caller_path.split('/')[-1] script_relative_path = os.path.join(folder, script_filename) shutil.copy(caller_path, script_relative_path) def time_string(): '''convert time format''' ISOTIMEFORMAT='%Y-%m-%d %X' string = '[{}]'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) )) return string def convert_secs2time(epoch_time): need_hour = int(epoch_time / 3600) need_mins = int((epoch_time - 3600*need_hour) / 60) need_secs = int(epoch_time - 3600*need_hour - 60*need_mins) return need_hour, need_mins, need_secs class RecorderMeter(object): """Computes and stores the minimum loss value and its epoch index""" def __init__(self, total_epoch): self.reset(total_epoch) def reset(self, total_epoch): assert total_epoch > 0 self.total_epoch = total_epoch self.current_epoch = 0 self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val] self.epoch_losses = self.epoch_losses - 1 self.epoch_accuracy= np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val] self.epoch_accuracy= self.epoch_accuracy def update(self, idx, train_loss, train_acc, val_loss, val_acc): assert idx >= 0 and idx < self.total_epoch, 'total_epoch : {} , but update with the {} index'.format(self.total_epoch, idx) self.epoch_losses [idx, 0] = train_loss self.epoch_losses [idx, 1] = val_loss self.epoch_accuracy[idx, 0] = train_acc self.epoch_accuracy[idx, 1] = val_acc self.current_epoch = idx + 1 return self.max_accuracy(False) == val_acc def max_accuracy(self, istrain): if self.current_epoch <= 0: return 0 if istrain: return self.epoch_accuracy[:self.current_epoch, 0].max() else: return self.epoch_accuracy[:self.current_epoch, 1].max() def plot_curve(self, save_path): title = 'the accuracy/loss curve of train/val' dpi = 80 width, height = 1200, 800 legend_fontsize = 10 scale_distance = 48.8 figsize = width / float(dpi), 
height / float(dpi) fig = plt.figure(figsize=figsize) x_axis = np.array([i for i in range(self.total_epoch)]) # epochs y_axis = np.zeros(self.total_epoch) plt.xlim(0, self.total_epoch) plt.ylim(0, 100) interval_y = 5 interval_x = 5 plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x)) plt.yticks(np.arange(0, 100 + interval_y, interval_y)) plt.grid() plt.title(title, fontsize=20) plt.xlabel('the training epoch', fontsize=16) plt.ylabel('accuracy', fontsize=16) y_axis[:] = self.epoch_accuracy[:, 0] plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2) plt.legend(loc=4, fontsize=legend_fontsize) y_axis[:] = self.epoch_accuracy[:, 1] plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2) plt.legend(loc=4, fontsize=legend_fontsize) y_axis[:] = self.epoch_losses[:, 0] plt.plot(x_axis, y_axis*50, color='g', linestyle=':', label='train-loss-x50', lw=2) plt.legend(loc=4, fontsize=legend_fontsize) y_axis[:] = self.epoch_losses[:, 1] plt.plot(x_axis, y_axis*50, color='y', linestyle=':', label='valid-loss-x50', lw=2) plt.legend(loc=4, fontsize=legend_fontsize) if save_path is not None: fig.savefig(save_path, dpi=dpi, bbox_inches='tight') print ('---- save figure {} into {}'.format(title, save_path)) plt.close(fig) class AverageMeter: """Computes and stores the average and current value""" def __init__(self, name='', fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def plotting(exp_dir): # Load the training log dictionary: train_dict = pickle.load(open(os.path.join(exp_dir, 'log.pkl'), 'rb')) ########################################################### # Make the vanilla train and test loss per epoch plot # ########################################################### plt.plot(np.asarray(train_dict['train_loss']), label='train_loss') plt.plot(np.asarray(train_dict['test_loss']), label='test_loss') # plt.ylim(0,2000) plt.xlabel('evaluation step') plt.ylabel('metrics') plt.tight_layout() plt.legend(loc='upper right') plt.savefig(os.path.join(exp_dir, 'loss.png' )) plt.clf() # accuracy plt.plot(np.asarray(train_dict['train_acc']), label='train_acc') plt.plot(np.asarray(train_dict['test_acc']), label='test_acc') # plt.ylim(0,2000) plt.xlabel('evaluation step') plt.ylabel('metrics') plt.tight_layout() plt.legend(loc='upper right') plt.savefig(os.path.join(exp_dir, 'acc.png')) plt.clf() def get_axis(axarr, H, W, i, j): H, W = H - 1, W - 1 if not (H or W): ax = axarr elif not (H and W): ax = axarr[max(i, j)] else: ax = axarr[i][j] return ax def show_image_row(xlist, ylist=None, fontsize=12, size=(2.5, 2.5), tlist=None, filename=None): H, W = len(xlist), len(xlist[0]) fig, axarr = plt.subplots(H, W, figsize=(size[0] * W, size[1] * H)) for w in range(W): for h in range(H): ax = get_axis(axarr, H, W, h, w) ax.imshow(xlist[h][w].permute(1, 2, 0)) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) ax.xaxis.set_ticklabels([]) ax.yaxis.set_ticklabels([]) if ylist and w == 0: ax.set_ylabel(ylist[h], fontsize=fontsize) if tlist: ax.set_title(tlist[h][w], fontsize=fontsize) if filename is not None: plt.savefig(filename, bbox_inches='tight') plt.show() def delete_quit_handler(g_var, signal, frame): shutil.rmtree(g_var.dir_path) sys.exit(0) def 
rename_quit_handler(g_var, signal, frame): os.rename(g_var.dir_path, g_var.dir_path + "_stop") sys.exit(0)
12,387
31.686016
131
py
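A short usage sketch of the AverageMeter class defined above, accumulating a batch-size-weighted running average (the accuracy numbers are made up, and this assumes ExpUtils is importable from the working directory):

from ExpUtils import AverageMeter

meter = AverageMeter('Acc@1', ':6.2f')
for batch_acc, batch_size in [(0.50, 128), (0.75, 128), (1.00, 64)]:
    meter.update(batch_acc, n=batch_size)   # sum += val * n; count += n
print(meter.val)   # 1.0, the most recent value
print(meter.avg)   # (0.50*128 + 0.75*128 + 1.00*64) / 320 = 0.70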
JEMPP
JEMPP-master/Task/inception.py
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math

MODEL_DIR = './imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)


# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    # For convenience
    if len(images[0].shape) != 3:
        return 0, 0
    # Bypassing all the assertions so that we don't end prematurely
    # assert(type(images) == list)
    # assert(type(images[0]) == np.ndarray)
    # assert(len(images[0].shape) == 3)
    # assert(np.max(images[0]) > 10)
    # assert(np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 1
    preds = []
    n_batches = int(math.ceil(float(len(inps)) / float(bs)))
    for i in range(n_batches):
        if i % int(n_batches / 10) == 0:
            print('iteration {0}, total {1}'.format(i, n_batches))
        inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
        inp = np.concatenate(inp, 0)
        pred = sess.run(softmax, {'ExpandDims:0': inp})
        preds.append(pred)
    preds = np.concatenate(preds, 0)
    scores = []
    for i in range(splits):
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)


# This function is called automatically.
def _init_inception():
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    pool3 = sess.graph.get_tensor_by_name('pool_3:0')
    ops = pool3.graph.get_operations()
    for op_idx, op in enumerate(ops):
        for o in op.outputs:
            shape = o.get_shape()
            shape = [s.value for s in shape]
            new_shape = []
            for j, s in enumerate(shape):
                if s == 1 and j == 0:
                    new_shape.append(None)
                else:
                    new_shape.append(s)
            o.set_shape(tf.TensorShape(new_shape))
    w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
    logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
    softmax = tf.nn.softmax(logits)


if softmax is None:
    _init_inception()
3,803
33.27027
92
py
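A hedged usage sketch for get_inception_score; the images are random placeholders, and note that merely importing Task.inception builds a TF session and downloads the Inception graph on first use:

import numpy as np
from Task.inception import get_inception_score  # module import triggers _init_inception()

# Placeholder batch: 100 random 32x32x3 uint8 images with values in [0, 255].
fake_images = [np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8) for _ in range(100)]
mean_is, std_is = get_inception_score(fake_images, splits=10)
print('Inception score: %.3f +/- %.3f' % (mean_is, std_is))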
JEMPP
JEMPP-master/Task/imagenet_preprocessing.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image pre-processing utilities."""

import tensorflow as tf

IMAGE_DEPTH = 3  # color images

# _R_MEAN = 123.68
# _G_MEAN = 116.78
# _B_MEAN = 103.94
# _CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
_CHANNEL_MEANS = [0.0, 0.0, 0.0]

# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 128


def _decode_crop_and_flip(image_buffer, bbox, num_channels):
    """Crops the given image to a random part of the image, and randomly flips.

    We use the fused decode_and_crop op, which performs better than the two ops
    used separately in series, but note that this requires that the image be
    passed in as an un-decoded string Tensor.

    Args:
        image_buffer: scalar string Tensor representing the raw JPEG image buffer.
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax].
        num_channels: Integer depth of the image buffer for decoding.

    Returns:
        3-D tensor with cropped image.
    """
    # A large fraction of image datasets contain a human-annotated bounding box
    # delineating the region of the image containing the object of interest. We
    # choose to create a new bounding box for the object which is a randomly
    # distorted version of the human-annotated bounding box that obeys an
    # allowed range of aspect ratios, sizes and overlap with the human-annotated
    # bounding box. If no box is supplied, then we assume the bounding box is
    # the entire image.
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        tf.image.extract_jpeg_shape(image_buffer),
        bounding_boxes=bbox,
        min_object_covered=0.1,
        aspect_ratio_range=[0.75, 1.33],
        area_range=[0.05, 1.0],
        max_attempts=100,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, _ = sample_distorted_bounding_box

    # Reassemble the bounding box in the format the crop op requires.
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])

    # Use the fused decode and crop op here, which is faster than each in series.
    cropped = tf.image.decode_and_crop_jpeg(
        image_buffer, crop_window, channels=num_channels)

    # Flip to add a little more random distortion in.
    cropped = tf.image.random_flip_left_right(cropped)
    return cropped


def _central_crop(image, crop_height, crop_width):
    """Performs central crops of the given image list.

    Args:
        image: a 3-D image tensor
        crop_height: the height of the image following the crop.
        crop_width: the width of the image following the crop.

    Returns:
        3-D tensor with cropped image.
    """
    shape = tf.shape(input=image)
    height, width = shape[0], shape[1]

    amount_to_be_cropped_h = (height - crop_height)
    crop_top = amount_to_be_cropped_h // 2
    amount_to_be_cropped_w = (width - crop_width)
    crop_left = amount_to_be_cropped_w // 2
    return tf.slice(
        image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])


def _mean_image_subtraction(image, means, num_channels):
    """Subtracts the given means from each image channel.

    For example:
        means = [123.68, 116.779, 103.939]
        image = _mean_image_subtraction(image, means)

    Note that the rank of `image` must be known.

    Args:
        image: a tensor of size [height, width, C].
        means: a C-vector of values to subtract from each channel.
        num_channels: number of color channels in the image that will be distorted.

    Returns:
        the centered image.

    Raises:
        ValueError: If the rank of `image` is unknown, if `image` has a rank other
            than three or if the number of channels in `image` doesn't match the
            number of values in `means`.
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')

    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')

    # We have a 1-D tensor of means; convert to 3-D.
    means = tf.expand_dims(tf.expand_dims(means, 0), 0)

    return image - means


def _smallest_size_at_least(height, width, resize_min):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
        height: an int32 scalar tensor indicating the current height.
        width: an int32 scalar tensor indicating the current width.
        resize_min: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.

    Returns:
        new_height: an int32 scalar tensor indicating the new height.
        new_width: an int32 scalar tensor indicating the new width.
    """
    resize_min = tf.cast(resize_min, tf.float32)

    # Convert to floats to make subsequent calculations go smoothly.
    height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)

    smaller_dim = tf.minimum(height, width)
    scale_ratio = resize_min / smaller_dim

    # Convert back to ints to make heights and widths that TF ops will accept.
    new_height = tf.cast(tf.ceil(height * scale_ratio), tf.int32)
    new_width = tf.cast(tf.ceil(width * scale_ratio), tf.int32)

    return new_height, new_width


def _aspect_preserving_resize(image, resize_min):
    """Resize images preserving the original aspect ratio.

    Args:
        image: A 3-D image `Tensor`.
        resize_min: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.

    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    shape = tf.shape(input=image)
    height, width = shape[0], shape[1]

    new_height, new_width = _smallest_size_at_least(height, width, resize_min)

    return _resize_image(image, new_height, new_width)


def _resize_image(image, height, width):
    """Simple wrapper around tf.resize_images.

    This is primarily to make sure we use the same `ResizeMethod` and other
    details each time.

    Args:
        image: A 3-D image `Tensor`.
        height: The target height for the resized image.
        width: The target width for the resized image.

    Returns:
        resized_image: A 3-D tensor containing the resized image. The first two
            dimensions have the shape [height, width].
    """
    return tf.image.resize_images(
        image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
        align_corners=False)


def preprocess_image(image_buffer, bbox, output_height, output_width,
                     num_channels, is_training=False):
    """Preprocesses the given image.

    Preprocessing includes decoding, cropping, and resizing for both training
    and eval images. Training preprocessing, however, introduces some random
    distortion of the image to improve accuracy.

    Args:
        image_buffer: scalar string Tensor representing the raw JPEG image buffer.
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax].
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        num_channels: Integer depth of the image buffer for decoding.
        is_training: `True` if we're preprocessing the image for training and
            `False` otherwise.

    Returns:
        A preprocessed image.
    """
    if is_training:
        # For training, we want to randomize some of the distortions.
        image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
        image = _resize_image(image, output_height, output_width)
    else:
        # For validation, we want to decode, resize, then just crop the middle.
        image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
        image = _aspect_preserving_resize(image, _RESIZE_MIN)
        image = _central_crop(image, output_height, output_width)

    image.set_shape([output_height, output_width, num_channels])

    return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)


def parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

    The output of the build_image_data.py image preprocessing script is a
    dataset containing serialized Example protocol buffers. Each Example proto
    contains the following fields:

        image/height: 462
        image/width: 581
        image/colorspace: 'RGB'
        image/channels: 3
        image/class/label: 615
        image/class/synset: 'n03623198'
        image/class/text: 'knee pad'
        image/object/bbox/xmin: 0.1
        image/object/bbox/xmax: 0.9
        image/object/bbox/ymin: 0.2
        image/object/bbox/ymax: 0.6
        image/object/bbox/label: 615
        image/format: 'JPEG'
        image/filename: 'ILSVRC2012_val_00041207.JPEG'
        image/encoded: <JPEG encoded string>

    Args:
        example_serialized: scalar Tensor tf.string containing a serialized
            Example protocol buffer.

    Returns:
        image_buffer: Tensor tf.string containing the contents of a JPEG file.
        label: Tensor tf.int32 containing the label.
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax].
        text: Tensor tf.string containing the human-readable label.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
    }
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    # Sparse features in Example proto.
feature_map.update( {k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']}) features = tf.parse_single_example(example_serialized, feature_map) label = tf.cast(features['image/class/label'], dtype=tf.int32) xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) # Note that we impose an ordering of (y, x) just to make life difficult. bbox = tf.concat([ymin, xmin, ymax, xmax], 0) # Force the variable number of bounding boxes into the shape # [1, num_boxes, coords]. bbox = tf.expand_dims(bbox, 0) bbox = tf.transpose(bbox, [0, 2, 1]) return features['image/encoded'], label, bbox, features['image/class/text'] class ImagenetPreprocessor: def __init__(self, image_size, dtype, train): self.image_size = image_size self.dtype = dtype self.train = train def preprocess(self, image_buffer, bbox): # pylint: disable=g-import-not-at-top image = preprocess_image(image_buffer, bbox, self.image_size, self.image_size, IMAGE_DEPTH, is_training=self.train) return tf.cast(image, self.dtype) def parse_and_preprocess(self, value): image_buffer, label_index, bbox, _ = parse_example_proto(value) image = self.preprocess(image_buffer, bbox) image = tf.reshape(image, [self.image_size, self.image_size, IMAGE_DEPTH]) return label_index, image
12,435
35.792899
119
py
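A sketch (not from the repo) of wiring ImagenetPreprocessor into a TF1 input pipeline; the TFRecord filename is a placeholder:

import tensorflow as tf
from Task.imagenet_preprocessing import ImagenetPreprocessor

# Placeholder record file; each element must be a serialized Example proto
# with the fields documented in parse_example_proto above.
ds = tf.data.TFRecordDataset(['/path/to/validation-00000-of-00128'])
prep = ImagenetPreprocessor(image_size=128, dtype=tf.float32, train=False)
ds = ds.map(prep.parse_and_preprocess)   # yields (label, image), image shaped [128, 128, 3]
ds = ds.batch(32)
labels, images = ds.make_one_shot_iterator().get_next()  # TF1-style graph iteration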
JEMPP
JEMPP-master/Task/data.py
from tensorflow.python.platform import flags from tensorflow.contrib.data.python.ops import batching import tensorflow as tf import json from torch.utils.data import Dataset import pickle import os.path as osp import os import numpy as np import time from scipy.misc import imread, imresize from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder from torchvision import transforms from Task.imagenet_preprocessing import ImagenetPreprocessor import torch import torchvision FLAGS = flags.FLAGS # Dataset Options flags.DEFINE_string('dsprites_path', '/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz', 'path to dsprites characters') flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image') flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes') flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes') flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects') flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects') flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects') flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images') flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images') flags.DEFINE_string('load_type', 'npy', 'npy or png') flags.DEFINE_bool('single', False, 'single ') flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single') # Data augmentation options # flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image') # flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout') # flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout') # flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data') flags.DEFINE_string('eval', '', '') flags.DEFINE_string('init', '', '') flags.DEFINE_string('norm', '', '') flags.DEFINE_string('n_steps', '', '') flags.DEFINE_string('reinit_freq', '', '') flags.DEFINE_string('print_every', '', '') flags.DEFINE_string('n_sample_steps', '', '') flags.DEFINE_integer('gpu-id', 16, 'size of cutout') def cutout(mask_color=(0, 0, 0)): mask_size_half = FLAGS.cutout_mask_size // 2 offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0 def _cutout(image): image = np.asarray(image).copy() if np.random.random() > FLAGS.cutout_prob: return image h, w = image.shape[:2] if FLAGS.cutout_inside: cxmin, cxmax = mask_size_half, w + offset - mask_size_half cymin, cymax = mask_size_half, h + offset - mask_size_half else: cxmin, cxmax = 0, w + offset cymin, cymax = 0, h + offset cx = np.random.randint(cxmin, cxmax) cy = np.random.randint(cymin, cymax) xmin = cx - mask_size_half ymin = cy - mask_size_half xmax = xmin + FLAGS.cutout_mask_size ymax = ymin + FLAGS.cutout_mask_size xmin = max(0, xmin) ymin = max(0, ymin) xmax = min(w, xmax) ymax = min(h, ymax) image[:, ymin:ymax, xmin:xmax] = np.array(mask_color)[:, None, None] return image return _cutout class TFImagenetLoader(Dataset): def __init__(self, split, batchsize, idx, num_workers, rescale=1): IMAGENET_NUM_TRAIN_IMAGES = 1281167 IMAGENET_NUM_VAL_IMAGES = 50000 self.rescale = rescale if split == "train": im_length = IMAGENET_NUM_TRAIN_IMAGES records_to_skip = im_length * idx // num_workers records_to_read = im_length * (idx + 1) // num_workers - records_to_skip else: im_length = IMAGENET_NUM_VAL_IMAGES self.curr_sample = 0 index_path = 
osp.join(FLAGS.imagenet_datadir, 'index.json')
        with open(index_path) as f:
            metadata = json.load(f)
            counts = metadata['record_counts']

        if split == 'train':
            file_names = list(sorted([x for x in counts.keys() if x.startswith('train')]))

            result_records_to_skip = None
            files = []
            for filename in file_names:
                records_in_file = counts[filename]
                if records_to_skip >= records_in_file:
                    records_to_skip -= records_in_file
                    continue
                elif records_to_read > 0:
                    if result_records_to_skip is None:
                        # Record the number to skip in the first file
                        result_records_to_skip = records_to_skip
                    files.append(filename)
                    records_to_read -= (records_in_file - records_to_skip)
                    records_to_skip = 0
                else:
                    break
        else:
            files = list(sorted([x for x in counts.keys() if x.startswith('validation')]))

        files = [osp.join(FLAGS.imagenet_datadir, x) for x in files]
        preprocess_function = ImagenetPreprocessor(128, dtype=tf.float32, train=False).parse_and_preprocess

        ds = tf.data.TFRecordDataset.from_generator(lambda: files, output_types=tf.string)
        ds = ds.apply(tf.data.TFRecordDataset)
        ds = ds.take(im_length)
        ds = ds.prefetch(buffer_size=FLAGS.batch_size)
        ds = ds.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=10000))
        ds = ds.apply(batching.map_and_batch(map_func=preprocess_function, batch_size=FLAGS.batch_size, num_parallel_batches=4))
        ds = ds.prefetch(buffer_size=2)
        ds_iterator = ds.make_initializable_iterator()
        labels, images = ds_iterator.get_next()
        self.images = tf.clip_by_value(images / 256 + tf.random_uniform(tf.shape(images), 0, 1. / 256), 0.0, 1.0)
        self.labels = labels

        config = tf.ConfigProto(device_count={'GPU': 0})
        sess = tf.Session(config=config)
        sess.run(ds_iterator.initializer)

        self.im_length = im_length // batchsize
        self.sess = sess

    def __next__(self):
        self.curr_sample += 1
        sess = self.sess
        im_corrupt = np.random.uniform(0, self.rescale, size=(FLAGS.batch_size, 128, 128, 3))
        label, im = sess.run([self.labels, self.images])
        im = im * self.rescale
        label = np.eye(1000)[label.squeeze() - 1]
        im, im_corrupt, label = torch.from_numpy(im), torch.from_numpy(im_corrupt), torch.from_numpy(label)
        return im_corrupt, im, label

    def __iter__(self):
        return self

    def __len__(self):
        return self.im_length


class CelebA(Dataset):

    def __init__(self):
        self.path = "/root/data/img_align_celeba"
        self.ims = os.listdir(self.path)
        self.ims = [osp.join(self.path, im) for im in self.ims]

    def __len__(self):
        return len(self.ims)

    def __getitem__(self, index):
        # CelebA stores no FLAGS attribute; use the module-level FLAGS instead.
        FLAGS.single = False
        label = 1

        if FLAGS.single:
            index = 0

        path = self.ims[index]
        im = imread(path)
        im = imresize(im, (32, 32))
        image_size = 32
        im = im / 255.
if FLAGS.datasource == 'default': im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3) elif FLAGS.datasource == 'random': im_corrupt = np.random.uniform( 0, 1, size=(image_size, image_size, 3)) return im_corrupt, im, label class Cifar10(Dataset): def __init__( self, FLAGS, train=True, full=False, augment=False, noise=True, rescale=1.0): if augment: transform_list = [ torchvision.transforms.RandomCrop(32, padding=4), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), ] # if FLAGS.cutout: # transform_list.append(cutout()) transform = transforms.Compose(transform_list) else: transform = transforms.ToTensor() self.FLAGS = FLAGS self.full = full self.data = CIFAR10( "../data/dataset/cifar10", transform=transform, train=train, download=True) self.test_data = CIFAR10( "../data/dataset/cifar10", transform=transform, train=False, download=True) self.one_hot_map = np.eye(10) self.noise = noise self.rescale = rescale def __len__(self): if self.full: return len(self.data) + len(self.test_data) else: return len(self.data) def __getitem__(self, index): FLAGS = self.FLAGS FLAGS.single = False if not FLAGS.single: if self.full: if index >= len(self.data): im, label = self.test_data[index - len(self.data)] else: im, label = self.data[index] else: im, label = self.data[index] else: im, label = self.data[0] im = np.transpose(im, (1, 2, 0)).numpy() image_size = 32 label = self.one_hot_map[label] im = im * 255 / 256 if self.noise: im = im * self.rescale + \ np.random.uniform(0, self.rescale * 1 / 256., im.shape) np.random.seed((index + int(time.time() * 1e7)) % 2**32) FLAGS.datasource = 'random' if FLAGS.datasource == 'default': im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3) elif FLAGS.datasource == 'random': im_corrupt = np.random.uniform( 0.0, self.rescale, (image_size, image_size, 3)) return im_corrupt, im, label class Cifar100(Dataset): def __init__(self, FLAGS, train=True, augment=False): if augment: transform_list = [ torchvision.transforms.RandomCrop(32, padding=4), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), ] # if FLAGS.cutout: # transform_list.append(cutout()) transform = transforms.Compose(transform_list) else: transform = transforms.ToTensor() self.FLAGS = FLAGS self.data = CIFAR100( os.path.join(os.environ['HOME'], 'project/research/data/dataset', "cifar100"), transform=transform, train=train, download=True) self.one_hot_map = np.eye(100) def __len__(self): return len(self.data) def __getitem__(self, index): FLAGS = self.FLAGS FLAGS.single = False if not FLAGS.single: im, label = self.data[index] else: im, label = self.data[0] im = np.transpose(im, (1, 2, 0)).numpy() image_size = 32 label = self.one_hot_map[label] im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape) np.random.seed((index + int(time.time() * 1e7)) % 2**32) FLAGS.datasource = 'random' if FLAGS.datasource == 'default': im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3) elif FLAGS.datasource == 'random': im_corrupt = np.random.uniform( 0.0, 1.0, (image_size, image_size, 3)) return im_corrupt, im, label class Svhn(Dataset): def __init__(self, FLAGS, train=True, augment=False): transform = transforms.ToTensor() self.FLAGS = FLAGS self.data = SVHN(os.path.join(os.environ['HOME'], 'project/research/data/dataset', "svhn"), transform=transform, download=True) self.one_hot_map = np.eye(10) def __len__(self): return len(self.data) def __getitem__(self, index): FLAGS = self.FLAGS FLAGS.single = False if not FLAGS.single: 
im, label = self.data[index]
        else:
            im, label = self.data[0]

        im = np.transpose(im, (1, 2, 0)).numpy()
        image_size = 32
        label = self.one_hot_map[label]
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)

        FLAGS.datasource = 'random'
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0.0, 1.0, (image_size, image_size, 3))

        return im_corrupt, im, label


class Mnist(Dataset):
    def __init__(self, train=True, rescale=1.0):
        self.data = MNIST(
            "/root/mnist",
            transform=transforms.ToTensor(),
            download=True,
            train=train)
        self.labels = np.eye(10)
        self.rescale = rescale

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Mnist stores no FLAGS attribute; use the module-level FLAGS instead.
        FLAGS.single = False
        im, label = self.data[index]
        label = self.labels[label]
        im = im.squeeze()
        # im = im.numpy() / 2 + np.random.uniform(0, 0.5, (28, 28))
        # im = im.numpy() / 2 + 0.2
        im = im.numpy() / 256 * 255 + np.random.uniform(0, 1. / 256, (28, 28))
        im = im * self.rescale
        image_size = 28
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0, self.rescale, (28, 28))

        return im_corrupt, im, label


class DSprites(Dataset):
    def __init__(self, cond_size=False, cond_shape=False, cond_pos=False, cond_rot=False):
        dat = np.load(FLAGS.dsprites_path)

        if FLAGS.dshape_only:
            l = dat['latents_values']
            mask = (l[:, 4] == 16 / 31) & (l[:, 5] == 16 / 31) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = self.label[:, 1:2]
        elif FLAGS.dpos_only:
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            mask = (l[:, 1] == 1) & (l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = self.label[:, 4:] + 0.5
        elif FLAGS.dsize_only:
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            mask = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16 / 31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = (self.label[:, 2:3])
        elif FLAGS.drot_only:
            l = dat['latents_values']
            mask = (l[:, 2] == 0.5) & (l[:, 4] == 16 / 31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = (self.label[:, 3:4])
            self.label = np.concatenate([np.cos(self.label), np.sin(self.label)], axis=1)
        elif FLAGS.dsprites_restrict:
            l = dat['latents_values']
            mask = (l[:, 1] == 1) & (l[:, 3] == 0 * np.pi / 39)
            self.data = dat['imgs'][mask]
            self.label = dat['latents_values'][mask]
        else:
            self.data = dat['imgs']
            self.label = dat['latents_values']

            if cond_size:
                self.label = self.label[:, 2:3]
            elif cond_shape:
                self.label = self.label[:, 1:2]
            elif cond_pos:
                self.label = self.label[:, 4:]
            elif cond_rot:
                self.label = self.label[:, 3:4]
                self.label = np.concatenate([np.cos(self.label), np.sin(self.label)], axis=1)
            else:
                self.label = self.label[:, 1:2]

        self.identity = np.eye(3)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        # DSprites stores no FLAGS attribute; use the module-level FLAGS instead.
        FLAGS.single = False
        im = self.data[index]
        image_size = 64

        if not (FLAGS.dpos_only or FLAGS.dsize_only) and (not FLAGS.cond_size) and \
                (not FLAGS.cond_pos) and (not FLAGS.cond_rot) and (not FLAGS.drot_only):
            label = self.identity[self.label[index].astype(np.int32) - 1].squeeze()
        else:
            label = self.label[index]

        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
        elif FLAGS.datasource == 'random':
            im_corrupt = 0.5 + 0.5 * np.random.randn(image_size, image_size)

        return im_corrupt, im, label


class Imagenet(Dataset):
    def __init__(self, train=True, augment=False):
        if train:
            for i in range(1, 11):
                f = pickle.load(open(osp.join(FLAGS.imagenet_path, 'train_data_batch_{}'.format(i)), 'rb'))
                if i == 1:
                    labels = f['labels']
                    data = f['data']
                else:
                    labels.extend(f['labels'])
                    data = np.vstack((data, f['data']))
        else:
            f = pickle.load(open(osp.join(FLAGS.imagenet_path, 'val_data'), 'rb'))
            labels = f['labels']
            data = f['data']

        self.labels = labels
        self.data = data
        self.one_hot_map = np.eye(1000)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        # Imagenet stores no FLAGS attribute; use the module-level FLAGS instead.
        FLAGS.single = False
        if not FLAGS.single:
            im, label = self.data[index], self.labels[index]
        else:
            im, label = self.data[0], self.labels[0]

        label -= 1

        im = im.reshape((3, 32, 32)) / 255
        im = im.transpose((1, 2, 0))

        image_size = 32
        label = self.one_hot_map[label]
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)

        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0.0, 1.0, (image_size, image_size, 3))

        return im_corrupt, im, label


class Textures(Dataset):
    def __init__(self, train=True, augment=False):
        self.dataset = ImageFolder("/mnt/nfs/yilundu/data/dtd/images")

    def __len__(self):
        return 2 * len(self.dataset)

    def __getitem__(self, index):
        idx = index % (len(self.dataset))
        im, label = self.dataset[idx]
        im = np.array(im)[:32, :32] / 255
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
        return im, im, label
19,913
33.512998
135
py
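A minimal sketch of reading JEM-style triplets from the Cifar10 wrapper above. Cifar10 only touches .single and .datasource on the object it is given, so a plain Namespace can stand in for the parsed FLAGS (the scipy/TF1-era imports of Task.data are assumed to resolve):

from argparse import Namespace
from torch.utils.data import DataLoader
from Task.data import Cifar10

args = Namespace(single=False, datasource='random')   # stand-in flags object
dset = Cifar10(args, train=True, augment=True, noise=True)
loader = DataLoader(dset, batch_size=64, shuffle=True, num_workers=0)
im_corrupt, im, label = next(iter(loader))
# im_corrupt: uniform-noise init for an SGLD chain; im: dequantized image; label: one-hot (64, 10)
print(im_corrupt.shape, im.shape, label.shape)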
JEMPP
JEMPP-master/Task/calibration.py
import random import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import matplotlib.pyplot as plt import seaborn as sns def expected_calibration_error(predictions, truths, confidences, bin_size=0.1, title='demo'): upper_bounds = np.arange(bin_size, 1 + bin_size, bin_size) accs = [] # Compute empirical probability for each bin plot_x = [] ece = 0 for conf_thresh in upper_bounds: acc, perc_pred, avg_conf = compute_accuracy(conf_thresh - bin_size, conf_thresh, confidences, predictions, truths) plot_x.append(avg_conf) accs.append(acc) ece += abs(avg_conf - acc) * perc_pred return ece def reliability_diagrams(predictions, truths, confidences, bin_size=0.1, title='demo', args=None): upper_bounds = np.arange(bin_size, 1 + bin_size, bin_size) accs = [] # Compute empirical probability for each bin conf_x = [] ece = 0 for conf_thresh in upper_bounds: acc, perc_pred, avg_conf = compute_accuracy(conf_thresh - bin_size, conf_thresh, confidences, predictions, truths) conf_x.append(avg_conf) accs.append(acc) temp = abs(avg_conf - acc) * perc_pred print('m %.2f, B_m %d, acc(B_m) %.4f, conf = %.4f, |B_m||acc(B_m) - conf(B_m)|/n = %.5f' % (conf_thresh, int(perc_pred * len(predictions)), acc, avg_conf, temp)) ece += temp # Produce error bars for each bin upper_bound_to_bootstrap_est = {x: [] for x in upper_bounds} for i in range(1): # Generate bootstrap boot_strap_outcomes = [] boot_strap_confs = random.sample(confidences, len(confidences)) for samp_conf in boot_strap_confs: correct = 0 if random.random() < samp_conf: correct = 1 boot_strap_outcomes.append(correct) # Compute error frequency in each bin for upper_bound in upper_bounds: conf_thresh_upper = upper_bound conf_thresh_lower = upper_bound - bin_size filtered_tuples = [x for x in zip(boot_strap_outcomes, boot_strap_confs) if x[1] > conf_thresh_lower and x[1] <= conf_thresh_upper] correct = len([x for x in filtered_tuples if x[0] == 1]) acc = float(correct) / len(filtered_tuples) if len(filtered_tuples) > 0 else 0 upper_bound_to_bootstrap_est[upper_bound].append(acc) upper_bound_to_bootstrap_upper_bar = {} upper_bound_to_bootstrap_lower_bar = {} for upper_bound, freqs in upper_bound_to_bootstrap_est.items(): top_95_quintile_i = int(0.975 * len(freqs)) lower_5_quintile_i = int(0.025 * len(freqs)) upper_bar = sorted(freqs)[top_95_quintile_i] lower_bar = sorted(freqs)[lower_5_quintile_i] upper_bound_to_bootstrap_upper_bar[upper_bound] = upper_bar upper_bound_to_bootstrap_lower_bar[upper_bound] = lower_bar upper_bars = [] lower_bars = [] for i, upper_bound in enumerate(upper_bounds): if upper_bound_to_bootstrap_upper_bar[upper_bound] == 0: upper_bars.append(0) lower_bars.append(0) else: # The error bar arguments need to be the distance from the data point, not the y-value upper_bars.append(abs(conf_x[i] - upper_bound_to_bootstrap_upper_bar[upper_bound])) lower_bars.append(abs(conf_x[i] - upper_bound_to_bootstrap_lower_bar[upper_bound])) # sns.set(font_scale=2) fig, ax = plt.subplots() ax.errorbar(conf_x, conf_x, label="Perfect classifier calibration") new_conf_x = [] new_accs = [] for i, bars in enumerate(zip(lower_bars, upper_bars)): if bars[0] == 0 and bars[1] == 0: continue new_conf_x.append(conf_x[i]) new_accs.append(accs[i]) print("ECE: %g" % ece) ax.plot(new_conf_x, new_accs, '-o', label="Accuracy", color="red") ax.set_ylim([0, 1]) ax.set_xlim([0, 1]) plt.title(title + " ECE: %.2f%%" % (ece * 100)) plt.ylabel('Empirical probability') plt.xlabel('Estimated probability') plt.show() plt.close() fig, ax = plt.subplots() 
ax.errorbar([0, 1], [0, 1], label="Perfect classifier calibration") # ax.plot(new_conf_x, new_accs, '-o', label="Accuracy", color="black") ax.bar(upper_bounds - 0.025, accs, width=bin_size, label="Accuracy", color="red", edgecolor='gray', align='center') ax.set_ylim([0, 1]) ax.set_xlim([0, 1]) plt.title(title + " ECE: %.1f%%" % (ece * 100), fontsize=20) plt.ylabel('Empirical probability', fontsize=20) plt.xlabel('Estimated probability', fontsize=16) # fig.savefig("reliability.tif", format='tif', bbox_inches='tight', dpi=1200) if args is not None and args.load_path: fig.savefig(args.load_path + "_calibration.png") # fig.savefig(args.load_path + "_calibration.eps", format='eps', bbox_inches='tight', dpi=1200) plt.show() plt.close() def compute_accuracy(conf_thresh_lower, conf_thresh_upper, conf, pred, true): filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper] if len(filtered_tuples) < 1: return 0, 0, 0 else: correct = len([x for x in filtered_tuples if x[0] == x[1]]) avg_conf = sum([x[2] for x in filtered_tuples]) / len(filtered_tuples) accuracy = float(correct) / len(filtered_tuples) perc_of_data = float(len(filtered_tuples)) / len(conf) return accuracy, perc_of_data, avg_conf def compute_accuracy2(conf_thresh_lower, conf_thresh_upper, conf, pred, true): num_classes = max(true) filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper] if len(filtered_tuples) < 1: return 0, 0, 0 else: corrects = [] acc = [] for i in range(num_classes): predict = len([x for x in filtered_tuples if x[0] == i]) category = len([x for x in filtered_tuples if x[1] == i]) correct = len([x for x in filtered_tuples if x[0] == i and x[0] == x[1]]) if category == 0: accuracy = 0 else: accuracy = float(correct) / category acc.append(accuracy) print("category %d: predict num: %d, ground truth num: %d, correct: %d, %.4f" % (i, predict, category, correct, accuracy)) avg_conf = sum([x[2] for x in filtered_tuples]) / len(filtered_tuples) perc_of_data = float(len(filtered_tuples)) / len(conf) accuracy = sum(acc) / num_classes return accuracy, perc_of_data, avg_conf class ECELoss(nn.Module): """ Calculates the Expected Calibration Error of a model. (This isn't necessary for temperature scaling, just a cool metric). The input to this loss is the logits of a model, NOT the softmax scores. This divides the confidence outputs into equally-sized interval bins. In each bin, we compute the confidence gap: bin_gap = | avg_confidence_in_bin - accuracy_in_bin | We then return a weighted average of the gaps, based on the number of samples in each bin See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht. "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI. 2015. 
""" def __init__(self, n_bins=15): """ n_bins (int): number of confidence interval bins """ super(ECELoss, self).__init__() bin_boundaries = torch.linspace(0, 1, n_bins + 1) self.bin_lowers = bin_boundaries[:-1] self.bin_uppers = bin_boundaries[1:] def forward(self, logits, labels): softmaxes = F.softmax(logits, dim=1) confidences, predictions = torch.max(softmaxes, 1) accuracies = predictions.eq(labels) ece = torch.zeros(1, device=logits.device) for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers): # Calculated |confidence - accuracy| in each bin in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) prop_in_bin = in_bin.float().mean() if prop_in_bin.item() > 0: accuracy_in_bin = accuracies[in_bin].float().mean() avg_confidence_in_bin = confidences[in_bin].mean() ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin return ece
8,434
39.748792
169
py
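A small sketch exercising ECELoss above on random logits and labels; with unrelated logits the measured ECE is typically small but nonzero:

import torch
from Task.calibration import ECELoss

torch.manual_seed(0)
logits = torch.randn(1000, 10)           # placeholder classifier outputs
labels = torch.randint(0, 10, (1000,))   # placeholder ground truth
ece = ECELoss(n_bins=15)(logits, labels)
print('ECE: %.4f' % ece.item())          # weighted mean of |confidence - accuracy| over bins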
JEMPP
JEMPP-master/Task/fid.py
#!/usr/bin/env python3
'''
Calculates the Frechet Inception Distance (FID) to evaluate GANs.

The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.

When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).

The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.

See --help to see further details.
'''

from __future__ import absolute_import, division, print_function

import os
import sys
import pathlib
import urllib.request
import tarfile
import warnings

import numpy as np
import tensorflow as tf
from scipy.misc import imread
from scipy import linalg

config = tf.ConfigProto()
config.gpu_options.allow_growth = True

MODEL_DIR = './imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
pool3 = None


class InvalidFIDException(Exception):
    pass


#-------------------------------------------------------------------------------
def get_fid_score(images, images_gt):
    images = np.stack(images, 0)
    images_gt = np.stack(images_gt, 0)

    with tf.Session(config=config) as sess:
        m1, s1 = calculate_activation_statistics(images, sess)
        m2, s2 = calculate_activation_statistics(images_gt, sess)

        fid_value = calculate_frechet_distance(m1, s1, m2, s2)
        print("Obtained fid value of {}".format(fid_value))
        return fid_value


def create_inception_graph(pth):
    """Creates a graph from saved GraphDef file."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(pth, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='FID_Inception_Net')


#-------------------------------------------------------------------------------
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
    """Prepares inception net for batched usage and returns pool_3 layer. """
    layername = 'FID_Inception_Net/pool_3:0'
    pool3 = sess.graph.get_tensor_by_name(layername)
    ops = pool3.graph.get_operations()
    for op_idx, op in enumerate(ops):
        for o in op.outputs:
            shape = o.get_shape()
            if shape._dims != []:
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
    return pool3


#-------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=50, verbose=False):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- images      : Numpy array of dimension (n_images, hi, wi, 3). The values
                     must lie between 0 and 255.
    -- sess        : current session
    -- batch_size  : the images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the available hardware.
    -- verbose     : If set to True and parameter out_step is given, the number of
                     calculated batches is reported.
    Returns:
    -- A numpy array of dimension (num images, 2048) that contains the
       activations of the given tensor when feeding inception with the query tensor.
    """
    # inception_layer = _get_inception_layer(sess)
    d0 = images.shape[0]
    if batch_size > d0:
        print("warning: batch size is bigger than the data size. setting batch size to data size")
        batch_size = d0
    n_batches = d0 // batch_size
    n_used_imgs = n_batches * batch_size
    pred_arr = np.empty((n_used_imgs, 2048))
    for i in range(n_batches):
        if verbose:
            print("\rPropagating batch %d/%d" % (i + 1, n_batches), end="", flush=True)
        start = i * batch_size
        end = start + batch_size
        batch = images[start:end]
        pred = sess.run(pool3, {'ExpandDims:0': batch})
        pred_arr[start:end] = pred.reshape(batch_size, -1)
    if verbose:
        print(" done")
    return pred_arr


#-------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1    : Numpy array containing the activations of the pool_3 layer of the
                inception net (like returned by the function 'get_predictions')
                for generated samples.
    -- mu2    : The sample mean over activations of the pool_3 layer, precalculated
                on a representative data set.
    -- sigma1 : The covariance matrix over activations of the pool_3 layer for
                generated samples.
    -- sigma2 : The covariance matrix over activations of the pool_3 layer,
                precalculated on a representative data set.

    Returns:
    --        : The Frechet Distance.
    """

    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)

    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
    assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"

    diff = mu1 - mu2

    # product might be almost singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
        warnings.warn(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real

    tr_covmean = np.trace(covmean)

    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean


#-------------------------------------------------------------------------------
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
    """Calculation of the statistics used by the FID.

    Params:
    -- images      : Numpy array of dimension (n_images, hi, wi, 3). The values
                     must lie between 0 and 255.
    -- sess        : current session
    -- batch_size  : the images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the available hardware.
    -- verbose     : If set to True and parameter out_step is given, the number of
                     calculated batches is reported.
    Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
    """
    act = get_activations(images, sess, batch_size, verbose)
    mu = np.mean(act, axis=0)
    sigma = np.cov(act, rowvar=False)
    return mu, sigma


#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
#-------------------------------------------------------------------------------
def check_or_download_inception(inception_path):
    ''' Checks if the path to the inception file is valid, or downloads
        the file if it is not present. '''
    INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
    if inception_path is None:
        inception_path = '/tmp'
    inception_path = pathlib.Path(inception_path)
    model_file = inception_path / 'classify_image_graph_def.pb'
    if not model_file.exists():
        print("Downloading Inception model")
        from urllib import request
        import tarfile
        fn, _ = request.urlretrieve(INCEPTION_URL)
        with tarfile.open(fn, mode='r') as f:
            f.extract('classify_image_graph_def.pb', str(model_file.parent))
    return str(model_file)


def _handle_path(path, sess):
    if path.endswith('.npz'):
        f = np.load(path)
        m, s = f['mu'][:], f['sigma'][:]
        f.close()
    else:
        path = pathlib.Path(path)
        files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
        x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
        m, s = calculate_activation_statistics(x, sess)
    return m, s


def calculate_fid_given_paths(paths, inception_path):
    ''' Calculates the FID of two paths. '''
    inception_path = check_or_download_inception(inception_path)

    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError("Invalid path: %s" % p)

    create_inception_graph(str(inception_path))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        m1, s1 = _handle_path(paths[0], sess)
        m2, s2 = _handle_path(paths[1], sess)
        fid_value = calculate_frechet_distance(m1, s1, m2, s2)
        return fid_value


def _init_inception():
    global pool3
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size: relax the static batch dimension.
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                if shape._dims != []:
                    shape = [s.value for s in shape]
                    new_shape = []
                    for j, s in enumerate(shape):
                        if s == 1 and j == 0:
                            new_shape.append(None)
                        else:
                            new_shape.append(s)
                    o.__dict__['_shape_val'] = tf.TensorShape(new_shape)


if pool3 is None:
    _init_inception()
11,676
38.583051
103
py
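A quick, self-contained sanity check for calculate_frechet_distance using synthetic statistics: identical Gaussians give ~0, and a pure mean shift with shared covariance reduces the distance to ||mu_1 - mu_2||^2. Importing Task.fid runs _init_inception, so the one-time Inception graph download is assumed acceptable here:

import numpy as np
from Task.fid import calculate_frechet_distance  # note: import triggers _init_inception()

rng = np.random.RandomState(0)
act = rng.randn(500, 8)                            # stand-in activations, 8-dim for speed
mu, sigma = act.mean(0), np.cov(act, rowvar=False)
print(calculate_frechet_distance(mu, sigma, mu, sigma))        # ~0.0
print(calculate_frechet_distance(mu + 1.0, sigma, mu, sigma))  # ~8.0, i.e. ||1||^2 in 8 dims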
JEMPP
JEMPP-master/Task/eval_buffer.py
import torch as t
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm


def norm_ip(img, min, max):
    # clamp to [min, max], then rescale to [0, 1]
    temp = t.clamp(img, min=min, max=max)
    temp = (temp - min) / (max - min + 1e-5)
    return temp


def eval_fid(f, replay_buffer, args):
    from Task.fid import get_fid_score

    if isinstance(replay_buffer, (list, tuple)):
        images = replay_buffer[0]
    else:
        images = replay_buffer

    feed_imgs = []
    for i, img in enumerate(images):
        n_img = norm_ip(img, -1, 1)
        new_img = n_img.cpu().numpy().transpose(1, 2, 0) * 255
        feed_imgs.append(new_img)

    feed_imgs = np.stack(feed_imgs)

    if 'cifar100' in args.dataset:
        from Task.data import Cifar100
        test_dataset = Cifar100(args, augment=False)
    elif 'cifar' in args.dataset:
        from Task.data import Cifar10
        test_dataset = Cifar10(args, full=True, noise=False)
    elif 'svhn' in args.dataset:
        from Task.data import Svhn
        test_dataset = Svhn(args, augment=False)
    else:
        assert False, 'dataset %s' % args.dataset

    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True, drop_last=False)

    test_ims = []

    def rescale_im(im):
        return np.clip(im * 256, 0, 255).astype(np.uint8)

    for data_corrupt, data, label_gt in tqdm(test_dataloader):
        data = data.numpy()
        test_ims.extend(list(rescale_im(data)))

    # FID score
    # n = min(len(images), len(test_ims))
    fid = get_fid_score(feed_imgs, test_ims)
    print("FID score: {}".format(fid))
    return fid
1,753
28.728814
120
py
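A hypothetical call to eval_fid above; the buffer is random stand-in data in the (-1, 1) range the function expects, and a Namespace fakes the parsed arguments (note that f is not actually used by eval_fid in this snapshot):

import torch as t
from argparse import Namespace
from Task.eval_buffer import eval_fid

buffer = t.rand(10000, 3, 32, 32) * 2 - 1   # stand-in for an SGLD replay buffer
args = Namespace(dataset='cifar10', batch_size=100, single=False, datasource='random')
fid = eval_fid(None, buffer, args)          # downloads CIFAR-10 and scores buffer vs. test set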
JEMPP
JEMPP-master/models/jem_models.py
import torch as t import torch.nn as nn from models import wideresnet import models from models import wideresnet_yopo im_sz = 32 n_ch = 3 class F(nn.Module): def __init__(self, depth=28, width=2, norm=None, dropout_rate=0.0, n_classes=10, model='wrn', args=None): super(F, self).__init__() # default, wrn self.norm = norm if model == 'yopo': self.f = wideresnet_yopo.Wide_ResNet(depth, width, norm=norm, dropout_rate=dropout_rate) else: self.f = wideresnet.Wide_ResNet(depth, width, norm=norm, dropout_rate=dropout_rate) self.energy_output = nn.Linear(self.f.last_dim, 1) self.class_output = nn.Linear(self.f.last_dim, n_classes) def feature(self, x): penult_z = self.f(x, feature=True) return penult_z def forward(self, x, y=None): penult_z = self.f(x, feature=True) return self.energy_output(penult_z).squeeze() def classify(self, x): penult_z = self.f(x, feature=True) output = self.class_output(penult_z).squeeze() return output class CCF(F): def __init__(self, depth=28, width=2, norm=None, dropout_rate=0.0, n_classes=10, model='wrn', args=None): super(CCF, self).__init__(depth, width, norm=norm, dropout_rate=dropout_rate, n_classes=n_classes, model=model, args=args) def forward(self, x, y=None): logits = self.classify(x) if y is None: v = logits.logsumexp(1) # print("log sum exp", v) return v else: return t.gather(logits, 1, y[:, None]) def init_random(args, bs): im_sz = 32 if args.dataset == 'tinyimagenet': im_sz = 64 return t.FloatTensor(bs, n_ch, im_sz, im_sz).uniform_(-1, 1) def get_model_and_buffer(args, device): model_cls = F if args.uncond else CCF f = model_cls(args.depth, args.width, args.norm, dropout_rate=args.dropout_rate, n_classes=args.n_classes, model=args.model) if not args.uncond: assert args.buffer_size % args.n_classes == 0, "Buffer size must be divisible by args.n_classes" if args.load_path is None: # make replay buffer replay_buffer = init_random(args, args.buffer_size) else: print(f"loading model from {args.load_path}") ckpt_dict = t.load(args.load_path) f.load_state_dict(ckpt_dict["model_state_dict"]) replay_buffer = ckpt_dict["replay_buffer"] f = f.to(device) return f, replay_buffer
2,494
32.716216
130
py
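A sketch of the JEM view encoded by CCF above: classify gives logits for p(y|x), calling the model gives logsumexp of the logits (an unnormalized log p(x)), and passing y gathers the single logit for an unnormalized log p(x, y). model='yopo' is used because the yopo backbone's forward accepts the feature keyword that F.feature/classify pass through:

import torch as t
from models.jem_models import CCF

net = CCF(depth=28, width=2, norm='batch', n_classes=10, model='yopo')
x = t.randn(4, 3, 32, 32)                     # fake CIFAR-sized batch
logits = net.classify(x)                      # (4, 10) -> p(y|x) after a softmax
log_px = net(x)                               # logits.logsumexp(1): unnormalized log p(x)
log_pxy = net(x, y=t.tensor([0, 1, 2, 3]))    # gathered logit: unnormalized log p(x, y)
assert t.allclose(log_px, logits.logsumexp(1))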
JEMPP
JEMPP-master/models/norms.py
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn class ConditionalInstanceNorm2dPlus(nn.Module): def __init__(self, num_features, num_classes, bias=True): super().__init__() self.num_features = num_features self.bias = bias self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) if bias: self.embed = nn.Embedding(num_classes, num_features * 3) self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02) self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0 else: self.embed = nn.Embedding(num_classes, 2 * num_features) self.embed.weight.data.normal_(1, 0.02) def forward(self, x, y): means = torch.mean(x, dim=(2, 3)) m = torch.mean(means, dim=-1, keepdim=True) v = torch.var(means, dim=-1, keepdim=True) means = (means - m) / (torch.sqrt(v + 1e-5)) h = self.instance_norm(x) if self.bias: gamma, alpha, beta = self.embed(y).chunk(3, dim=-1) h = h + means[..., None, None] * alpha[..., None, None] out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1) else: gamma, alpha = self.embed(y).chunk(2, dim=-1) h = h + means[..., None, None] * alpha[..., None, None] out = gamma.view(-1, self.num_features, 1, 1) * h return out class ConditionalActNorm(nn.Module): def __init__(self, num_features, num_classes): super().__init__() self.num_features = num_features self.num_classes = num_classes self.embed = nn.Embedding(num_classes, num_features * 2) self.embed.weight.data.zero_() self.init = False def forward(self, x, y): if self.init: scale, bias = self.embed(y).chunk(2, dim=-1) return x * scale[:, :, None, None] + bias[:, :, None, None] else: m, v = torch.mean(x, dim=(0, 2, 3)), torch.var(x, dim=(0, 2, 3)) std = torch.sqrt(v + 1e-5) scale_init = 1. / std bias_init = -1. 
* m / std self.embed.weight.data[:, :self.num_features] = scale_init[None].repeat(self.num_classes, 1) self.embed.weight.data[:, self.num_features:] = bias_init[None].repeat(self.num_classes, 1) self.init = True return self(x, y) logabs = lambda x: torch.log(torch.abs(x)) class ActNorm(nn.Module): def __init__(self, in_channel, logdet=True): super().__init__() self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1)) self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1)) self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) self.logdet = logdet def initialize(self, input): with torch.no_grad(): flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1) mean = ( flatten.mean(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) std = ( flatten.std(1) .unsqueeze(1) .unsqueeze(2) .unsqueeze(3) .permute(1, 0, 2, 3) ) self.loc.data.copy_(-mean) self.scale.data.copy_(1 / (std + 1e-6)) def forward(self, input): _, _, height, width = input.shape if self.initialized.item() == 0: self.initialize(input) self.initialized.fill_(1) log_abs = logabs(self.scale) logdet = height * width * torch.sum(log_abs) if self.logdet: return self.scale * (input + self.loc), logdet else: return self.scale * (input + self.loc) def reverse(self, output): return output / self.scale - self.loc class ContinuousConditionalActNorm(nn.Module): def __init__(self, num_features, num_classes): super().__init__() del num_classes self.num_features = num_features self.embed = nn.Sequential(nn.Linear(1, 256), nn.ELU(inplace=True), nn.Linear(256, 256), nn.ELU(inplace=True), nn.Linear(256, self.num_features*2), ) def forward(self, x, y): scale, bias = self.embed(y.unsqueeze(-1)).chunk(2, dim=-1) return x * scale[:, :, None, None] + bias[:, :, None, None] class Identity(nn.Module): def __init__(self): super(Identity, self).__init__() def forward(self, x): return x def get_norm(n_filters, norm): if norm is None or norm.lower() == 'none': return Identity() elif norm == "batch": return nn.BatchNorm2d(n_filters, momentum=0.9) elif norm == "instance": return nn.InstanceNorm2d(n_filters, affine=True) elif norm == "layer": return nn.GroupNorm(1, n_filters) elif norm == "act": return ActNorm(n_filters, False) else: return Identity()
5,894
33.881657
107
py
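A short sketch of the get_norm dispatcher above; ActNorm is the interesting case because it initializes its offset and scale from the first batch it sees:

import torch
from models.norms import get_norm

x = torch.randn(8, 16, 32, 32)
bn = get_norm(16, 'batch')        # nn.BatchNorm2d(16, momentum=0.9)
an = get_norm(16, 'act')          # ActNorm(16, logdet=False)
print(bn(x).shape, an(x).shape)   # ActNorm's first call sets loc/scale from this batch
print(get_norm(16, None))         # Identity() when no normalization is requested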
JEMPP
JEMPP-master/models/wideresnet_yopo.py
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F import numpy as np from .norms import get_norm, Identity def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True) def conv_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: init.xavier_uniform(m.weight, gain=np.sqrt(2)) init.constant(m.bias, 0) elif classname.find('BatchNorm') != -1: init.constant(m.weight, 1) init.constant(m.bias, 0) class wide_basic(nn.Module): def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None, leak=.2): super(wide_basic, self).__init__() self.lrelu = nn.LeakyReLU(leak) self.bn1 = get_norm(in_planes, norm) self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True) self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=dropout_rate) self.bn2 = get_norm(planes, norm) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True) self.shortcut = nn.Sequential() if stride != 1 or in_planes != planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True), ) def forward(self, x): out = self.dropout(self.conv1(self.lrelu(self.bn1(x)))) out = self.conv2(self.lrelu(self.bn2(out))) out += self.shortcut(x) return out class Wide_ResNet(nn.Module): def __init__(self, depth, widen_factor, num_classes=10, input_channels=3, sum_pool=False, norm=None, leak=.2, dropout_rate=0.0): super(Wide_ResNet, self).__init__() self.in_planes = 16 self.sum_pool = sum_pool self.norm = norm self.lrelu = nn.LeakyReLU(leak) self.n_classes = num_classes assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4' n = (depth - 4) // 6 k = widen_factor print('| Wide-Resnet %dx%d yopo' % (depth, k)) nStages = [16, 16 * k, 32 * k, 64 * k] self.layer_one_out = None self.conv1 = conv3x3(input_channels, nStages[0]) self.layer_one = self.conv1 self.other_layers = nn.ModuleList() self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1) self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2) self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2) self.other_layers.append(self.layer1) self.other_layers.append(self.layer2) self.other_layers.append(self.layer3) self.bn1 = get_norm(nStages[3], self.norm) self.other_layers.append(self.bn1) self.last_dim = nStages[3] # self.linear = nn.Linear(nStages[3], num_classes) # self.other_layers.append(self.linear) def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, dropout_rate, stride, norm=self.norm)) self.in_planes = planes return nn.Sequential(*layers) def forward(self, x, vx=None, feature=True): out = self.conv1(x) # for YOPO self.layer_one_out = out 
self.layer_one_out.requires_grad_() self.layer_one_out.retain_grad() # for YOPO out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.lrelu(self.bn1(out)) if self.sum_pool: out = out.view(out.size(0), out.size(1), -1).sum(2) else: out = F.avg_pool2d(out, 8) out = out.view(out.size(0), -1) return out
4,536
35.58871
98
py
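A minimal sketch (not part of the repo) of the gradient bookkeeping the YOPO variant above performs: because the first layer's activation is stored with retain_grad(), a single backward pass leaves a gradient on that intermediate tensor, which YOPO-style adversarial training can reuse for cheap inner-loop updates instead of re-running the whole network. The module names here are illustrative stand-ins.

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)              # stand-in for layer_one
rest = nn.Sequential(nn.Flatten(), nn.Linear(16 * 8 * 8, 10))  # stand-in for the remaining layers

x = torch.randn(4, 3, 8, 8)
layer_one_out = conv(x)
layer_one_out.retain_grad()      # keep the gradient on this non-leaf activation
loss = rest(layer_one_out).sum()
loss.backward()
print(layer_one_out.grad.shape)  # torch.Size([4, 16, 8, 8])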
JEMPP
JEMPP-master/models/__init__.py
0
0
0
py
JEMPP
JEMPP-master/models/wideresnet.py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F

from .norms import get_norm, Identity
import numpy as np


def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)


def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # use the in-place initializers; the un-suffixed variants are deprecated
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)


class wide_basic(nn.Module):
    def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None, leak=.2):
        super(wide_basic, self).__init__()
        self.norm = norm
        self.lrelu = nn.LeakyReLU(leak)
        self.bn1 = get_norm(in_planes, norm)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=dropout_rate)
        self.bn2 = get_norm(planes, norm)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )

    def forward(self, x):
        out = self.bn1(x)
        out = self.dropout(self.conv1(self.lrelu(out)))
        out = self.bn2(out)
        out = self.conv2(self.lrelu(out))
        out += self.shortcut(x)
        return out


class Wide_ResNet(nn.Module):
    def __init__(self, depth, widen_factor, num_classes=10, input_channels=3,
                 sum_pool=False, norm=None, leak=.2, dropout_rate=0.0):
        super(Wide_ResNet, self).__init__()
        self.leak = leak
        self.in_planes = 16
        self.sum_pool = sum_pool
        self.norm = norm
        self.lrelu = nn.LeakyReLU(leak)
        self.n_classes = num_classes

        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
        k = widen_factor

        print('| Wide-Resnet %dx%d' % (depth, k))
        nStages = [16, 16 * k, 32 * k, 64 * k]

        self.conv1 = conv3x3(input_channels, nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1, leak=leak)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2, leak=leak)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2, leak=leak)
        self.bn1 = get_norm(nStages[3], self.norm)
        self.last_dim = nStages[3]
        # self.linear = nn.Linear(nStages[3], num_classes)

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, leak=0.2):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []

        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride, leak=leak, norm=self.norm))
            self.in_planes = planes

        return nn.Sequential(*layers)

    def forward(self, x, logits=False):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.lrelu(self.bn1(out))
        if self.sum_pool:
            out = out.view(out.size(0), out.size(1), -1).sum(2)
        else:
            if self.n_classes > 100:
                out = F.adaptive_avg_pool2d(out, 1)
            else:
                out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        if logits:
            # NOTE: requires self.linear (commented out above) to be defined;
            # callers that pass logits=True must attach a classifier head.
            out = self.linear(out)
        return out
4,325
35.974359
105
py
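A small sketch (assumption, not repo code) of how the Wide-ResNet hyper-parameters map onto the stage layout above: the depth must satisfy depth = 6n + 4, giving three stages of n two-conv blocks on top of the stem convolution, with channel widths scaled by the widen factor.

depth, widen_factor = 28, 10
assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
n = (depth - 4) // 6                                     # blocks per stage (4 for WRN-28)
stages = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
print(n, stages)                                         # 4 [16, 160, 320, 640]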
DiT
DiT-main/sample.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Sample new images from a pre-trained DiT.
"""
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
from torchvision.utils import save_image
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL
from download import find_model
from models import DiT_models
import argparse


def main(args):
    # Setup PyTorch:
    torch.manual_seed(args.seed)
    torch.set_grad_enabled(False)
    device = "cuda" if torch.cuda.is_available() else "cpu"

    if args.ckpt is None:
        assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
        assert args.image_size in [256, 512]
        assert args.num_classes == 1000

    # Load model:
    latent_size = args.image_size // 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    ).to(device)
    # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
    ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
    state_dict = find_model(ckpt_path)
    model.load_state_dict(state_dict)
    model.eval()  # important!
    diffusion = create_diffusion(str(args.num_sampling_steps))
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)

    # Labels to condition the model with (feel free to change):
    class_labels = [207, 360, 387, 974, 88, 979, 417, 279]

    # Create sampling noise:
    n = len(class_labels)
    z = torch.randn(n, 4, latent_size, latent_size, device=device)
    y = torch.tensor(class_labels, device=device)

    # Setup classifier-free guidance:
    z = torch.cat([z, z], 0)
    y_null = torch.tensor([1000] * n, device=device)
    y = torch.cat([y, y_null], 0)
    model_kwargs = dict(y=y, cfg_scale=args.cfg_scale)

    # Sample images:
    samples = diffusion.p_sample_loop(
        model.forward_with_cfg, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device
    )
    samples, _ = samples.chunk(2, dim=0)  # Remove null class samples
    samples = vae.decode(samples / 0.18215).sample

    # Save and display images:
    save_image(samples, "sample.png", nrow=4, normalize=True, value_range=(-1, 1))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="mse")
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--cfg-scale", type=float, default=4.0)
    parser.add_argument("--num-sampling-steps", type=int, default=250)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--ckpt", type=str, default=None,
                        help="Optional path to a DiT checkpoint (default: auto-download a pre-trained DiT-XL/2 model).")
    args = parser.parse_args()
    main(args)
3,269
37.928571
120
py
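A sketch (not repo code) of the classifier-free guidance arithmetic that sample.py delegates to DiT.forward_with_cfg: the batch is duplicated, the second half is conditioned on the null class, and the conditional and unconditional epsilon predictions are blended with cfg_scale. The tensors below are random stand-ins for the model's two predictions.

import torch

n, c = 4, 8
cond_eps = torch.randn(n, c)
uncond_eps = torch.randn(n, c)
cfg_scale = 4.0
guided = uncond_eps + cfg_scale * (cond_eps - uncond_eps)  # lerp past the conditional prediction
print(guided.shape)                                        # torch.Size([4, 8])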
DiT
DiT-main/download.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Functions for downloading pre-trained DiT models
"""
from torchvision.datasets.utils import download_url
import torch
import os


pretrained_models = {'DiT-XL-2-512x512.pt', 'DiT-XL-2-256x256.pt'}


def find_model(model_name):
    """
    Finds a pre-trained DiT model, downloading it if necessary. Alternatively, loads a model from a local path.
    """
    if model_name in pretrained_models:  # Find/download our pre-trained DiT checkpoints
        return download_model(model_name)
    else:  # Load a custom DiT checkpoint:
        assert os.path.isfile(model_name), f'Could not find DiT checkpoint at {model_name}'
        checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)
        if "ema" in checkpoint:  # supports checkpoints from train.py
            checkpoint = checkpoint["ema"]
        return checkpoint


def download_model(model_name):
    """
    Downloads a pre-trained DiT model from the web.
    """
    assert model_name in pretrained_models
    local_path = f'pretrained_models/{model_name}'
    if not os.path.isfile(local_path):
        os.makedirs('pretrained_models', exist_ok=True)
        web_path = f'https://dl.fbaipublicfiles.com/DiT/models/{model_name}'
        download_url(web_path, 'pretrained_models')
    model = torch.load(local_path, map_location=lambda storage, loc: storage)
    return model


if __name__ == "__main__":
    # Download all DiT checkpoints
    for model in pretrained_models:
        download_model(model)
    print('Done.')
1,713
32.607843
111
py
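A usage sketch for find_model above: a name from pretrained_models triggers a (multi-gigabyte) download on first use, while any other string is treated as a local checkpoint whose EMA weights are preferred when present. The local path in the comment is hypothetical.

from download import find_model

# Pretrained name -> fetched into pretrained_models/ on first use:
state_dict = find_model("DiT-XL-2-256x256.pt")

# Any other string is loaded from disk; "ema" weights win if the checkpoint
# came from train.py (hypothetical path):
# state_dict = find_model("results/000-DiT-XL-2/checkpoints/0050000.pt")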
DiT
DiT-main/sample_ddp.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Samples a large number of images from a pre-trained DiT model using DDP.
Subsequently saves a .npz file that can be used to compute FID and other
evaluation metrics via the ADM repo: https://github.com/openai/guided-diffusion/tree/main/evaluations

For a simple single-GPU/CPU sampling script, see sample.py.
"""
import torch
import torch.distributed as dist
from models import DiT_models
from download import find_model
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL
from tqdm import tqdm
import os
from PIL import Image
import numpy as np
import math
import argparse


def create_npz_from_sample_folder(sample_dir, num=50_000):
    """
    Builds a single .npz file from a folder of .png samples.
    """
    samples = []
    for i in tqdm(range(num), desc="Building .npz file from samples"):
        sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
        sample_np = np.asarray(sample_pil).astype(np.uint8)
        samples.append(sample_np)
    samples = np.stack(samples)
    assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
    npz_path = f"{sample_dir}.npz"
    np.savez(npz_path, arr_0=samples)
    print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
    return npz_path


def main(args):
    """
    Run sampling.
    """
    torch.backends.cuda.matmul.allow_tf32 = args.tf32  # True: fast but may lead to some small numerical differences
    assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU. sample.py supports CPU-only usage"
    torch.set_grad_enabled(False)

    # Setup DDP:
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")

    if args.ckpt is None:
        assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
        assert args.image_size in [256, 512]
        assert args.num_classes == 1000

    # Load model:
    latent_size = args.image_size // 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    ).to(device)
    # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
    ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
    state_dict = find_model(ckpt_path)
    model.load_state_dict(state_dict)
    model.eval()  # important!
    diffusion = create_diffusion(str(args.num_sampling_steps))
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    assert args.cfg_scale >= 1.0, "In almost all cases, cfg_scale should be >= 1.0"
    using_cfg = args.cfg_scale > 1.0

    # Create folder to save samples:
    model_string_name = args.model.replace("/", "-")
    ckpt_string_name = os.path.basename(args.ckpt).replace(".pt", "") if args.ckpt else "pretrained"
    folder_name = f"{model_string_name}-{ckpt_string_name}-size-{args.image_size}-vae-{args.vae}-" \
                  f"cfg-{args.cfg_scale}-seed-{args.global_seed}"
    sample_folder_dir = f"{args.sample_dir}/{folder_name}"
    if rank == 0:
        os.makedirs(sample_folder_dir, exist_ok=True)
        print(f"Saving .png samples at {sample_folder_dir}")
    dist.barrier()

    # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
    n = args.per_proc_batch_size
    global_batch_size = n * dist.get_world_size()
    # To make things evenly-divisible, we'll sample a bit more than we need and then discard the extra samples:
    total_samples = int(math.ceil(args.num_fid_samples / global_batch_size) * global_batch_size)
    if rank == 0:
        print(f"Total number of images that will be sampled: {total_samples}")
    assert total_samples % dist.get_world_size() == 0, "total_samples must be divisible by world_size"
    samples_needed_this_gpu = int(total_samples // dist.get_world_size())
    assert samples_needed_this_gpu % n == 0, "samples_needed_this_gpu must be divisible by the per-GPU batch size"
    iterations = int(samples_needed_this_gpu // n)
    pbar = range(iterations)
    pbar = tqdm(pbar) if rank == 0 else pbar
    total = 0
    for _ in pbar:
        # Sample inputs:
        z = torch.randn(n, model.in_channels, latent_size, latent_size, device=device)
        y = torch.randint(0, args.num_classes, (n,), device=device)

        # Setup classifier-free guidance:
        if using_cfg:
            z = torch.cat([z, z], 0)
            y_null = torch.tensor([1000] * n, device=device)
            y = torch.cat([y, y_null], 0)
            model_kwargs = dict(y=y, cfg_scale=args.cfg_scale)
            sample_fn = model.forward_with_cfg
        else:
            model_kwargs = dict(y=y)
            sample_fn = model.forward

        # Sample images:
        samples = diffusion.p_sample_loop(
            sample_fn, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=False, device=device
        )
        if using_cfg:
            samples, _ = samples.chunk(2, dim=0)  # Remove null class samples

        samples = vae.decode(samples / 0.18215).sample
        samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()

        # Save samples to disk as individual .png files
        for i, sample in enumerate(samples):
            index = i * dist.get_world_size() + rank + total
            Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
        total += global_batch_size

    # Make sure all processes have finished saving their samples before attempting to convert to .npz
    dist.barrier()
    if rank == 0:
        create_npz_from_sample_folder(sample_folder_dir, args.num_fid_samples)
        print("Done.")
    dist.barrier()
    dist.destroy_process_group()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="ema")
    parser.add_argument("--sample-dir", type=str, default="samples")
    parser.add_argument("--per-proc-batch-size", type=int, default=32)
    parser.add_argument("--num-fid-samples", type=int, default=50_000)
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--cfg-scale", type=float, default=1.5)
    parser.add_argument("--num-sampling-steps", type=int, default=250)
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--tf32", action=argparse.BooleanOptionalAction, default=True,
                        help="By default, use TF32 matmuls. This massively accelerates sampling on Ampere GPUs.")
    parser.add_argument("--ckpt", type=str, default=None,
                        help="Optional path to a DiT checkpoint (default: auto-download a pre-trained DiT-XL/2 model).")
    args = parser.parse_args()
    main(args)
7,411
43.383234
120
py
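A sketch (not repo code) of the sample-count arithmetic in sample_ddp.py: the requested FID sample count is rounded up to a multiple of the global batch size so every rank runs the same number of full batches, and the surplus images are simply discarded when the .npz is built.

import math

num_fid_samples, per_proc_batch_size, world_size = 50_000, 32, 8
global_batch_size = per_proc_batch_size * world_size
total_samples = int(math.ceil(num_fid_samples / global_batch_size) * global_batch_size)
iterations = total_samples // world_size // per_proc_batch_size
print(total_samples, iterations)   # 50176 196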
DiT
DiT-main/models.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------

import torch
import torch.nn as nn
import numpy as np
import math
from timm.models.vision_transformer import PatchEmbed, Attention, Mlp


def modulate(x, shift, scale):
    return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)


#################################################################################
#               Embedding Layers for Timesteps and Class Labels                 #
#################################################################################

class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.
        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb


class LabelEmbedder(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
    """
    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        use_cfg_embedding = dropout_prob > 0
        self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob

    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
        else:
            drop_ids = force_drop_ids == 1
        labels = torch.where(drop_ids, self.num_classes, labels)
        return labels

    def forward(self, labels, train, force_drop_ids=None):
        use_dropout = self.dropout_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            labels = self.token_drop(labels, force_drop_ids)
        embeddings = self.embedding_table(labels)
        return embeddings


#################################################################################
#                                 Core DiT Model                                #
#################################################################################

class DiTBlock(nn.Module):
    """
    A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning.
    """
    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
        super().__init__()
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=1)
        x = x + gate_msa.unsqueeze(1) * self.attn(modulate(self.norm1(x), shift_msa, scale_msa))
        x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
        return x


class FinalLayer(nn.Module):
    """
    The final layer of DiT.
    """
    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x


class DiT(nn.Module):
    """
    Diffusion model with a Transformer backbone.
    """
    def __init__(
        self,
        input_size=32,
        patch_size=2,
        in_channels=4,
        hidden_size=1152,
        depth=28,
        num_heads=16,
        mlp_ratio=4.0,
        class_dropout_prob=0.1,
        num_classes=1000,
        learn_sigma=True,
    ):
        super().__init__()
        self.learn_sigma = learn_sigma
        self.in_channels = in_channels
        self.out_channels = in_channels * 2 if learn_sigma else in_channels
        self.patch_size = patch_size
        self.num_heads = num_heads

        self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)
        self.t_embedder = TimestepEmbedder(hidden_size)
        self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob)
        num_patches = self.x_embedder.num_patches
        # Will use fixed sin-cos embedding:
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, hidden_size), requires_grad=False)

        self.blocks = nn.ModuleList([
            DiTBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio) for _ in range(depth)
        ])
        self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
        self.initialize_weights()

    def initialize_weights(self):
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize (and freeze) pos_embed by sin-cos embedding:
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5))
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
        w = self.x_embedder.proj.weight.data
        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
        nn.init.constant_(self.x_embedder.proj.bias, 0)

        # Initialize label embedding table:
        nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        for block in self.blocks:
            nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
        nn.init.constant_(self.final_layer.linear.weight, 0)
        nn.init.constant_(self.final_layer.linear.bias, 0)

    def unpatchify(self, x):
        """
        x: (N, T, patch_size**2 * C)
        imgs: (N, H, W, C)
        """
        c = self.out_channels
        p = self.x_embedder.patch_size[0]
        h = w = int(x.shape[1] ** 0.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))
        return imgs

    def forward(self, x, t, y):
        """
        Forward pass of DiT.
        x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
        t: (N,) tensor of diffusion timesteps
        y: (N,) tensor of class labels
        """
        x = self.x_embedder(x) + self.pos_embed  # (N, T, D), where T = H * W / patch_size ** 2
        t = self.t_embedder(t)                   # (N, D)
        y = self.y_embedder(y, self.training)    # (N, D)
        c = t + y                                # (N, D)
        for block in self.blocks:
            x = block(x, c)                      # (N, T, D)
        x = self.final_layer(x, c)               # (N, T, patch_size ** 2 * out_channels)
        x = self.unpatchify(x)                   # (N, out_channels, H, W)
        return x

    def forward_with_cfg(self, x, t, y, cfg_scale):
        """
        Forward pass of DiT, but also batches the unconditional forward pass for classifier-free guidance.
        """
        # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb
        half = x[: len(x) // 2]
        combined = torch.cat([half, half], dim=0)
        model_out = self.forward(combined, t, y)
        # For exact reproducibility reasons, we apply classifier-free guidance on only
        # three channels by default. The standard approach to cfg applies it to all channels.
        # This can be done by uncommenting the following line and commenting-out the line following that.
        # eps, rest = model_out[:, :self.in_channels], model_out[:, self.in_channels:]
        eps, rest = model_out[:, :3], model_out[:, 3:]
        cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
        half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
        eps = torch.cat([half_eps, half_eps], dim=0)
        return torch.cat([eps, rest], dim=1)


#################################################################################
#                   Sine/Cosine Positional Embedding Functions                  #
#################################################################################
# https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb


#################################################################################
#                                   DiT Configs                                 #
#################################################################################

def DiT_XL_2(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=2, num_heads=16, **kwargs)

def DiT_XL_4(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=4, num_heads=16, **kwargs)

def DiT_XL_8(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=8, num_heads=16, **kwargs)

def DiT_L_2(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=2, num_heads=16, **kwargs)

def DiT_L_4(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=4, num_heads=16, **kwargs)

def DiT_L_8(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=8, num_heads=16, **kwargs)

def DiT_B_2(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=2, num_heads=12, **kwargs)

def DiT_B_4(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=4, num_heads=12, **kwargs)

def DiT_B_8(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=8, num_heads=12, **kwargs)

def DiT_S_2(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=2, num_heads=6, **kwargs)

def DiT_S_4(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=4, num_heads=6, **kwargs)

def DiT_S_8(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=8, num_heads=6, **kwargs)


DiT_models = {
    'DiT-XL/2': DiT_XL_2,  'DiT-XL/4': DiT_XL_4,  'DiT-XL/8': DiT_XL_8,
    'DiT-L/2':  DiT_L_2,   'DiT-L/4':  DiT_L_4,   'DiT-L/8':  DiT_L_8,
    'DiT-B/2':  DiT_B_2,   'DiT-B/4':  DiT_B_4,   'DiT-B/8':  DiT_B_8,
    'DiT-S/2':  DiT_S_2,   'DiT-S/4':  DiT_S_4,   'DiT-S/8':  DiT_S_8,
}
14,995
39.420485
113
py
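A sketch (not repo code) reproducing the get_1d_sincos_pos_embed_from_grid frequency layout above for a handful of positions: each row holds sines of the position times a geometric frequency ladder in its first half and the matching cosines in its second half.

import numpy as np

embed_dim, pos = 8, np.arange(4, dtype=np.float64)
omega = 1.0 / 10000 ** (np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2))
out = np.einsum('m,d->md', pos, omega)                 # (M, D/2) outer product
emb = np.concatenate([np.sin(out), np.cos(out)], axis=1)
print(emb.shape)                                       # (4, 8)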
DiT
DiT-main/train.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
A minimal training script for DiT using PyTorch DDP.
"""
import torch
# the first flag below was False when we tested this script but True makes A100 training a lot faster:
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import ImageFolder
from torchvision import transforms
import numpy as np
from collections import OrderedDict
from PIL import Image
from copy import deepcopy
from glob import glob
from time import time
import argparse
import logging
import os

from models import DiT_models
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL


#################################################################################
#                             Training Helper Functions                         #
#################################################################################

@torch.no_grad()
def update_ema(ema_model, model, decay=0.9999):
    """
    Step the EMA model towards the current model.
    """
    ema_params = OrderedDict(ema_model.named_parameters())
    model_params = OrderedDict(model.named_parameters())

    for name, param in model_params.items():
        # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
        ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)


def requires_grad(model, flag=True):
    """
    Set requires_grad flag for all parameters in a model.
    """
    for p in model.parameters():
        p.requires_grad = flag


def cleanup():
    """
    End DDP training.
    """
    dist.destroy_process_group()


def create_logger(logging_dir):
    """
    Create a logger that writes to a log file and stdout.
    """
    if dist.get_rank() == 0:  # real logger
        logging.basicConfig(
            level=logging.INFO,
            format='[\033[34m%(asctime)s\033[0m] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
            handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
        )
        logger = logging.getLogger(__name__)
    else:  # dummy logger (does nothing)
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.NullHandler())
    return logger


def center_crop_arr(pil_image, image_size):
    """
    Center cropping implementation from ADM.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    """
    while min(*pil_image.size) >= 2 * image_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = image_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    crop_y = (arr.shape[0] - image_size) // 2
    crop_x = (arr.shape[1] - image_size) // 2
    return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])


#################################################################################
#                                  Training Loop                                #
#################################################################################

def main(args):
    """
    Trains a new DiT model.
    """
    assert torch.cuda.is_available(), "Training currently requires at least one GPU."

    # Setup DDP:
    dist.init_process_group("nccl")
    assert args.global_batch_size % dist.get_world_size() == 0, "Batch size must be divisible by world size."
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")

    # Setup an experiment folder:
    if rank == 0:
        os.makedirs(args.results_dir, exist_ok=True)  # Make results folder (holds all experiment subfolders)
        experiment_index = len(glob(f"{args.results_dir}/*"))
        model_string_name = args.model.replace("/", "-")  # e.g., DiT-XL/2 --> DiT-XL-2 (for naming folders)
        experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"  # Create an experiment folder
        checkpoint_dir = f"{experiment_dir}/checkpoints"  # Stores saved model checkpoints
        os.makedirs(checkpoint_dir, exist_ok=True)
        logger = create_logger(experiment_dir)
        logger.info(f"Experiment directory created at {experiment_dir}")
    else:
        logger = create_logger(None)

    # Create model:
    assert args.image_size % 8 == 0, "Image size must be divisible by 8 (for the VAE encoder)."
    latent_size = args.image_size // 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    )
    # Note that parameter initialization is done within the DiT constructor
    ema = deepcopy(model).to(device)  # Create an EMA of the model for use after training
    requires_grad(ema, False)
    model = DDP(model.to(device), device_ids=[rank])
    diffusion = create_diffusion(timestep_respacing="")  # default: 1000 steps, linear noise schedule
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    logger.info(f"DiT Parameters: {sum(p.numel() for p in model.parameters()):,}")

    # Setup optimizer (we used default Adam betas=(0.9, 0.999) and a constant learning rate of 1e-4 in our paper):
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0)

    # Setup data:
    transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
    dataset = ImageFolder(args.data_path, transform=transform)
    sampler = DistributedSampler(
        dataset,
        num_replicas=dist.get_world_size(),
        rank=rank,
        shuffle=True,
        seed=args.global_seed
    )
    loader = DataLoader(
        dataset,
        batch_size=int(args.global_batch_size // dist.get_world_size()),
        shuffle=False,
        sampler=sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    logger.info(f"Dataset contains {len(dataset):,} images ({args.data_path})")

    # Prepare models for training:
    update_ema(ema, model.module, decay=0)  # Ensure EMA is initialized with synced weights
    model.train()  # important! This enables embedding dropout for classifier-free guidance
    ema.eval()  # EMA model should always be in eval mode

    # Variables for monitoring/logging purposes:
    train_steps = 0
    log_steps = 0
    running_loss = 0
    start_time = time()

    logger.info(f"Training for {args.epochs} epochs...")
    for epoch in range(args.epochs):
        sampler.set_epoch(epoch)
        logger.info(f"Beginning epoch {epoch}...")
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            with torch.no_grad():
                # Map input images to latent space + normalize latents:
                x = vae.encode(x).latent_dist.sample().mul_(0.18215)
            t = torch.randint(0, diffusion.num_timesteps, (x.shape[0],), device=device)
            model_kwargs = dict(y=y)
            loss_dict = diffusion.training_losses(model, x, t, model_kwargs)
            loss = loss_dict["loss"].mean()
            opt.zero_grad()
            loss.backward()
            opt.step()
            update_ema(ema, model.module)

            # Log loss values:
            running_loss += loss.item()
            log_steps += 1
            train_steps += 1
            if train_steps % args.log_every == 0:
                # Measure training speed:
                torch.cuda.synchronize()
                end_time = time()
                steps_per_sec = log_steps / (end_time - start_time)
                # Reduce loss history over all processes:
                avg_loss = torch.tensor(running_loss / log_steps, device=device)
                dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
                avg_loss = avg_loss.item() / dist.get_world_size()
                logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
                # Reset monitoring variables:
                running_loss = 0
                log_steps = 0
                start_time = time()

            # Save DiT checkpoint:
            if train_steps % args.ckpt_every == 0 and train_steps > 0:
                if rank == 0:
                    checkpoint = {
                        "model": model.module.state_dict(),
                        "ema": ema.state_dict(),
                        "opt": opt.state_dict(),
                        "args": args
                    }
                    checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
                    torch.save(checkpoint, checkpoint_path)
                    logger.info(f"Saved checkpoint to {checkpoint_path}")
                dist.barrier()

    model.eval()  # important! This disables randomized embedding dropout
    # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...

    logger.info("Done!")
    cleanup()


if __name__ == "__main__":
    # Default args here will train DiT-XL/2 with the hyperparameters we used in our paper (except training iters).
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)
    parser.add_argument("--results-dir", type=str, default="results")
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--epochs", type=int, default=1400)
    parser.add_argument("--global-batch-size", type=int, default=256)
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="ema")  # Choice doesn't affect training
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--log-every", type=int, default=100)
    parser.add_argument("--ckpt-every", type=int, default=50_000)
    args = parser.parse_args()
    main(args)
10,949
39.555556
132
py
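A sketch (not repo code) of the EMA rule update_ema applies after every optimizer step: each shadow parameter moves a (1 - decay) fraction of the way toward the live parameter, so the EMA weights trail the training trajectory smoothly.

import torch

decay = 0.9999
ema_param = torch.zeros(3)   # stand-in for one shadow parameter
live_param = torch.ones(3)   # stand-in for the matching live parameter
ema_param.mul_(decay).add_(live_param, alpha=1 - decay)
print(ema_param)             # ~tensor([1.0000e-04, 1.0000e-04, 1.0000e-04])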
DiT
DiT-main/diffusion/timestep_sampler.py
# Modified from OpenAI's diffusion repos
#     GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
#     ADM:   https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
#     IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py

from abc import ABC, abstractmethod

import numpy as np
import torch as th
import torch.distributed as dist


def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler.
    :param diffusion: the diffusion object to sample for.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    elif name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    else:
        raise NotImplementedError(f"unknown schedule sampler: {name}")


class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged.
    However, subclasses may override sample() to change how the resampled
    terms are reweighted, allowing for actual changes in the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.

        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        w = self.weights()
        p = w / np.sum(w)
        indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
        indices = th.from_numpy(indices_np).long().to(device)
        weights_np = 1 / (len(p) * p[indices_np])
        weights = th.from_numpy(weights_np).float().to(device)
        return indices, weights


class UniformSampler(ScheduleSampler):
    def __init__(self, diffusion):
        self.diffusion = diffusion
        self._weights = np.ones([diffusion.num_timesteps])

    def weights(self):
        return self._weights


class LossAwareSampler(ScheduleSampler):
    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.

        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.

        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )

        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)

        timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.

        Sub-classes should override this method to update the reweighting
        using losses from the model.

        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.

        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """


class LossSecondMomentResampler(LossAwareSampler):
    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # np.int was removed from NumPy; the builtin int keeps the original behavior
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=int)

    def weights(self):
        if not self._warmed_up():
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        return (self._loss_counts == self.history_per_term).all()
6,013
38.827815
108
py
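A sketch (not repo code) of the unbiased importance-sampling identity the samplers above rely on: drawing timesteps with probability p and weighting each loss term by 1 / (T * p) leaves the objective's expectation unchanged, since E[w * loss] = sum(p * loss / (T * p)) = mean(loss).

import numpy as np

w = np.array([1.0, 2.0, 1.0, 4.0])           # unnormalized per-step weights
p = w / w.sum()
indices = np.random.choice(len(p), size=6, p=p)
loss_weights = 1.0 / (len(p) * p[indices])   # rescales each sampled loss term
print(indices, loss_weights)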
DiT
DiT-main/diffusion/gaussian_diffusion.py
# Modified from OpenAI's diffusion repos
#     GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
#     ADM:   https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
#     IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py


import math

import numpy as np
import torch as th
import enum

from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl


def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon


class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """

    LEARNED = enum.auto()
    FIXED_SMALL = enum.auto()
    FIXED_LARGE = enum.auto()
    LEARNED_RANGE = enum.auto()


class LossType(enum.Enum):
    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = (
        enum.auto()
    )  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        return self == LossType.KL or self == LossType.RESCALED_KL


def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
    betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
    warmup_time = int(num_diffusion_timesteps * warmup_frac)
    betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
    return betas


def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
    """
    This is the deprecated API for creating beta schedules.

    See get_named_beta_schedule() for the new library of schedules.
    """
    if beta_schedule == "quad":
        betas = (
            np.linspace(
                beta_start ** 0.5,
                beta_end ** 0.5,
                num_diffusion_timesteps,
                dtype=np.float64,
            )
            ** 2
        )
    elif beta_schedule == "linear":
        betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
    elif beta_schedule == "warmup10":
        betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
    elif beta_schedule == "warmup50":
        betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
    elif beta_schedule == "const":
        betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
    elif beta_schedule == "jsd":  # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1.0 / np.linspace(
            num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
        )
    else:
        raise NotImplementedError(beta_schedule)
    assert betas.shape == (num_diffusion_timesteps,)
    return betas


def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps.
    Beta schedules may be added, but should not be removed or changed once
    they are committed to maintain backwards compatibility.
    """
    if schedule_name == "linear":
        # Linear schedule from Ho et al, extended to work for any number of
        # diffusion steps.
        scale = 1000 / num_diffusion_timesteps
        return get_beta_schedule(
            "linear",
            beta_start=scale * 0.0001,
            beta_end=scale * 0.02,
            num_diffusion_timesteps=num_diffusion_timesteps,
        )
    elif schedule_name == "squaredcos_cap_v2":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    else:
        raise NotImplementedError(f"unknown beta schedule: {schedule_name}")


def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)


class GaussianDiffusion:
    """
    Utilities for training and sampling diffusion models.

    Original ported from this codebase:
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42

    :param betas: a 1-D numpy array of betas for each diffusion timestep,
                  starting at T and going to 1.
    """

    def __init__(
        self,
        *,
        betas,
        model_mean_type,
        model_var_type,
        loss_type
    ):

        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type

        # Use float64 for accuracy.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert len(betas.shape) == 1, "betas must be 1-D"
        assert (betas > 0).all() and (betas <= 1).all()

        self.num_timesteps = int(betas.shape[0])

        alphas = 1.0 - betas
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
        self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
        self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        self.posterior_variance = (
            betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.posterior_log_variance_clipped = np.log(
            np.append(self.posterior_variance[1], self.posterior_variance[1:])
        ) if len(self.posterior_variance) > 1 else np.array([])

        self.posterior_mean_coef1 = (
            betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        self.posterior_mean_coef2 = (
            (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
        )

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).

        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def q_sample(self, x_start, t, noise=None):
        """
        Diffuse the data for a given number of diffusion steps.

        In other words, sample from q(x_t | x_0).

        :param x_start: the initial data batch.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :param noise: if specified, the split-out normal noise.
        :return: A noisy version of x_start.
        """
        if noise is None:
            noise = th.randn_like(x_start)
        assert noise.shape == x_start.shape
        return (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )

    def q_posterior_mean_variance(self, x_start, x_t, t):
        """
        Compute the mean and variance of the diffusion posterior:

            q(x_{t-1} | x_t, x_0)

        """
        assert x_start.shape == x_t.shape
        posterior_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        assert (
            posterior_mean.shape[0]
            == posterior_variance.shape[0]
            == posterior_log_variance_clipped.shape[0]
            == x_start.shape[0]
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.

        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if model_kwargs is None:
            model_kwargs = {}

        B, C = x.shape[:2]
        assert t.shape == (B,)
        model_output = model(x, t, **model_kwargs)
        if isinstance(model_output, tuple):
            model_output, extra = model_output
        else:
            extra = None

        if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
            assert model_output.shape == (B, C * 2, *x.shape[2:])
            model_output, model_var_values = th.split(model_output, C, dim=1)
            min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
            max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
            # The model_var_values is [-1, 1] for [min_var, max_var].
            frac = (model_var_values + 1) / 2
            model_log_variance = frac * max_log + (1 - frac) * min_log
            model_variance = th.exp(model_log_variance)
        else:
            model_variance, model_log_variance = {
                # for fixedlarge, we set the initial (log-)variance like so
                # to get a better decoder log likelihood.
                ModelVarType.FIXED_LARGE: (
                    np.append(self.posterior_variance[1], self.betas[1:]),
                    np.log(np.append(self.posterior_variance[1], self.betas[1:])),
                ),
                ModelVarType.FIXED_SMALL: (
                    self.posterior_variance,
                    self.posterior_log_variance_clipped,
                ),
            }[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)

        def process_xstart(x):
            if denoised_fn is not None:
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp(-1, 1)
            return x

        if self.model_mean_type == ModelMeanType.START_X:
            pred_xstart = process_xstart(model_output)
        else:
            pred_xstart = process_xstart(
                self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
            )
        model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)

        assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
        return {
            "mean": model_mean,
            "variance": model_variance,
            "log_variance": model_log_variance,
            "pred_xstart": pred_xstart,
            "extra": extra,
        }

    def _predict_xstart_from_eps(self, x_t, t, eps):
        assert x_t.shape == eps.shape
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
        )

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute the mean for the previous step, given a function cond_fn that
        computes the gradient of a conditional log probability with respect to
        x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
        condition on y.

        This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
        """
        gradient = cond_fn(x, t, **model_kwargs)
        new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
        return new_mean

    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute what the p_mean_variance output would have been, should the
        model's score function be conditioned by cond_fn.

        See condition_mean() for details on cond_fn.

        Unlike condition_mean(), this instead uses the conditioning strategy
        from Song et al (2020).
        """
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)

        eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
        eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)

        out = p_mean_var.copy()
        out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
        out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
        return out

    def p_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
    ):
        """
        Sample x_{t-1} from the model at the given timestep.

        :param model: the model to sample from.
        :param x: the current tensor at x_{t-1}.
        :param t: the value of t, starting at 0 for the first diffusion step.
        :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample.
        :param cond_fn: if not None, this is a gradient function that acts
                        similarly to the model.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - 'sample': a random sample from the model.
                 - 'pred_xstart': a prediction of x_0.
        """
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        noise = th.randn_like(x)
        nonzero_mask = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        if cond_fn is not None:
            out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
        sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}

    def p_sample_loop(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
    ):
        """
        Generate samples from the model.

        :param model: the model module.
        :param shape: the shape of the samples, (N, C, H, W).
        :param noise: if specified, the noise from the encoder to sample.
                      Should be of the same shape as `shape`.
        :param clip_denoised: if True, clip x_start predictions to [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample.
        :param cond_fn: if not None, this is a gradient function that acts
                        similarly to the model.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param device: if specified, the device to create the samples on.
                       If not specified, use a model parameter's device.
        :param progress: if True, show a tqdm progress bar.
        :return: a non-differentiable batch of samples.
        """
        final = None
        for sample in self.p_sample_loop_progressive(
            model,
            shape,
            noise=noise,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
        ):
            final = sample
        return final["sample"]

    def p_sample_loop_progressive(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
    ):
        """
        Generate samples from the model and yield intermediate samples from
        each timestep of diffusion.

        Arguments are the same as p_sample_loop().
        Returns a generator over dicts, where each dict is the return value of
        p_sample().
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps))[::-1]

        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm

            indices = tqdm(indices)

        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                out = self.p_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
                    model_kwargs=model_kwargs,
                )
                yield out
                img = out["sample"]

    def ddim_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        eta=0.0,
    ):
        """
        Sample x_{t-1} from the model using DDIM.

        Same usage as p_sample().
        """
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        if cond_fn is not None:
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)

        # Usually our model outputs epsilon, but we re-derive it
        # in case we used x_start or x_prev prediction.
        eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])

        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        sigma = (
            eta
            * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
            * th.sqrt(1 - alpha_bar / alpha_bar_prev)
        )
        # Equation 12.
        noise = th.randn_like(x)
        mean_pred = (
            out["pred_xstart"] * th.sqrt(alpha_bar_prev)
            + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
        )
        nonzero_mask = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        sample = mean_pred + nonzero_mask * sigma * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}

    def ddim_reverse_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        eta=0.0,
    ):
        """
        Sample x_{t+1} from the model using DDIM reverse ODE.
        """
        assert eta == 0.0, "Reverse ODE only for deterministic path"
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        if cond_fn is not None:
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        # Usually our model outputs epsilon, but we re-derive it
        # in case we used x_start or x_prev prediction.
        eps = (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
            - out["pred_xstart"]
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)

        # Equation 12. reversed
        mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps

        return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}

    def ddim_sample_loop(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
        eta=0.0,
    ):
        """
        Generate samples from the model using DDIM.

        Same usage as p_sample_loop().
        """
        final = None
        for sample in self.ddim_sample_loop_progressive(
            model,
            shape,
            noise=noise,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            cond_fn=cond_fn,
            model_kwargs=model_kwargs,
            device=device,
            progress=progress,
            eta=eta,
        ):
            final = sample
        return final["sample"]

    def ddim_sample_loop_progressive(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
        eta=0.0,
    ):
        """
        Use DDIM to sample from the model and yield intermediate samples from
        each timestep of DDIM.

        Same usage as p_sample_loop_progressive().
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
        indices = list(range(self.num_timesteps))[::-1]

        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm

            indices = tqdm(indices)

        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                out = self.ddim_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
                    model_kwargs=model_kwargs,
                    eta=eta,
                )
                yield out
                img = out["sample"]

    def _vb_terms_bpd(
        self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
    ):
        """
        Get a term for the variational lower-bound.

        The resulting units are bits (rather than nats, as one might expect).
        This allows for comparison to other papers.

        :return: a dict with the following keys:
                 - 'output': a shape [N] tensor of NLLs or KLs.
                 - 'pred_xstart': the x_0 predictions.
        """
        true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
            x_start=x_start, x_t=x_t, t=t
        )
        out = self.p_mean_variance(
            model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        kl = normal_kl(
            true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
        )
        kl = mean_flat(kl) / np.log(2.0)

        decoder_nll = -discretized_gaussian_log_likelihood(
            x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
        )
        assert decoder_nll.shape == x_start.shape
        decoder_nll = mean_flat(decoder_nll) / np.log(2.0)

        # At the first timestep return the decoder NLL,
        # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
        output = th.where((t == 0), decoder_nll, kl)
        return {"output": output, "pred_xstart": out["pred_xstart"]}

    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """
        Compute training losses for a single timestep.

        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param t: a batch of timestep indices.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param noise: if specified, the specific Gaussian noise to try to remove.
        :return: a dict with the key "loss" containing a tensor of shape [N].
                 Some mean or variance settings may also have other keys.
        """
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start, t, noise=noise)

        terms = {}

        if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
            terms["loss"] = self._vb_terms_bpd(
                model=model,
                x_start=x_start,
                x_t=x_t,
                t=t,
                clip_denoised=False,
                model_kwargs=model_kwargs,
            )["output"]
            if self.loss_type == LossType.RESCALED_KL:
                terms["loss"] *= self.num_timesteps
        elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
            model_output = model(x_t, t, **model_kwargs)

            if self.model_var_type in [
                ModelVarType.LEARNED,
                ModelVarType.LEARNED_RANGE,
            ]:
                B, C = x_t.shape[:2]
                assert model_output.shape == (B, C * 2, *x_t.shape[2:])
                model_output, model_var_values = th.split(model_output, C, dim=1)
                # Learn the variance using the variational bound, but don't let
                # it affect our mean prediction.
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms["vb"] = self._vb_terms_bpd(
                    model=lambda *args, r=frozen_out: r,
                    x_start=x_start,
                    x_t=x_t,
                    t=t,
                    clip_denoised=False,
                )["output"]
                if self.loss_type == LossType.RESCALED_MSE:
                    # Divide by 1000 for equivalence with initial implementation.
                    # Without a factor of 1/1000, the VB term hurts the MSE term.
                    terms["vb"] *= self.num_timesteps / 1000.0

            target = {
                ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
                    x_start=x_start, x_t=x_t, t=t
                )[0],
                ModelMeanType.START_X: x_start,
                ModelMeanType.EPSILON: noise,
            }[self.model_mean_type]
            assert model_output.shape == target.shape == x_start.shape
            terms["mse"] = mean_flat((target - model_output) ** 2)
            if "vb" in terms:
                terms["loss"] = terms["mse"] + terms["vb"]
            else:
                terms["loss"] = terms["mse"]
        else:
            raise NotImplementedError(self.loss_type)

        return terms

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.

        This term can't be optimized, as it only depends on the encoder.

        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
""" batch_size = x_start.shape[0] t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None): """ Compute the entire variational lower-bound, measured in bits-per-dim, as well as other related quantities. :param model: the model to evaluate loss on. :param x_start: the [N x C x ...] tensor of inputs. :param clip_denoised: if True, clip denoised samples. :param model_kwargs: if not None, a dict of extra keyword arguments to pass to the model. This can be used for conditioning. :return: a dict containing the following keys: - total_bpd: the total variational lower-bound, per batch element. - prior_bpd: the prior term in the lower-bound. - vb: an [N x T] tensor of terms in the lower-bound. - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep. - mse: an [N x T] tensor of epsilon MSEs for each timestep. """ device = x_start.device batch_size = x_start.shape[0] vb = [] xstart_mse = [] mse = [] for t in list(range(self.num_timesteps))[::-1]: t_batch = th.tensor([t] * batch_size, device=device) noise = th.randn_like(x_start) x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise) # Calculate VLB term at the current timestep with th.no_grad(): out = self._vb_terms_bpd( model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs, ) vb.append(out["output"]) xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2)) eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"]) mse.append(mean_flat((eps - noise) ** 2)) vb = th.stack(vb, dim=1) xstart_mse = th.stack(xstart_mse, dim=1) mse = th.stack(mse, dim=1) prior_bpd = self._prior_bpd(x_start) total_bpd = vb.sum(dim=1) + prior_bpd return { "total_bpd": total_bpd, "prior_bpd": prior_bpd, "vb": vb, "xstart_mse": xstart_mse, "mse": mse, } def _extract_into_tensor(arr, timesteps, broadcast_shape): """ Extract values from a 1-D numpy array for a batch of indices. :param arr: the 1-D numpy array. :param timesteps: a tensor of indices into the array to extract. :param broadcast_shape: a larger shape of K dimensions with the batch dimension equal to the length of timesteps. :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. """ res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float() while len(res.shape) < len(broadcast_shape): res = res[..., None] return res + th.zeros(broadcast_shape, device=timesteps.device)
34,326
38.275744
129
py
DiT
DiT-main/diffusion/__init__.py
# Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py

from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps


def create_diffusion(
    timestep_respacing,
    noise_schedule="linear",
    use_kl=False,
    sigma_small=False,
    predict_xstart=False,
    learn_sigma=True,
    rescale_learned_sigmas=False,
    diffusion_steps=1000
):
    betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)
    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE
    if timestep_respacing is None or timestep_respacing == "":
        timestep_respacing = [diffusion_steps]
    return SpacedDiffusion(
        use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),
        betas=betas,
        model_mean_type=(
            gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
        ),
        model_var_type=(
            (
                gd.ModelVarType.FIXED_LARGE
                if not sigma_small
                else gd.ModelVarType.FIXED_SMALL
            )
            if not learn_sigma
            else gd.ModelVarType.LEARNED_RANGE
        ),
        loss_type=loss_type
        # rescale_timesteps=rescale_timesteps,
    )
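

# Usage sketch (illustrative): the same factory builds the full process used
# for training and a respaced process for fast sampling; `num_timesteps`
# reflects the chosen respacing.
#
#     from diffusion import create_diffusion
#     train_diffusion = create_diffusion(timestep_respacing="")      # 1000 steps
#     ddim_sampler = create_diffusion(timestep_respacing="ddim100")  # 100 DDIM steps
#     assert (train_diffusion.num_timesteps, ddim_sampler.num_timesteps) == (1000, 100)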
1,622
33.531915
108
py
DiT
DiT-main/diffusion/diffusion_utils.py
# Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py

import torch as th
import numpy as np


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, th.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    logvar1, logvar2 = [
        x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )


def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal.
    """
    return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))


def continuous_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a continuous Gaussian distribution.

    :param x: the targets
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    centered_x = x - means
    inv_stdv = th.exp(-log_scales)
    normalized_x = centered_x * inv_stdv
    log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x)
    return log_probs


def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    centered_x = x - means
    inv_stdv = th.exp(-log_scales)
    plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
    cdf_plus = approx_standard_normal_cdf(plus_in)
    min_in = inv_stdv * (centered_x - 1.0 / 255.0)
    cdf_min = approx_standard_normal_cdf(min_in)
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    cdf_delta = cdf_plus - cdf_min
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
    )
    assert log_probs.shape == x.shape
    return log_probs
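

if __name__ == "__main__":
    # Quick sanity checks (illustrative): the KL between identical Gaussians
    # is zero, and the discretized log-likelihood is largest when the target
    # pixel value coincides with the predicted mean.
    x = th.zeros(4)
    print(normal_kl(x, x, 0.0, 0.0))  # tensor([0., 0., 0., 0.])
    ll_at_mean = discretized_gaussian_log_likelihood(x, means=x, log_scales=x - 2.0)
    ll_off_mean = discretized_gaussian_log_likelihood(x + 0.5, means=x, log_scales=x - 2.0)
    print(bool((ll_at_mean > ll_off_mean).all()))  # True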
3,189
34.842697
108
py
DiT
DiT-main/diffusion/respace.py
# Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py

import numpy as np
import torch as th

from .gaussian_diffusion import GaussianDiffusion


def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there are 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)


class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])

        base_diffusion = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)

    def p_mean_variance(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)

    def training_losses(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)

    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)

    def _wrap_model(self, model):
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.original_num_steps)

    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t


class _WrappedModel:
    def __init__(self, model, timestep_map, original_num_steps):
        self.model = model
        self.timestep_map = timestep_map
        # self.rescale_timesteps = rescale_timesteps
        self.original_num_steps = original_num_steps

    def __call__(self, x, ts, **kwargs):
        map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
        new_ts = map_tensor[ts]
        # if self.rescale_timesteps:
        #     new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
        return self.model(x, new_ts, **kwargs)
5,485
41.2
108
py
FATE
FATE-master/examples/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
661
35.777778
75
py
FATE
FATE-master/examples/pipeline/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/hetero_pearson/pipeline_hetero_pearson_sole.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform, HeteroPearson, Intersection, Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): common_param = dict(column_indexes=-1, cross_parties=False) pipeline = run_pearson_pipeline( config=config, namespace=namespace, data=dataset.breast, common_param=common_param, ) print(pipeline.get_component("hetero_pearson_0").get_model_param()) print(pipeline.get_component("hetero_pearson_0").get_summary()) def run_pearson_pipeline( config, namespace, data, common_param=None, guest_only_param=None, host_only_param=None, ): if isinstance(config, str): config = load_job_config(config) guest_data = data["guest"] host_data = data["host"][0] guest_data["namespace"] = f"{guest_data['namespace']}{namespace}" host_data["namespace"] = f"{host_data['namespace']}{namespace}" pipeline = ( PipeLine() .set_initiator(role="guest", party_id=config.parties.guest[0]) .set_roles(guest=config.parties.guest[0], host=config.parties.host[0]) ) reader_0 = Reader(name="reader_0") reader_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(table=guest_data) reader_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(table=host_data) data_transform_0 = DataTransform(name="data_transform_0") data_transform_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(with_label=True, output_format="dense") data_transform_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(with_label=False) intersect_0 = Intersection(name="intersection_0") if common_param is None: common_param = {} hetero_pearson_component = HeteroPearson(name="hetero_pearson_0", **common_param) if guest_only_param: hetero_pearson_component.get_party_instance( "guest", config.parties.guest[0] ).component_param(**guest_only_param) if host_only_param: hetero_pearson_component.get_party_instance( "host", config.parties.host[0] ).component_param(**host_only_param) pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component( hetero_pearson_component, data=Data(train_data=intersect_0.output.data) ) pipeline.compile() pipeline.fit() return pipeline class dataset_meta(type): @property def breast(cls): return { "guest": {"name": "breast_hetero_guest", "namespace": "experiment"}, "host": [{"name": "breast_hetero_host", "namespace": "experiment"}], } class dataset(metaclass=dataset_meta): ... if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
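# Usage sketch (illustrative): run_pearson_pipeline() also accepts per-party
# overrides on top of common_param. Mirroring pipeline_hetero_pearson_host_only.py,
# which disables the guest side with guest_only_param=dict(need_run=False), the
# host side could hypothetically be skipped the same way:
#
#     run_pearson_pipeline(
#         config="../../config.yaml",
#         namespace="",
#         data=dataset.breast,
#         common_param=dict(column_indexes=-1, cross_parties=False),
#         host_only_param=dict(need_run=False),
#     )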
4,004
31.560976
85
py
FATE
FATE-master/examples/pipeline/hetero_pearson/pipeline_hetero_pearson_mix_rand.py
# Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform, HeteroPearson, Intersection, Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): common_param = dict(column_indexes=-1, use_mix_rand=True) pipeline = run_pearson_pipeline( config=config, namespace=namespace, data=dataset.breast, common_param=common_param, ) print(pipeline.get_component("hetero_pearson_0").get_model_param()) print(pipeline.get_component("hetero_pearson_0").get_summary()) def run_pearson_pipeline( config, namespace, data, common_param=None, guest_only_param=None, host_only_param=None, ): if isinstance(config, str): config = load_job_config(config) guest_data = data["guest"] host_data = data["host"][0] guest_data["namespace"] = f"{guest_data['namespace']}{namespace}" host_data["namespace"] = f"{host_data['namespace']}{namespace}" pipeline = ( PipeLine() .set_initiator(role="guest", party_id=config.parties.guest[0]) .set_roles(guest=config.parties.guest[0], host=config.parties.host[0]) ) reader_0 = Reader(name="reader_0") reader_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(table=guest_data) reader_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(table=host_data) data_transform_0 = DataTransform(name="data_transform_0") data_transform_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(with_label=True, output_format="dense") data_transform_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(with_label=False) intersect_0 = Intersection(name="intersection_0") if common_param is None: common_param = {} hetero_pearson_component = HeteroPearson(name="hetero_pearson_0", **common_param) if guest_only_param: hetero_pearson_component.get_party_instance( "guest", config.parties.guest[0] ).component_param(**guest_only_param) if host_only_param: hetero_pearson_component.get_party_instance( "host", config.parties.host[0] ).component_param(**host_only_param) pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component( hetero_pearson_component, data=Data(train_data=intersect_0.output.data) ) pipeline.compile() pipeline.fit() return pipeline class dataset_meta(type): @property def breast(cls): return { "guest": {"name": "breast_hetero_guest", "namespace": "experiment"}, "host": [{"name": "breast_hetero_host", "namespace": "experiment"}], } class dataset(metaclass=dataset_meta): ... if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
4,000
31.795082
85
py
FATE
FATE-master/examples/pipeline/hetero_pearson/pipeline_hetero_pearson.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform, HeteroPearson, Intersection, Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): common_param = dict(column_indexes=-1) pipeline = run_pearson_pipeline( config=config, namespace=namespace, data=dataset.breast, common_param=common_param, ) print(pipeline.get_component("hetero_pearson_0").get_model_param()) print(pipeline.get_component("hetero_pearson_0").get_summary()) def run_pearson_pipeline( config, namespace, data, common_param=None, guest_only_param=None, host_only_param=None, ): if isinstance(config, str): config = load_job_config(config) guest_data = data["guest"] host_data = data["host"][0] guest_data["namespace"] = f"{guest_data['namespace']}{namespace}" host_data["namespace"] = f"{host_data['namespace']}{namespace}" pipeline = ( PipeLine() .set_initiator(role="guest", party_id=config.parties.guest[0]) .set_roles(guest=config.parties.guest[0], host=config.parties.host[0]) ) reader_0 = Reader(name="reader_0") reader_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(table=guest_data) reader_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(table=host_data) data_transform_0 = DataTransform(name="data_transform_0") data_transform_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(with_label=True, output_format="dense") data_transform_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(with_label=False) intersect_0 = Intersection(name="intersection_0") if common_param is None: common_param = {} hetero_pearson_component = HeteroPearson(name="hetero_pearson_0", **common_param) if guest_only_param: hetero_pearson_component.get_party_instance( "guest", config.parties.guest[0] ).component_param(**guest_only_param) if host_only_param: hetero_pearson_component.get_party_instance( "host", config.parties.host[0] ).component_param(**host_only_param) pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component( hetero_pearson_component, data=Data(train_data=intersect_0.output.data) ) pipeline.compile() pipeline.fit() return pipeline class dataset_meta(type): @property def breast(cls): return { "guest": {"name": "breast_hetero_guest", "namespace": "experiment"}, "host": [{"name": "breast_hetero_host", "namespace": "experiment"}], } class dataset(metaclass=dataset_meta): ... if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,983
31.390244
85
py
FATE
FATE-master/examples/pipeline/hetero_pearson/__init__.py
import os
import sys

additional_path = os.path.realpath("../")
if additional_path not in sys.path:
    sys.path.append(additional_path)
137
18.714286
41
py
FATE
FATE-master/examples/pipeline/hetero_pearson/pipeline_hetero_pearson_host_only.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform, HeteroPearson, Intersection, Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): common_param = dict(column_indexes=-1, cross_parties=False) guest_only_param = dict(need_run=False) pipeline = run_pearson_pipeline( config=config, namespace=namespace, data=dataset.breast, common_param=common_param, guest_only_param=guest_only_param, ) def run_pearson_pipeline( config, namespace, data, common_param=None, guest_only_param=None, host_only_param=None, ): if isinstance(config, str): config = load_job_config(config) guest_data = data["guest"] host_data = data["host"][0] guest_data["namespace"] = f"{guest_data['namespace']}{namespace}" host_data["namespace"] = f"{host_data['namespace']}{namespace}" pipeline = ( PipeLine() .set_initiator(role="guest", party_id=config.parties.guest[0]) .set_roles(guest=config.parties.guest[0], host=config.parties.host[0]) ) reader_0 = Reader(name="reader_0") reader_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(table=guest_data) reader_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(table=host_data) data_transform_0 = DataTransform(name="data_transform_0") data_transform_0.get_party_instance( role="guest", party_id=config.parties.guest[0] ).component_param(with_label=True, output_format="dense") data_transform_0.get_party_instance( role="host", party_id=config.parties.host[0] ).component_param(with_label=False) intersect_0 = Intersection(name="intersection_0") if common_param is None: common_param = {} hetero_pearson_component = HeteroPearson(name="hetero_pearson_0", **common_param) if guest_only_param: hetero_pearson_component.get_party_instance( "guest", config.parties.guest[0] ).component_param(**guest_only_param) if host_only_param: hetero_pearson_component.get_party_instance( "host", config.parties.host[0] ).component_param(**host_only_param) pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component( hetero_pearson_component, data=Data(train_data=intersect_0.output.data) ) pipeline.compile() pipeline.fit() return pipeline class dataset_meta(type): @property def breast(cls): return { "guest": {"name": "breast_hetero_guest", "namespace": "experiment"}, "host": [{"name": "breast_hetero_host", "namespace": "experiment"}], } class dataset(metaclass=dataset_meta): ... if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,951
31.130081
85
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-recursive-binning.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import json from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component.homo_feature_binning import HomoFeatureBinning from pipeline.component.reader import Reader from pipeline.component.scale import FeatureScale from pipeline.interface.data import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000, method="recursive_query") # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,257
36.448276
109
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-virtual-summary-binning-predict.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import json from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HomoFeatureBinning from pipeline.component import Reader from pipeline.component import FeatureScale from pipeline.interface import Data from pipeline.interface import Model from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000) homo_binning_1 = HomoFeatureBinning(name='homo_binning_1', sample_bins=1000) # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component(homo_binning_1, data=Data(data=data_transform_0.output.data), model=Model(model=homo_binning_0.output.model)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,473
37.6
109
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-recursive-binning-select-cols.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import json from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HomoFeatureBinning from pipeline.component import Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000, method="recursive_query", bin_indexes=[0, 2, 4, 6]) # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,240
36.252874
109
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
614
40
75
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-virtual-summary-binning-select-cols.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import json from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HomoFeatureBinning from pipeline.component import Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000, bin_indexes=[0, 2, 4, 6], bin_names=['x1']) # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,232
36.16092
109
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-recursive-binning-predict.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import json from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HomoFeatureBinning from pipeline.component import Reader from pipeline.interface import Data from pipeline.interface import Model from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000, method="recursive_query") homo_binning_1 = HomoFeatureBinning(name='homo_binning_1', sample_bins=1000) # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) pipeline.add_component(homo_binning_1, data=Data(data=data_transform_0.output.data), model=Model(model=homo_binning_0.output.model)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,456
37.411111
109
py
FATE
FATE-master/examples/pipeline/homo_feature_binning/pipeline-homo-virtual-summary-binning.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HomoFeatureBinning from pipeline.component import Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) # define DataTransform components data_transform_0 = DataTransform( name="data_transform_0", with_label=True, output_format="dense") # start component numbering at 0 homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=1000) # add components to pipeline, in order of task execution pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) # set data input sources of intersection components pipeline.add_component(homo_binning_0, data=Data(data=data_transform_0.output.data)) # compile pipeline once finished adding modules, this step will form conf and dsl files for running job pipeline.compile() # fit model pipeline.fit() # query component summary # print(json.dumps(pipeline.get_component("homo_binning_0").get_summary(), indent=4, ensure_ascii=False)) if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,136
35.905882
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-sparse-cv.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from pipeline.backend.pipeline import PipeLine from pipeline.component import DataTransform from pipeline.component import HeteroLR from pipeline.component import Intersection from pipeline.component import Reader from pipeline.interface import Data from pipeline.utils.tools import load_job_config def main(config="../../config.yaml", namespace=""): # obtain config if isinstance(config, str): config = load_job_config(config) parties = config.parties guest = parties.guest[0] host = parties.host[0] arbiter = parties.arbiter[0] guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"} host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"} # initialize pipeline pipeline = PipeLine() # set job initiator pipeline.set_initiator(role='guest', party_id=guest) # set participants information pipeline.set_roles(guest=guest, host=host, arbiter=arbiter) # define Reader components to read in data reader_0 = Reader(name="reader_0") # configure Reader for guest reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data) # configure Reader for host reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data) data_transform_0 = DataTransform(name="data_transform_0", output_format='sparse') # get DataTransform party instance of guest data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest) # configure DataTransform for guest data_transform_0_guest_party_instance.component_param(with_label=True) # get and configure DataTransform party instance of host data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False) # define Intersection components intersection_0 = Intersection(name="intersection_0") pipeline.add_component(reader_0) pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data)) pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data)) lr_param = { "penalty": "L2", "optimizer": "nesterov_momentum_sgd", "tol": 0.0001, "alpha": 0.01, "max_iter": 10, "early_stop": "diff", "batch_size": -1, "learning_rate": 0.15, "init_param": { "init_method": "random_uniform" }, "cv_param": { "n_splits": 3, "shuffle": False, "random_seed": 103, "need_cv": True } } hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param) pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data)) pipeline.compile() pipeline.fit() if __name__ == "__main__": parser = argparse.ArgumentParser("PIPELINE DEMO") parser.add_argument("-config", type=str, help="config file") args = parser.parse_args() if args.config is not None: main(args.config) else: main()
3,758
34.8
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-batch-random-strategy.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": 320,
        "batch_strategy": "random",
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        },
        "callback_param": {
            "callbacks": ["ModelCheckpoint"],
            "save_freq": 1
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
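# A minimal sketch of the knob this variant exercises: the sibling examples in
# this folder use batch_size=-1 (full-batch gradient descent), while here a
# finite batch_size plus "batch_strategy": "random" asks the trainer to draw
# mini-batches in a randomized order each epoch. The exact sampling behaviour
# is an assumption about FATE's batch generator; only the keys below actually
# appear in the script.
#
#     mini_batch = {"batch_size": 320, "batch_strategy": "random"}   # this file
#     full_batch = {"batch_size": -1}                                # sibling examples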
4,240
34.940678
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-no-intercept.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": 320,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros",
            "fit_intercept": False
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        },
        "callback_param": {
            "callbacks": ["ModelCheckpoint"],
            "save_freq": 1
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,239
35.239316
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-ovr-cv.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "nesterov_momentum_sgd",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 10,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "cv_param": {
            "n_splits": 3,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": True
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,799
35.190476
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-early-stop.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    guest_eval_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_eval_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
    pipeline.add_component(reader_1)
    data_transform_1 = DataTransform(name="data_transform_1", output_format='dense')
    pipeline.add_component(data_transform_1,
                           data=Data(data=reader_1.output.data),
                           model=Model(data_transform_0.output.model))
    # define Intersection components
    intersection_1 = Intersection(name="intersection_1")
    pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "callback_param": {
            "callbacks": ["ModelCheckpoint", "EarlyStopping"],
            "validation_freqs": 1,
            "early_stopping_rounds": 1,
            "metrics": None,
            "use_first_metric_only": False,
            "save_freq": 1
        },
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data,
                                                  validate_data=intersection_1.output.data))

    hetero_lr_1 = HeteroLR(name='hetero_lr_1')
    pipeline.add_component(hetero_lr_1, data=Data(test_data=intersection_1.output.data),
                           model=Model(hetero_lr_0.output.model))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data, hetero_lr_1.output.data]))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
5,666
38.354167
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-ovr-validate.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}

    guest_eval_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_eval_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
    pipeline.add_component(reader_1)
    data_transform_1 = DataTransform(name="data_transform_1", output_format='dense')
    pipeline.add_component(data_transform_1,
                           data=Data(data=reader_1.output.data),
                           model=Model(data_transform_0.output.model))
    # define Intersection components
    intersection_1 = Intersection(name="intersection_1")
    pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "nesterov_momentum_sgd",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 10,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "callback_param": {
            "callbacks": ["EarlyStopping"],
            "validation_freqs": 1,
            "early_stopping_rounds": 3
        },
        "init_param": {
            "init_method": "zeros"
        },
        "cv_param": {
            "n_splits": 3,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data,
                                                  validate_data=intersection_1.output.data))

    hetero_lr_1 = HeteroLR(name='hetero_lr_1')
    pipeline.add_component(hetero_lr_1, data=Data(test_data=intersection_1.output.data),
                           model=Model(hetero_lr_0.output.model))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
    pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data, hetero_lr_1.output.data]))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
5,465
38.608696
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-sample-weights.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse
import json

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import FeatureScale
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.component import SampleWeight
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(
        name="data_transform_0", with_label=True,
        output_format="dense")  # start component numbering at 0
    data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)

    intersect_0 = Intersection(name='intersect_0')
    scale_0 = FeatureScale(name='scale_0', need_run=False)
    sample_weight_0 = SampleWeight(name="sample_weight_0", class_weight={"0": 1, "1": 2})
    sample_weight_0.get_party_instance(role="host", party_id=host).component_param(need_run=False)

    param = {
        "penalty": None,
        "optimizer": "sgd",
        "tol": 1e-05,
        "alpha": 0.01,
        "max_iter": 3,
        "early_stop": "diff",
        "batch_size": 320,
        "learning_rate": 0.15,
        "decay": 0,
        "decay_sqrt": True,
        "init_param": {
            "init_method": "ones"
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": True,
            "random_seed": 33,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
    evaluation_0 = Evaluation(name='evaluation_0')

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    # set data input source of the intersection component
    pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(scale_0, data=Data(data=intersect_0.output.data))
    pipeline.add_component(sample_weight_0, data=Data(data=scale_0.output.data))
    pipeline.add_component(hetero_lr_0, data=Data(train_data=sample_weight_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()

    # fit model
    pipeline.fit()
    # query component summary
    print(json.dumps(pipeline.get_component("evaluation_0").get_summary(), indent=4, ensure_ascii=False))


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
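# Hedged note on SampleWeight above: class_weight={"0": 1, "1": 2} doubles the
# loss contribution of label-1 rows, and the component runs on the guest only
# because the host holds no labels (hence need_run=False for its host
# instance). If FATE's string mode is available -- an assumption, since this
# script does not exercise it -- frequency-based weighting would look like:
#
#     sample_weight_balanced = SampleWeight(name="sample_weight_1", class_weight="balanced")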
4,562
36.401639
107
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-cv.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 10,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "cv_param": {
            "n_splits": 3,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": True
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,713
35.058252
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-feature-engineering.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import FeatureScale
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import OneHotEncoder
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0")  # start component numbering at 0
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    feature_scale_0 = FeatureScale(name='feature_scale_0', method="standard_scale", need_run=True)
    pipeline.add_component(feature_scale_0, data=Data(data=intersection_0.output.data))

    binning_param = {
        "method": "quantile",
        "compress_thres": 10000,
        "head_size": 10000,
        "error": 0.001,
        "bin_num": 10,
        "bin_indexes": -1,
        "adjustment_factor": 0.5,
        "local_only": False,
        "need_run": True,
        "transform_param": {
            "transform_cols": -1,
            "transform_type": "bin_num"
        }
    }
    hetero_feature_binning_0 = HeteroFeatureBinning(name='hetero_feature_binning_0', **binning_param)
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=feature_scale_0.output.data))

    selection_param = {
        "select_col_indexes": -1,
        "filter_methods": [
            "manually",
            "iv_value_thres",
            "iv_percentile"
        ],
        "manually_param": {
            "filter_out_indexes": None
        },
        "iv_value_param": {
            "value_threshold": 1.0
        },
        "iv_percentile_param": {
            "percentile_threshold": 0.9
        },
        "need_run": True
    }
    hetero_feature_selection_0 = HeteroFeatureSelection(name='hetero_feature_selection_0', **selection_param)
    pipeline.add_component(hetero_feature_selection_0, data=Data(data=hetero_feature_binning_0.output.data),
                           model=Model(isometric_model=[hetero_feature_binning_0.output.model]))

    onehot_param = {
        "transform_col_indexes": -1,
        "transform_col_names": None,
        "need_run": True
    }
    one_hot_encoder_0 = OneHotEncoder(name='one_hot_encoder_0', **onehot_param)
    pipeline.add_component(one_hot_encoder_0, data=Data(data=hetero_feature_selection_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 1e-05,
        "alpha": 0.01,
        "max_iter": 10,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "random_uniform"
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=one_hot_encoder_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
6,244
36.172619
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-warm-start.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse
import json

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def prettify(response, verbose=True):
    if verbose:
        print(json.dumps(response, indent=4, ensure_ascii=False))
        print()
    return response


def main(config="../../config.yaml", namespace=""):
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # guest_train_data = {"name": "default_credit_hetero_guest", "namespace": f"experiment{namespace}"}
    # host_train_data = {"name": "default_credit_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "sgd",
        "tol": 0.0001,
        "alpha": 0.01,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros",
            "fit_intercept": True
        },
        "encrypt_param": {
            "key_length": 1024
        },
        "callback_param": {
            "callbacks": ["ModelCheckpoint"],
            "validation_freqs": 1,
            "early_stopping_rounds": 1,
            "metrics": None,
            "use_first_metric_only": False,
            "save_freq": 1
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", max_iter=5, **lr_param)
    hetero_lr_1 = HeteroLR(name="hetero_lr_1", max_iter=30, **lr_param)
    hetero_lr_2 = HeteroLR(name="hetero_lr_2", max_iter=30, **lr_param)

    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))
    pipeline.add_component(hetero_lr_1, data=Data(train_data=intersection_0.output.data),
                           model=Model(model=hetero_lr_0.output.model))
    pipeline.add_component(hetero_lr_2, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_1.output.data, hetero_lr_2.output.data]))

    pipeline.compile()

    # fit model
    pipeline.fit()
    # query component summary
    # prettify(pipeline.get_component("hetero_lr_0").get_summary())
    # prettify(pipeline.get_component("hetero_lr_1").get_summary())
    # prettify(pipeline.get_component("evaluation_0").get_summary())


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
5,260
36.848921
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-multi-host.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "nesterov_momentum_sgd",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "weight_diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,114
35.096491
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-validate.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    guest_eval_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_eval_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
    pipeline.add_component(reader_1)
    data_transform_1 = DataTransform(name="data_transform_1", output_format='dense')
    pipeline.add_component(data_transform_1,
                           data=Data(data=reader_1.output.data),
                           model=Model(data_transform_0.output.model))
    # define Intersection components
    intersection_1 = Intersection(name="intersection_1")
    pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "callback_param": {
            "callbacks": ["EarlyStopping"],
            "validation_freqs": 3,
            "early_stopping_rounds": 3
        },
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros",
            "fit_intercept": True
        },
        "encrypt_param": {
            "key_length": 2048
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data,
                                                  validate_data=intersection_1.output.data))

    hetero_lr_1 = HeteroLR(name='hetero_lr_1')
    pipeline.add_component(hetero_lr_1, data=Data(test_data=intersection_1.output.data),
                           model=Model(hetero_lr_0.output.model))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data, hetero_lr_1.output.data]))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
5,489
38.214286
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-sparse.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='sparse')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,093
34.912281
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-one-vs-all.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "nesterov_momentum_sgd",
        "tol": 1e-05,
        "alpha": 0.0001,
        "max_iter": 1,
        "early_stop": "diff",
        "multi_class": "ovr",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,874
36.259615
109
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/generated_testsuite.py
import json
import os
import sys

cur_path = os.path.realpath(__file__)
for i in range(4):
    cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)

cur_dir = os.path.abspath(os.path.dirname(__file__))


def insert_extract_code(file_path, fold_name):
    f_str = open(cur_dir + '/' + file_path, 'r').read()
    code = \
        """
from examples.pipeline.{}.generated_testsuite import extract
extract(pipeline, __file__)
""".format(fold_name)

    f_str = f_str.replace('pipeline.fit(work_mode=work_mode)',
                          '# pipeline.fit(work_mode=work_mode)\n' + code)
    f_str = f_str.replace('common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())', '')
    f_str = f_str.replace('common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())', '')
    f_str = f_str.replace('for i in range(4):', 'for i in range(5):')
    return f_str


def extract(my_pipeline, file_name, output_path='generated_conf_and_dsl'):
    out_name = file_name.split('/')[-1]
    out_name = out_name.replace('pipeline-', '').replace('.py', '').replace('-', '_')
    conf = my_pipeline.get_train_conf()
    dsl = my_pipeline.get_train_dsl()
    conf_name = './{}/{}_conf.json'.format(output_path, out_name)
    dsl_name = './{}/{}_dsl.json'.format(output_path, out_name)
    json.dump(conf, open(conf_name, 'w'), indent=4)
    print('conf name is {}'.format(conf_name))
    json.dump(dsl, open(dsl_name, 'w'), indent=4)
    print('dsl name is {}'.format(dsl_name))


def get_testsuite_file(testsuite_file_path):
    import examples
    cpn_path = os.path.dirname(examples.__file__) + f'/dsl/v1/{testsuite_file_path}'
    with open(cpn_path, 'r', encoding='utf-8') as load_f:
        testsuite_json = json.load(load_f)
    testsuite_json['tasks'] = {}
    return testsuite_json


def do_generated(fold_name='hetero_logistic_regression'):
    folder = '.'
    files = os.listdir(".")
    cmd = 'python {}'
    replaced_path = 'replaced_code'
    generated_path = 'generated_conf_and_dsl'

    if not os.path.exists('./{}'.format(replaced_path)):
        os.system('mkdir {}'.format(replaced_path))

    if not os.path.exists('./{}'.format(generated_path)):
        os.system('mkdir {}'.format(generated_path))

    for f in files:
        if not f.startswith("pipeline"):
            continue
        print(f)
        code_str = insert_extract_code(f, fold_name)
        open('./{}/{}'.format(replaced_path, f), 'w').write(code_str)
        print('replace done')
        # file_path = folder + f
        # os.system(cmd.format(folder + f))

    exe_files = os.listdir('./{}/'.format(replaced_path))
    for f in exe_files:
        print('executing {}'.format(f))
        os.system(cmd.format('./{}/'.format(replaced_path) + f))

    suite_json = get_testsuite_file('hetero_logistic_regression/hetero_lr_testsuite.json')
    conf_files = os.listdir('./{}/'.format(generated_path))
    f_dsl = {"-".join(f.split('_')[2: -1]): f for f in conf_files if 'dsl.json' in f}
    f_conf = {"-".join(f.split('_')[2: -1]): f for f in conf_files if 'conf.json' in f}

    for task_type, dsl_file in f_dsl.items():
        conf_file = f_conf[task_type]
        suite_json['tasks'][task_type] = {
            "conf": conf_file,
            "dsl": dsl_file
        }

    with open('./{}/{}_testsuite.json'.format(generated_path, fold_name), 'w', encoding='utf-8') as json_file:
        json.dump(suite_json, json_file, ensure_ascii=False, indent=4)

    # os.system('rm -rf {}'.format(replaced_path))


from sklearn.metrics import fowlkes_mallows_score

if __name__ == '__main__':
    do_generated()
    # pass
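# Hedged illustration of what insert_extract_code() above produces for a
# sibling script whose last action is `pipeline.fit(work_mode=work_mode)`:
# the rewritten copy comments out the fit call and appends the extract hook,
# so executing the copy dumps conf/dsl JSON instead of launching a job.
#
#     before:  pipeline.fit(work_mode=work_mode)
#     after:   # pipeline.fit(work_mode=work_mode)
#              from examples.pipeline.hetero_logistic_regression.generated_testsuite import extract
#              extract(pipeline, __file__)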
3,755
34.433962
110
py
FATE
FATE-master/examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-normal.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')

    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest
    data_transform_0_guest_party_instance.component_param(with_label=True)
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))

    lr_param = {
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": 320,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        },
        "callback_param": {
            "callbacks": ["ModelCheckpoint"],
            "save_freq": 1
        }
    }

    hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
    pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))

    evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,205
34.644068
109
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-svmlight.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "svmlight_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "svmlight_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", input_format="sparse")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    data_transform_1 = DataTransform(name="data_transform_1")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_0.output.data),
                           model=Model(model=data_transform_0.output.model))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
2,607
36.257143
103
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-missing-fill.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "ionosphere_scale_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "ionosphere_scale_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True,
                                                                                      label_name="LABEL",
                                                                                      missing_fill=True,
                                                                                      missing_fill_method="mean",
                                                                                      outlier_replace=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False,
                                                                                    missing_fill=True,
                                                                                    missing_fill_method="designated",
                                                                                    default_value=0,
                                                                                    outlier_replace=False)

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,216
43.068493
117
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-tag-value-match-id.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "tag_value_1000_140", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "tag_value_1000_140", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", input_format="tag", tag_with_value=True,
                                     with_match_id=True)
    data_transform_1 = DataTransform(name="data_transform_1")

    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data),
                           model=Model(model=data_transform_0.output.model))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
2,763
36.863014
103
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-tag-value.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "tag_value_1000_140", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False,
                                                                                    input_format="tag",
                                                                                    tag_with_value=True)

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
2,543
36.970149
104
py
FATE
FATE-master/examples/pipeline/data_transform/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-dense.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
2,334
34.923077
103
py
FATE
FATE-master/examples/pipeline/data_transform/pipeline-data-transform-dense-match-id.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_match_id=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_match_id=True)

    data_transform_1 = DataTransform(name="data_transform_1")

    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data),
                           model=Model(model=data_transform_0.output.model))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
2,877
37.373333
105
py
FATE
FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_binary.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook

# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config

fate_torch_hook(t)


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    train_data_0 = {"name": "breast_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "breast_homo_host", "namespace": "experiment"}

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)

    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")

    model = nn.Sequential(
        nn.Linear(30, 1),
        nn.Sigmoid()
    )
    loss = nn.BCELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)

    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1),
                          torch_seed=100
                          )

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0'), data=Data(data=nn_component.output.data))

    pipeline.compile()
    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,330
34.817204
120
py
FATE
FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_regression.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook

# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config

fate_torch_hook(t)


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    train_data_0 = {"name": "student_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "student_homo_host", "namespace": "experiment"}

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)

    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")

    model = nn.Sequential(
        nn.Linear(13, 1)
    )
    loss = nn.MSELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)

    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1),
                          torch_seed=100
                          )

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0', eval_type='regression'), data=Data(data=nn_component.output.data))

    pipeline.compile()
    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,333
35.637363
120
py
FATE
FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_multi.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook

# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam, DatasetParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config

fate_torch_hook(t)


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    train_data_0 = {"name": "vehicle_scale_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "vehicle_scale_homo_host", "namespace": "experiment"}

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)

    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")

    model = nn.Sequential(
        nn.Linear(18, 4),
        nn.Softmax(dim=1)  # note: CrossEntropyLoss already applies log-softmax internally
    )
    loss = nn.CrossEntropyLoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)

    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=50, batch_size=128,
                                               validation_freqs=1),
                          # reshape and set label to long for CrossEntropyLoss
                          dataset=DatasetParam(dataset_name='table', flatten_label=True, label_dtype='long'),
                          torch_seed=100
                          )

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0', eval_type='multi'), data=Data(data=nn_component.output.data))

    pipeline.compile()
    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,628
37.2
120
py
FATE
FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_aggregate_n_epoch.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook

# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config

fate_torch_hook(t)


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    train_data_0 = {"name": "breast_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "breast_homo_host", "namespace": "experiment"}

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)

    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")

    model = nn.Sequential(
        nn.Linear(30, 1),
        nn.Sigmoid()
    )
    loss = nn.BCELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)

    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1, aggregate_every_n_epoch=5),
                          torch_seed=100
                          )

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0'), data=Data(data=nn_component.output.data))

    pipeline.compile()
    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,357
35.107527
120
py
FATE
FATE-master/examples/pipeline/feature_scale/pipeline-feature-scale-normal.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import FeatureScale
from pipeline.component import FederatedSample
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import OneHotEncoder
from pipeline.component import Reader
from pipeline.interface import Data


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, missing_fill=True, outlier_replace=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False,
                                                                                    missing_fill=True,
                                                                                    outlier_replace=True)

    intersection_0 = Intersection(name="intersection_0")
    federated_sample_0 = FederatedSample(name="federated_sample_0", mode="stratified", method="upsample",
                                         fractions=[[0, 1.5], [1, 2.0]])

    feature_scale_0 = FeatureScale(name="feature_scale_0", method="min_max_scale", mode="normal")
    feature_scale_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        feat_upper=[1, 2, 1, 1, 0.5, 1, 2, 2, 1, 1])

    hetero_feature_binning_0 = HeteroFeatureBinning(name="hetero_feature_binning_0")
    hetero_feature_selection_0 = HeteroFeatureSelection(name="hetero_feature_selection_0")
    one_hot_0 = OneHotEncoder(name="one_hot_0")
    hetero_lr_0 = HeteroLR(name="hetero_lr_0", penalty="L2", optimizer="rmsprop", tol=1e-5,
                           init_param={"init_method": "random_uniform"},
                           alpha=0.01, max_iter=10, early_stop="diff",
                           batch_size=320, learning_rate=0.15)
    evaluation_0 = Evaluation(name="evaluation_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(federated_sample_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(feature_scale_0, data=Data(data=federated_sample_0.output.data))
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=feature_scale_0.output.data))
    pipeline.add_component(hetero_feature_selection_0, data=Data(data=hetero_feature_binning_0.output.data))
    pipeline.add_component(one_hot_0, data=Data(data=hetero_feature_selection_0.output.data))
    pipeline.add_component(hetero_lr_0, data=Data(train_data=one_hot_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()

    print(pipeline.get_component("evaluation_0").get_summary())


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,805
45.660194
120
py
FATE
FATE-master/examples/pipeline/feature_scale/pipeline-feature-scale-cap.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import FeatureScale
from pipeline.component import FederatedSample
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import OneHotEncoder
from pipeline.component import Reader
from pipeline.interface import Data


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, missing_fill=True, outlier_replace=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False,
                                                                                    missing_fill=True,
                                                                                    outlier_replace=True)

    intersection_0 = Intersection(name="intersection_0")
    federated_sample_0 = FederatedSample(name="federated_sample_0", mode="stratified", method="upsample",
                                         fractions=[[0, 1.5], [1, 2.0]])

    feature_scale_0 = FeatureScale(name="feature_scale_0", method="min_max_scale", mode="cap",
                                   feat_upper=1, feat_lower=0)

    hetero_feature_binning_0 = HeteroFeatureBinning(name="hetero_feature_binning_0")
    hetero_feature_selection_0 = HeteroFeatureSelection(name="hetero_feature_selection_0")
    one_hot_0 = OneHotEncoder(name="one_hot_0")
    hetero_lr_0 = HeteroLR(name="hetero_lr_0", penalty="L2", optimizer="rmsprop", tol=1e-5,
                           init_param={"init_method": "random_uniform"},
                           alpha=0.01, max_iter=10, early_stop="diff",
                           batch_size=320, learning_rate=0.15)
    evaluation_0 = Evaluation(name="evaluation_0")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(federated_sample_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(feature_scale_0, data=Data(data=federated_sample_0.output.data))
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=feature_scale_0.output.data))
    pipeline.add_component(hetero_feature_selection_0, data=Data(data=hetero_feature_binning_0.output.data))
    pipeline.add_component(one_hot_0, data=Data(data=hetero_feature_selection_0.output.data))
    pipeline.add_component(hetero_lr_0, data=Data(train_data=one_hot_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))

    pipeline.compile()

    pipeline.fit()

    print(pipeline.get_component("evaluation_0").get_summary())


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
4,704
46.05
120
py
FATE
FATE-master/examples/pipeline/feature_scale/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/feldman_verifiable_sum/pipeline-feldman-verifiable-sum.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader
from pipeline.component import DataTransform
from pipeline.component import FeldmanVerifiableSum
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host

    guest_train_data = {"name": "breast_homo_test", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_homo_test", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role="guest", party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=hosts)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role="host", party_id=hosts).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    # get and configure DataTransform party instance of guest
    data_transform_0.get_party_instance(
        role="guest", party_id=guest).component_param(
        with_label=False, output_format="dense")
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role="host", party_id=hosts).component_param(with_label=False)

    # define FeldmanVerifiableSum components
    feldmanverifiablesum_0 = FeldmanVerifiableSum(name="feldmanverifiablesum_0")
    feldmanverifiablesum_0.get_party_instance(role="guest", party_id=guest).component_param(sum_cols=[1, 2, 3], q_n=6)
    feldmanverifiablesum_0.get_party_instance(role="host", party_id=hosts).component_param(sum_cols=[1, 2, 3], q_n=6)

    # add components to pipeline, in order of task execution.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(feldmanverifiablesum_0, data=Data(data=data_transform_0.output.data))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()

    # fit model
    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,420
37.011111
118
py
FATE
FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='plain')

    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    pipeline.fit()

    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, hetero_ftl_0])

    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,842
37.818182
103
py
FATE
FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-encrypted.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='encrypted')

    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,286
38.60241
103
py
FATE
FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-communication-efficient.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='plain',
                             communication_efficient=True, local_round=5)

    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,471
38.908046
103
py
FATE
FATE-master/examples/pipeline/hetero_ftl/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-plain.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='plain')

    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()

    pipeline.fit()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,282
38.554217
103
py
FATE
FATE-master/examples/pipeline/feature_imputation/pipeline-feature-imputation-method.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import FeatureImputation
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", with_label=False, output_format="dense")

    intersection_0 = Intersection(name="intersection_0")

    feature_imputation_0 = FeatureImputation(name="feature_imputation_0",
                                             missing_fill_method="max", missing_impute=[0])
    feature_imputation_1 = FeatureImputation(name="feature_imputation_1")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(feature_imputation_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(feature_imputation_1, data=Data(data=intersection_0.output.data),
                           model=Model(model=feature_imputation_0.output.model))

    pipeline.compile()

    pipeline.fit()

    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, intersection_0, feature_imputation_0])

    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
3,520
39.011364
120
py
FATE
FATE-master/examples/pipeline/feature_imputation/pipeline-feature-imputation-column-method.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import FeatureImputation
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", with_label=False)

    intersection_0 = Intersection(name="intersection_0")

    feature_imputation_0 = FeatureImputation(name="feature_imputation_0",
                                             default_value=42,
                                             missing_impute=[0])
    feature_imputation_0.get_party_instance(role='guest', party_id=guest).component_param(
        col_missing_fill_method={"doctorco": "min",
                                 "hscore": "designated"})

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(feature_imputation_0, data=Data(data=intersection_0.output.data))

    pipeline.compile()
    pipeline.fit()

    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, intersection_0, feature_imputation_0])
    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
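# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): col_missing_fill_method maps
# individual column names to per-column fill strategies on the guest side,
# overriding the component-wide default_value for those columns; columns not
# listed keep the designated default (42 here). An assumed variant covering
# one extra column might look like:
#
#   feature_imputation_0.get_party_instance(
#       role='guest', party_id=guest).component_param(
#       col_missing_fill_method={
#           "doctorco": "min",       # fill with the column minimum
#           "hscore": "designated",  # fill with default_value
#           "age": "max",            # hypothetical extra column and method
#       })
#
# The "age" entry is illustrative only; valid method names should be checked
# against the FeatureImputation parameter documentation.
# ---------------------------------------------------------------------------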
3,497
38.303371
103
py
FATE
FATE-master/examples/pipeline/feature_imputation/__init__.py
0
0
0
py
FATE
FATE-master/examples/pipeline/feature_imputation/pipeline-feature-imputation-designated.py
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import FeatureImputation
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    data_transform_0 = DataTransform(name="data_transform_0", with_label=False)

    intersection_0 = Intersection(name="intersection_0")

    feature_imputation_0 = FeatureImputation(name="feature_imputation_0",
                                             missing_fill_method="designated",
                                             default_value=42,
                                             missing_impute=[0])

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(feature_imputation_0, data=Data(data=intersection_0.output.data))

    pipeline.compile()
    pipeline.fit()

    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, intersection_0, feature_imputation_0])
    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
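# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): with missing_fill_method set
# to "designated", every value listed in missing_impute (the literal 0 here)
# is replaced by default_value (42) across all features. Since missing_impute
# already takes a list, a variant treating several sentinel values as missing
# could plausibly be configured as:
#
#   feature_imputation_0 = FeatureImputation(
#       name="feature_imputation_0",
#       missing_fill_method="designated",
#       default_value=42,
#       missing_impute=[0, -1, -99])  # assumed sentinel list, illustrative
#
# The extra sentinel values are an assumption for illustration, not taken
# from the original example.
# ---------------------------------------------------------------------------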
3,328
37.709302
103
py