repo (string, len 2-99) | file (string, len 13-225) | code (string, len 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
MICCAI21_MMQ
|
MICCAI21_MMQ-main/tools/compute_softscore.py
|
"""
This code is slightly modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import os
import sys
import json
import numpy as np
import re
import _pickle as cPickle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# from dataset import Dictionary
import utils
contractions = {
"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve":
"could've", "couldnt": "couldn't", "couldn'tve": "couldn't've",
"couldnt've": "couldn't've", "didnt": "didn't", "doesnt":
"doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've":
"hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent":
"haven't", "hed": "he'd", "hed've": "he'd've", "he'dve":
"he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll",
"hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im":
"I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've":
"it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
"maam": "ma'am", "mightnt": "mightn't", "mightnt've":
"mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't",
"notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat":
"'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve":
"she'd've", "she's": "she's", "shouldve": "should've", "shouldnt":
"shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve":
"shouldn't've", "somebody'd": "somebodyd", "somebodyd've":
"somebody'd've", "somebody'dve": "somebody'd've", "somebodyll":
"somebody'll", "somebodys": "somebody's", "someoned": "someone'd",
"someoned've": "someone'd've", "someone'dve": "someone'd've",
"someonell": "someone'll", "someones": "someone's", "somethingd":
"something'd", "somethingd've": "something'd've", "something'dve":
"something'd've", "somethingll": "something'll", "thats":
"that's", "thered": "there'd", "thered've": "there'd've",
"there'dve": "there'd've", "therere": "there're", "theres":
"there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve":
"they'd've", "theyll": "they'll", "theyre": "they're", "theyve":
"they've", "twas": "'twas", "wasnt": "wasn't", "wed've":
"we'd've", "we'dve": "we'd've", "weve": "we've", "werent":
"weren't", "whatll": "what'll", "whatre": "what're", "whats":
"what's", "whatve": "what've", "whens": "when's", "whered":
"where'd", "wheres": "where's", "whereve": "where've", "whod":
"who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl":
"who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve":
"would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll":
"y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd":
"you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll":
"you'll", "youre": "you're", "youve": "you've"
}
# list of 172 stop-words (the 179 nltk stopwords minus {'no', 'both', 'above', 'below', 'after', 'before', 'between'})
# stopwords = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
# 'ourselves', 'you', "you're", "you've", "you'll",
# "you'd", 'your', 'yours', 'yourself', 'yourselves',
# 'he', 'him', 'his', 'himself', 'she', "she's", 'her',
# 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they',
# 'them', 'their', 'theirs', 'themselves', 'what', 'which',
# 'who', 'whom', 'this', 'that', "that'll", 'these', 'those',
# 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
# 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing',
# 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as',
# 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about',
# 'against', 'into', 'through', 'during',
# 'to', 'from', 'up', 'down', 'in',
# 'out', 'on', 'off', 'over', 'under', 'again', 'further',
# 'then', 'once', 'here', 'there', 'when', 'where', 'why',
# 'how', 'all', 'any', 'each', 'few', 'more',
# 'most', 'other', 'some', 'such', 'nor', 'not',
# 'only', 'own', 'same', 'so', 'than', 'too', 'very',
# 's', 't', 'can', 'will', 'just', 'don', "don't",
# 'should', "should've", 'now', 'd', 'll', 'm', 'o',
# 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn',
# "couldn't", 'didn', "didn't", 'doesn', "doesn't",
# 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't",
# 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',
# "mustn't", 'needn', "needn't", 'shan', "shan't",
# 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren',
# "weren't", 'won', "won't", 'wouldn', "wouldn't"]
manual_map = { 'none': '0',
'zero': '0',
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10'}
articles = ['a', 'an', 'the']
period_strip = re.compile(r"(?<!\d)(\.)(?!\d)")  # strip periods that are not part of a decimal number
comma_strip = re.compile(r"(\d)(\,)(\d)")
punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
# Notice that VQA score is the average of 10 choose 9 candidate answers cases
# See http://visualqa.org/evaluation.html
def get_score(occurences):
if occurences == 0:
return .0
elif occurences == 1:
return .3
elif occurences == 2:
return .6
elif occurences == 3:
return .9
else:
return 1.
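# --- Hedged illustration (not part of the original script) -------------------
# The hard-coded soft scores above approximate the official VQA accuracy,
# acc(ans) = min(#humans who gave ans / 3, 1), averaged over all 10-choose-9
# subsets of the ten human answers. The sketch below (hypothetical helper name,
# assumptions noted in comments) reproduces that averaging so the mapping
# 1 -> 0.3, 2 -> 0.6, 3 -> 0.9, >=4 -> 1.0 can be verified.
def _vqa_soft_score_check(occurences, n_annotators=10):
    # average min(k/3, 1) over every leave-one-out subset of 9 answers,
    # where k is how many of the remaining answers match
    subset_scores = []
    for left_out in range(n_annotators):
        k = occurences - 1 if left_out < occurences else occurences
        subset_scores.append(min(k / 3.0, 1.0))
    return sum(subset_scores) / n_annotators
# e.g. _vqa_soft_score_check(3) == (3 * 2/3 + 7 * 1.0) / 10 == 0.9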
def process_punctuation(inText):
outText = inText
for p in punct:
if (p + ' ' in inText or ' ' + p in inText) \
or (re.search(comma_strip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = period_strip.sub("", outText)
return outText
def process_digit_article(inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = manual_map.setdefault(word, word)
if word not in articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in contractions:
outText[wordId] = contractions[word]
outText = ' '.join(outText)
return outText
def multiple_replace(text, wordDict):
for key in wordDict:
text = text.replace(key, wordDict[key])
return text
def preprocess_answer(answer):
answer = str(answer)
answer = process_digit_article(process_punctuation(answer))
answer = answer.replace(',', '').replace('x ray', 'xray')
return answer
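# Hedged example of the preprocessing chain above (made-up answer string):
#   preprocess_answer("Two x ray images.")  ->  "2 xray images"
# ("two" is mapped to "2" by manual_map, the trailing period is stripped,
#  and "x ray" is collapsed to "xray").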
def filter_answers(answers_dset, min_occurence):
"""This will change the answer to preprocessed version
"""
occurence = {}
for ans_entry in answers_dset:
answers = ans_entry['answer']
gtruth = ans_entry['answer']
gtruth = preprocess_answer(gtruth)
if gtruth not in occurence:
occurence[gtruth] = set()
occurence[gtruth].add(ans_entry['qid'])
for answer in list(occurence):
if len(occurence[answer]) < min_occurence:
occurence.pop(answer)
print('Num of answers that appear >= %d times: %d' % (
min_occurence, len(occurence)))
return occurence
def create_ans2label(occurence, name, cache_root='data_RAD/cache'):
"""Note that this will also create label2ans.pkl at the same time
occurence: dict {answer -> whatever}
name: prefix of the output file
cache_root: str
"""
ans2label = {}
label2ans = []
label = 0
for answer in occurence:
label2ans.append(answer)
ans2label[answer] = label
label += 1
print('ans2lab', len(ans2label))
print('lab2ans', len(label2ans))
utils.create_dir(cache_root)
cache_file = os.path.join(cache_root, name+'_ans2label.pkl')
cPickle.dump(ans2label, open(cache_file, 'wb'))
cache_file = os.path.join(cache_root, name+'_label2ans.pkl')
cPickle.dump(label2ans, open(cache_file, 'wb'))
return ans2label
def compute_target(answers_dset, ans2label, name, cache_root='data_RAD/cache'):
"""Augment answers_dset with soft score as label
***answers_dset should be preprocessed***
Write result into a cache file
"""
target = []
count = 0
for ans_entry in answers_dset:
answers = preprocess_answer(ans_entry['answer'])
# answer_count = {}
# for answer in answers:
# answer_ = answer['answer']
# answer_count[answer_] = answer_count.get(answer_, 0) + 1
labels = []
scores = []
if answers in ans2label:
scores.append(1.)
labels.append(ans2label[answers])
# for answer in answer_count:
# if answer not in ans2label:
# continue
# labels.append(ans2label[answer])
# score = get_score(answer_count[answer])
# scores.append(score)
target.append({
'qid': ans_entry['qid'],
'image_name': ans_entry['image_name'],
'labels': labels,
'scores': scores
})
utils.create_dir(cache_root)
cache_file = os.path.join(cache_root, name+'_target.pkl')
cPickle.dump(target, open(cache_file, 'wb'))
return target
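# Hedged illustration of one resulting target entry (field values are made up):
#   {'qid': 42, 'image_name': 'synpic12345.jpg', 'labels': [17], 'scores': [1.0]}
# 'labels'/'scores' stay empty when the preprocessed answer is not in ans2label.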
def get_answer(qid, answers):
for ans in answers:
if ans['question_id'] == qid:
return ans
def get_question(qid, questions):
for question in questions:
if question['question_id'] == qid:
return question
if __name__ == '__main__':
VQA_dir = 'data_RAD'
train_answers = json.load(open(VQA_dir + '/trainset.json'))
answers = train_answers
occurence = filter_answers(answers, 0)
print('occ', (len(occurence)))
cache_path = VQA_dir + '/cache/trainval_ans2label.pkl'
if os.path.isfile(cache_path):
print('found %s' % cache_path)
ans2label = cPickle.load(open(cache_path, 'rb'))
else:
ans2label = create_ans2label(occurence, 'trainval')
compute_target(train_answers, ans2label, 'train')
test_answers = json.load(open(VQA_dir + '/testset.json'))
compute_target(test_answers, ans2label, 'test')
| 10,849 | 36.673611 | 117 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/tools/create_dictionary.py
|
"""
This code is from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import os
import sys
import json
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dataset_VQA import Dictionary
def create_dictionary(dataroot):
dictionary = Dictionary()
questions = []
files = [
'trainset.json',
]
for path in files:
question_path = os.path.join(dataroot, path)
qs = json.load(open(question_path))
for q in qs:
dictionary.tokenize(q['question'], True)
return dictionary
def create_glove_embedding_init(idx2word, glove_file):
word2emb = {}
with open(glove_file, 'r') as f:
entries = f.readlines()
emb_dim = len(entries[0].split(' ')) - 1
print('embedding dim is %d' % emb_dim)
weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32)
for entry in entries:
vals = entry.split(' ')
word = vals[0]
vals = list(map(float, vals[1:]))
word2emb[word] = np.array(vals)
for idx, word in enumerate(idx2word):
if word not in word2emb:
continue
weights[idx] = word2emb[word]
return weights, word2emb
if __name__ == '__main__':
VQA_dir = 'data_RAD'
d = create_dictionary(VQA_dir)
d.dump_to_file(VQA_dir + '/dictionary.pkl')
d = Dictionary.load_from_file(VQA_dir + '/dictionary.pkl')
emb_dim = 300
glove_file = VQA_dir + '/glove/glove.6B.%dd.txt' % emb_dim
weights, word2emb = create_glove_embedding_init(d.idx2word, glove_file)
np.save(VQA_dir + '/glove6b_init_%dd.npy' % emb_dim, weights)
| 1,702 | 29.410714 | 76 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/VQA_RAD_train.py
|
import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 64 * 8 * 8
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [64, 1, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
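# Hedged shape check for the config above, given that learner.py applies each
# conv2d entry as [ch_out, ch_in, k, k, stride, padding] to a 1 x 84 x 84 input:
#   84 -> conv(k=3, s=2): 41 -> conv: 20 -> conv: 9 -> conv(k=2, s=1): 8
# so the flattened feature is 64 * 8 * 8, matching dim_hidden_linear.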
# initial MAML model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
# Load dataset
# batchsz here means total episode number
mini = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=10000, resize=args.imgsz, t = args.t_dst)
# Train model
for epoch in range(args.epoch//10000):
# fetch meta_batchsz num of episode each time
db = DataLoader(mini, args.task_num, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
for step, (x_spt, y_spt, x_qry, y_qry, _, _) in enumerate(db):
x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
accs = maml(x_spt, y_spt, x_qry, y_qry)
if step % 50 == 0:
end = time.time()
print('step:', step, '\ttraining acc:', accs, '\ttime:', end - start)
start = time.time()
# Save model
model_dir = args.output + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d'%(args.imgsz, args.n_way, args.k_spt, args.t_dst)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % epoch)
print('saving model to:', model_dir)
torch.save(maml.state_dict(), model_path)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=10000)
argparser.add_argument('--n_way', type=int, help='n way', default=3)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=1)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--output', type=str, help='output directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
argparser.add_argument('--t_dst', type=int, help='t-th step', default=0)
args = argparser.parse_args()
main(args)
| 4,249 | 37.990826 | 137 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/VQA_RAD_fuse.py
|
import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import pickle as p
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def num_tensors(model):
tmp = filter(lambda x: x.requires_grad, model.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
return num
def feature_extraction(maml, db_test, device):
for i in range(args.nums_t):
model_dir = args.input + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, i)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
print('-------load model weight from:', model_path)
maml.load_state_dict(torch.load(model_path))
start = time.time()
logit_softmax_list = []
feature_list = []
path_list = []
for x_spt, y_spt, x_qry, y_qry, _, flatten_query_x in db_test:
x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
logit_softmax, feature = maml.feature_validation(x_spt, y_spt, x_qry, y_qry)
logit_softmax_list += logit_softmax
feature_list += list(feature.cpu())
path_list += flatten_query_x
if not os.path.exists(args.feature_output):
os.makedirs(args.feature_output)
p.dump(feature_list, open(args.feature_output + '/VQARAD_feature_t%d.pkl'%(i), 'wb'))
p.dump(logit_softmax_list, open(args.feature_output + '/VQARAD_logit_softmax_t%d.pkl'%(i), 'wb'))
# [b, update_step+1]
end = time.time()
print('***************** Time:', end - start)
def load_feature_and_logit(args):
features = []
logits = []
for t in range(args.nums_t):
features.append(torch.stack(p.load(open(args.feature_output + '/VQARAD_feature_t%d.pkl' % (t), 'rb'))))
logits.append(p.load(open(args.feature_output + '/VQARAD_logit_softmax_t%d.pkl' % (t), 'rb')))
features = torch.stack(features).cpu().transpose(0, 1)
logits = torch.tensor(logits).transpose(0, 1)
return features, logits
def fuse_score(features, logits, alpha=0.5):
"""
Compute fuse score using equation: S_F = \gamma S_P + (1-\gamma)\sum_{t=1}^m 1 - Cosine \left(F_{c}, F_t\right) \forall F_{c} \neq F_t
For more detail, please visit Algorithm 2 of the paper.
:param features:
:param logits:
:param alpha:
:return results: score list, type:
"""
cos = torch.nn.CosineSimilarity(dim=0)
results = []
for idx in range(len(features)):
if idx%100 == 0:
print('%d / %d'%(idx, len(features)))
feature_sample = features[idx]
logit_sample = logits[idx]
sample_results = []
for i in range(len(feature_sample)):
row_results = []
for j in range(len(feature_sample)):
if i != j:
sim = cos(feature_sample[i], feature_sample[j])
div = 1 - sim
row_results.append(div)
row_results = torch.nn.functional.tanh(sum(row_results))
fuse_score = (alpha * logit_sample[i]) + ((1 - alpha) * row_results)
# norm = torch.nn.functional.normalize(torch.stack([logit_sample[i], row_results]), p=2, dim=0)
# fuse_score = (alpha * norm[0]) + ((1 - alpha) * norm[1])
sample_results.append(fuse_score)
results.append(sample_results)
return results
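# --- Hedged usage sketch (illustrative tensors only, not in the original) ----
# fuse_score returns one fused score per (query sample, model); main() below
# averages over samples and sorts to rank the m + 1 candidate models.
#   feats  = torch.randn(2, 6, 64)     # [num_samples, nums_t, feat_dim]
#   peaks  = torch.rand(2, 6)          # [num_samples, nums_t] softmax peaks
#   scores = fuse_score(feats, peaks, alpha=0.75)
#   ranking = torch.sort(torch.tensor(scores).mean(dim=0), descending=True)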
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 64 * 8 * 8
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [64, 1, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
# initial MAML model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
print(maml)
print('Total trainable tensors:', num_tensors(maml))
# Load validation dataset
# batchsz here means total episode number
mini_test = VQARAD_maml(args.data, mode='val', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz)
db_test = DataLoader(mini_test, 1, shuffle=False, num_workers=1, pin_memory=True)
# Extract validation features, logits for each model and save them
feature_extraction(maml, db_test, device)
# Load all features and logits
features, logits = load_feature_and_logit(args)
# Compute fuse score
results = fuse_score(features, logits, alpha = 0.75)
# Show results to select suitable models
results = torch.tensor(results).mean(dim=0)
print(results)
print('------- sort', torch.sort(results, descending=True))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
argparser.add_argument('--n_way', type=int, help='n way', default=3)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=1)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
argparser.add_argument('--nums_t', type=int, help='num model', default=6) # m refinement models + first model (5 + 1 = 6)
argparser.add_argument('--feature_output', type=str, help='input directory for saving feature', default='features')
args = argparser.parse_args()
main(args)
| 7,237 | 40.597701 | 145 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/pathVQA_maml.py
|
import os
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
import numpy as np
from PIL import Image
import random
class PathVQA_maml(Dataset):
"""
NOTICE: meta-learning is different from general supervised learning, especially the concept of batch and set.
batch: contains several sets
sets: conains n_way * k_shot for meta-train set, n_way * n_query for meta-test set.
"""
def __init__(self, root, mode, batchsz, n_way, k_shot, k_query, resize, startidx=0, unlabel = False, t = 0):
"""
:param root: root path of the dataset
:param mode: train, val or test
:param batchsz: batch size of sets, not batch of imgs
:param n_way:
:param k_shot:
:param k_query: num of query imgs per class
:param resize: resize to
:param startidx: start to index label from startidx
"""
self.batchsz = batchsz # batch of set, not batch of imgs
self.n_way = n_way # n-way
self.k_shot = k_shot # k-shot
self.k_query = k_query # for evaluation
self.setsz = self.n_way * self.k_shot # num of samples per set
self.querysz = self.n_way * self.k_query # number of samples per set for evaluation
self.resize = resize # resize to
self.startidx = startidx # index label not from 0, but from startidx
self.unlabel = unlabel
print('shuffle DB :%s, b:%d, %d-way, %d-shot, %d-query, resize:%d' % (
mode, batchsz, n_way, k_shot, k_query, resize))
if mode == 'train':
self.transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
transforms.Resize((self.resize, self.resize)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(5),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
else:
self.transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
transforms.Resize((self.resize, self.resize)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
# self.path = os.path.join(root, 'maml', mode)
# self.path = os.path.join(root, 'maml%d'%(self.resize), mode)
if mode == 'train':
self.path = os.path.join(root, 't%d'%(t), mode)
else:
self.path = os.path.join(root, mode)
print('--loading data from:', self.path)
data = self.loadData(self.path)# image path
self.data = []
self.img2label = {}
self.label2class = []
for i, (k, v) in enumerate(data.items()):
self.data.append(v) # [[img1, img2, ...], [img111, ...]]
self.img2label[k] = i + self.startidx # {"img_name[:9]":label}
self.label2class.append(k)
self.img2label['x'] = i + 1 + self.startidx
self.cls_num = len(self.data)
if self.unlabel:
self.path_unlabel = os.path.join(root, 't%d'%(t), mode + '_unlabel')
self.data_unlabel = [os.path.join(self.path_unlabel, file) for file in os.listdir(self.path_unlabel)]
self.create_batch_unlabel(self.batchsz)
else:
self.create_batch(self.batchsz)
def loadData(self, path):
"""
return a dict saving the information of csv
:param splitFile: csv file name
:return: {label:[file1, file2 ...]}
"""
dictLabels = {}
for (dirpath, dirnames, _) in os.walk(path):
for dir in dirnames:
dictLabels[dir] = [os.path.join(dir, file) for file in os.listdir(os.path.join(dirpath, dir))]
return dictLabels
def create_other(self, selected_cls):
other_pool = []
for idx, i in enumerate(self.data):
if idx not in selected_cls:
other_pool += i
other_pool = [i + 'x' for i in other_pool]
return other_pool
def create_batch(self, batchsz):
"""
create batch for meta-learning.
'episode' here corresponds to one batch element, and batchsz means how many sets (episodes) we want to retain.
:param batchsz: batch size
:return:
"""
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx[:self.k_shot]) # idx for Dtrain
indexDtest = np.array(selected_imgs_idx[self.k_shot:]) # idx for Dtest
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(self.data[cls])[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other[:self.k_shot]) # idx for Dtrain
indexDtest_other = np.array(selected_imgs_idx_other[self.k_shot:])
support_x.append(
np.array(other_images)[indexDtrain_other].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(other_images)[indexDtest_other].tolist())
# shuffle the corresponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def create_batch_unlabel(self, batchsz):
# Data refinement for unlabeled data; see Algorithm 1 of the paper
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx) # idx for Dtrain
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
selected_imgs_idx_test = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest = np.array(selected_imgs_idx_test) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other)
support_x.append(
np.array(other_images)[indexDtrain_other].tolist())
selected_imgs_idx_test_other = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest_other = np.array(selected_imgs_idx_test_other) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest_other].tolist())
# shuffle the corresponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def __getitem__(self, index):
"""
index means index of sets, 0<= index <= batchsz-1
:param index:
:return:
"""
# [setsz, 3, resize, resize]
support_x = torch.FloatTensor(self.setsz, 3, self.resize, self.resize)
# [setsz]
support_y = np.zeros((self.setsz), dtype=np.int32)
# [querysz, 3, resize, resize]
query_x = torch.FloatTensor(self.querysz, 3, self.resize, self.resize)
# [querysz]
flatten_support_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.support_x_batch[index] for item in sublist]
support_y = np.array(
[self.img2label[item.split('/')[0] if item[-1] != "x" else "x"] # filename:n0153282900000005.jpg, the first 9 characters treated as label
for sublist in self.support_x_batch[index] for item in sublist]).astype(np.int32)
if not self.unlabel:
flatten_query_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.query_x_batch[index] for item in sublist]
else:
flatten_query_x = [item for sublist in self.query_x_batch[index] for item in sublist]
# print('global:', support_y, query_y)
# support_y: [setsz]
# query_y: [querysz]
# unique: [n-way], sorted
unique = np.unique(support_y)
random.shuffle(unique)
# relative means the label ranges from 0 to n-way
support_y_relative = np.zeros(self.setsz)
for idx, l in enumerate(unique):
support_y_relative[support_y == l] = idx
# print('relative:', support_y_relative, query_y_relative)
for i, path in enumerate(flatten_support_x):
support_x[i] = self.transform(path)
for i, path in enumerate(flatten_query_x):
query_x[i] = self.transform(path)
# print(support_set_y)
# return support_x, torch.LongTensor(support_y), query_x, torch.LongTensor(query_y)
if not self.unlabel:
query_y_relative = np.zeros(self.querysz)
query_y = np.zeros((self.querysz), dtype=np.int32)
query_y = np.array([self.img2label[item.split('/')[0] if item[-1] != "x" else "x"]
for sublist in self.query_x_batch[index] for item in sublist]).astype(np.int32)
for idx, l in enumerate(unique):
query_y_relative[query_y == l] = idx
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(query_y_relative), torch.LongTensor(query_y), flatten_query_x
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(unique), flatten_query_x
def __len__(self):
# since we have pre-built batchsz sets (episodes), the DataLoader can sample smaller batches of them
return self.batchsz
| 12,430 | 47.74902 | 155 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/learner.py
|
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
class Learner(nn.Module):
"""
"""
def __init__(self, config, imgc, imgsz):
"""
:param config: network config file, type:list of (string, list)
:param imgc: 1 or 3
:param imgsz: 28 or 84
"""
super(Learner, self).__init__()
self.config = config
# this dict contains all tensors needed to be optimized
self.vars = nn.ParameterList()
# running_mean and running_var
self.vars_bn = nn.ParameterList()
for i, (name, param) in enumerate(self.config):
if name is 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name is 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
# gain=1 according to cbfin's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
elif name is 'linear':
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name is 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
def extra_repr(self):
info = ''
for name, param in self.config:
if name is 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name is 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name is 'linear':
tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
info += tmp + '\n'
elif name is 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)'%(param[0])
info += tmp + '\n'
elif name is 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name is 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=True):
"""
This function can be called by finetunning, however, in finetunning, we dont wish to update
running_mean/running_var. Thought weights/bias of bn is updated, it has been separated by fast_weights.
Indeed, to not update running_mean/running_var, we need set update_bn_statistics=False
but weight/bias will be updated and not dirty initial theta parameters via fast_weiths.
:param x: [b, 1, 28, 28]
:param vars:
:param bn_training: set False to not update
:return: x, loss, likelihood, kld
"""
if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
for name, param in self.config:
if name is 'conv2d':
w, b = vars[idx], vars[idx + 1]
# remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name is 'convt2d':
w, b = vars[idx], vars[idx + 1]
# remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name is 'linear':
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
# print('forward:', idx, x.norm().item())
elif name is 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
elif name is 'flatten':
# print(x.shape)
feature = torch.mean(x.view(x.size(0), self.config[0][1][0], -1), 2)
x = x.view(x.size(0), -1)
elif name is 'reshape':
# [b, 8] => [b, 2, 2, 2]
x = x.view(x.size(0), *param)
elif name is 'relu':
x = F.relu(x, inplace=param[0])
elif name is 'leakyrelu':
x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
elif name is 'tanh':
x = F.tanh(x)
elif name is 'sigmoid':
x = torch.sigmoid(x)
elif name is 'upsample':
x = F.upsample_nearest(x, scale_factor=param[0])
elif name is 'max_pool2d':
x = F.max_pool2d(x, param[0], param[1], param[2])
elif name is 'avg_pool2d':
x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
raise NotImplementedError
# make sure variable is used properly
assert idx == len(vars)
assert bn_idx == len(self.vars_bn)
return x, feature
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
if vars is None:
for p in self.vars:
if p.grad is not None:
p.grad.zero_()
else:
for p in vars:
if p.grad is not None:
p.grad.zero_()
def parameters(self):
"""
override this function since initial parameters will return with a generator.
:return:
"""
return self.vars
| 7,789 | 34.733945 | 111 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/VQA_RAD.py
|
import os
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
import numpy as np
from PIL import Image
import random
class VQARAD_maml(Dataset):
"""
NOTICE: meta-learning is different from general supervised learning, especially the concept of batch and set.
batch: contains several sets
sets: conains n_way * k_shot for meta-train set, n_way * n_query for meta-test set.
"""
def __init__(self, root, mode, batchsz, n_way, k_shot, k_query, resize, startidx=0, unlabel = False, t = 0):
"""
:param root: root path of the dataset
:param mode: train, val or test
:param batchsz: batch size of sets, not batch of imgs
:param n_way:
:param k_shot:
:param k_query: num of query imgs per class
:param resize: resize to
:param startidx: start to index label from startidx
"""
self.batchsz = batchsz # batch of set, not batch of imgs
self.n_way = n_way # n-way
self.k_shot = k_shot # k-shot
self.k_query = k_query # for evaluation
self.setsz = self.n_way * self.k_shot # num of samples per set
self.querysz = self.n_way * self.k_query # number of samples per set for evaluation
self.resize = resize # resize to
self.startidx = startidx # index label not from 0, but from startidx
self.unlabel = unlabel
print('shuffle DB :%s, b:%d, %d-way, %d-shot, %d-query, resize:%d' % (
mode, batchsz, n_way, k_shot, k_query, resize))
self.transform = transforms.Compose([lambda x: Image.open(x).convert('L'),
transforms.Resize(self.resize),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(5),
transforms.ToTensor(),
lambda x: x / 255.,
lambda x: x.unsqueeze(0)
])
if mode == 'train':
self.path = os.path.join(root, 't%d'%(t), mode)
else:
self.path = os.path.join(root, mode)
print('--loading data from:', self.path)
data = self.loadData(self.path)# image path
self.data = []
self.img2label = {}
self.label2class = []
for i, (k, v) in enumerate(data.items()):
self.data.append(v) # [[img1, img2, ...], [img111, ...]]
self.img2label[k] = i + self.startidx # {"img_name[:9]":label}
self.label2class.append(k)
self.img2label['x'] = i + 1 + self.startidx
self.cls_num = len(self.data)
if self.unlabel:
self.path_unlabel = os.path.join(root, 't%d'%(t), mode + '_unlabel')
self.data_unlabel = [os.path.join(self.path_unlabel, file) for file in os.listdir(self.path_unlabel)]
self.create_batch_unlabel(self.batchsz)
else:
self.create_batch(self.batchsz)
def loadData(self, path):
"""
return a dict saving the information of csv
:param splitFile: csv file name
:return: {label:[file1, file2 ...]}
"""
dictLabels = {}
for (dirpath, dirnames, _) in os.walk(path):
for dir in dirnames:
dictLabels[dir] = [os.path.join(dir, file) for file in os.listdir(os.path.join(dirpath, dir))]
return dictLabels
def create_other(self, selected_cls):
other_pool = []
for idx, i in enumerate(self.data):
if idx not in selected_cls:
other_pool += i
other_pool = [i + 'x' for i in other_pool]
return other_pool
def create_batch(self, batchsz):
"""
create batch for meta-learning.
'episode' here corresponds to one batch element, and batchsz means how many sets (episodes) we want to retain.
:param batchsz: batch size
:return:
"""
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx[:self.k_shot]) # idx for Dtrain
indexDtest = np.array(selected_imgs_idx[self.k_shot:]) # idx for Dtest
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(self.data[cls])[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot + self.k_query, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other[:self.k_shot]) # idx for Dtrain
indexDtest_other = np.array(selected_imgs_idx_other[self.k_shot:])
support_x.append(
np.array(other_images)[indexDtrain_other].tolist()) # get all images filename for current Dtrain
query_x.append(np.array(other_images)[indexDtest_other].tolist())
# shuffle the corresponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def create_batch_unlabel(self, batchsz):
# Data refinement for unlabeled data; see Algorithm 1 of the paper
self.support_x_batch = [] # support set batch
self.query_x_batch = [] # query set batch
for b in range(batchsz): # for each batch
# 1.select n_way classes randomly
selected_cls = np.random.choice(self.cls_num, self.n_way - 1, False) # no duplicate
np.random.shuffle(selected_cls)
support_x = []
query_x = []
for cls in selected_cls:
# 2. select k_shot + k_query for each class
try:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, False)
except:
selected_imgs_idx = np.random.choice(len(self.data[cls]), self.k_shot, True)
np.random.shuffle(selected_imgs_idx)
indexDtrain = np.array(selected_imgs_idx) # idx for Dtrain
support_x.append(
np.array(self.data[cls])[indexDtrain].tolist()) # get all images filename for current Dtrain
selected_imgs_idx_test = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest = np.array(selected_imgs_idx_test) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest].tolist())
other_images = self.create_other(selected_cls)
try:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, False)
except:
selected_imgs_idx_other = np.random.choice(len(other_images), self.k_shot, True)
np.random.shuffle(selected_imgs_idx_other)
indexDtrain_other = np.array(selected_imgs_idx_other)
support_x.append(
np.array(other_images)[indexDtrain_other].tolist())
selected_imgs_idx_test_other = np.random.choice(len(self.data_unlabel), self.k_query, False)
indexDtest_other = np.array(selected_imgs_idx_test_other) # idx for Dtest
query_x.append(np.array(self.data_unlabel)[indexDtest_other].tolist())
# shuffle the corresponding relation between support set and query set
random.shuffle(support_x)
random.shuffle(query_x)
self.support_x_batch.append(support_x) # append set to current sets
self.query_x_batch.append(query_x) # append sets to current sets
def __getitem__(self, index):
"""
index means index of sets, 0<= index <= batchsz-1
:param index:
:return:
"""
# [setsz, 3, resize, resize]
support_x = torch.FloatTensor(self.setsz, 1, self.resize, self.resize)
# [setsz]
support_y = np.zeros((self.setsz), dtype=np.int32)
# [querysz, 3, resize, resize]
query_x = torch.FloatTensor(self.querysz, 1, self.resize, self.resize)
# [querysz]
flatten_support_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.support_x_batch[index] for item in sublist]
support_y = np.array(
[self.img2label[item.split('/')[0] if item[-1] != "x" else "x"] # filename:n0153282900000005.jpg, the first 9 characters treated as label
for sublist in self.support_x_batch[index] for item in sublist]).astype(np.int32)
if not self.unlabel:
flatten_query_x = [os.path.join(self.path, item if item[-1] != "x" else item[:-1])
for sublist in self.query_x_batch[index] for item in sublist]
else:
flatten_query_x = [item for sublist in self.query_x_batch[index] for item in sublist]
# print('global:', support_y, query_y)
# support_y: [setsz]
# query_y: [querysz]
# unique: [n-way], sorted
unique = np.unique(support_y)
random.shuffle(unique)
# relative means the label ranges from 0 to n-way
support_y_relative = np.zeros(self.setsz)
for idx, l in enumerate(unique):
support_y_relative[support_y == l] = idx
# print('relative:', support_y_relative, query_y_relative)
for i, path in enumerate(flatten_support_x):
support_x[i] = self.transform(path)
for i, path in enumerate(flatten_query_x):
query_x[i] = self.transform(path)
# print(support_set_y)
# return support_x, torch.LongTensor(support_y), query_x, torch.LongTensor(query_y)
if not self.unlabel:
query_y_relative = np.zeros(self.querysz)
query_y = np.zeros((self.querysz), dtype=np.int32)
query_y = np.array([self.img2label[item.split('/')[0] if item[-1] != "x" else "x"]
for sublist in self.query_x_batch[index] for item in sublist]).astype(np.int32)
for idx, l in enumerate(unique):
query_y_relative[query_y == l] = idx
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(query_y_relative), torch.LongTensor(query_y), flatten_query_x
return support_x, torch.LongTensor(support_y_relative), query_x, torch.LongTensor(unique), flatten_query_x
def __len__(self):
# since we have pre-built batchsz sets (episodes), the DataLoader can sample smaller batches of them
return self.batchsz
| 11,779 | 46.692308 | 155 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_train.py
|
import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 32 * 5 * 5
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [32, 3, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 1, 0]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
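# Hedged shape check for the config above on a 3 x 84 x 84 input, given how
# learner.py applies conv2d ([ch_out, ch_in, k, k, stride, padding]) and max_pool2d:
#   84 -> conv(k=3, s=1): 82 -> pool(2, 2): 41 -> conv: 39 -> pool: 19
#      -> conv: 17 -> pool: 8 -> conv: 6 -> pool(k=2, s=1): 5
# so the flattened feature is 32 * 5 * 5, matching dim_hidden_linear.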
# initial MAML model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
# Load dataset
# batchsz here means total episode number
mini = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=10000, resize=args.imgsz, t = args.t_dst)
mini_test = PathVQA_maml(args.data, mode='test', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz)
# Train model
for epoch in range(args.epoch//10000):
# fetch meta_batchsz num of episode each time
db = DataLoader(mini, args.task_num, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
for step, (x_spt, y_spt, x_qry, y_qry, _, _) in enumerate(db):
x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
accs = maml(x_spt, y_spt, x_qry, y_qry)
if step % 50 == 0:
end = time.time()
print('step:', step, '\ttraining acc:', accs, '\ttime:', end - start)
start = time.time()
if (step % 1000 == 0 and step != 0) or (step == len(db) - 1): # evaluation
db_test = DataLoader(mini_test, 1, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
accs_all_test = []
for x_spt, y_spt, x_qry, y_qry, _, _ in db_test:
x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
accs = maml.finetunning(x_spt, y_spt, x_qry, y_qry)
accs_all_test.append(accs)
# [b, update_step+1]
accs = np.array(accs_all_test).mean(axis=0).astype(np.float16)
end = time.time()
print('***************** Test acc:', accs, '\ttime:', end - start)
start = time.time()
# Save model
model_dir = args.output + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d'%(args.imgsz, args.n_way, args.k_spt, args.t_dst)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % epoch)
print('saving model to:', model_dir)
torch.save(maml.state_dict(), model_path)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=10000)
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=3)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--output', type=str, help='output directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
argparser.add_argument('--t_dst', type=int, help='t-th step', default=0)
args = argparser.parse_args()
main(args)
| 5,532 | 40.291045 | 143 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_fuse.py
|
import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import pickle as p
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def num_tensors(model):
tmp = filter(lambda x: x.requires_grad, model.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
return num
def feature_extraction(maml, db_test, device):
for i in range(args.nums_t):
model_dir = args.input + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, i)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
print('-------load model weight from:', model_path)
maml.load_state_dict(torch.load(model_path))
start = time.time()
logit_softmax_list = []
feature_list = []
path_list = []
for x_spt, y_spt, x_qry, y_qry, _, flatten_query_x in db_test:
x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
logit_softmax, feature = maml.feature_validation(x_spt, y_spt, x_qry, y_qry)
logit_softmax_list += logit_softmax
feature_list += list(feature.cpu())
path_list += flatten_query_x
if not os.path.exists(args.feature_output):
os.makedirs(args.feature_output)
p.dump(feature_list, open(args.feature_output + '/feature_t%d.pkl'%(i), 'wb'))
p.dump(logit_softmax_list, open(args.feature_output + '/logit_softmax_t%d.pkl'%(i), 'wb'))
p.dump(path_list, open(args.feature_output + '/path_t%d.pkl'%(i), 'wb'))
# [b, update_step+1]
end = time.time()
print('***************** Time:', end - start)
def load_feature_and_logit(args):
features = []
logits = []
for t in range(args.nums_t):
features.append(torch.stack(p.load(open(args.feature_output + '/feature_t%d.pkl' % (t), 'rb'))))
logits.append(p.load(open(args.feature_output + '/logit_softmax_t%d.pkl' % (t), 'rb')))
features = torch.stack(features).cpu().transpose(0, 1)
logits = torch.tensor(logits).transpose(0, 1)
return features, logits
def fuse_score(features, logits, alpha=0.8):
"""
Compute fuse score using equation: S_F = \gamma S_P + (1-\gamma)\sum_{t=1}^m 1 - Cosine \left(F_{c}, F_t\right) \forall F_{c} \neq F_t
For more detail, please visit Algorithm 2 of the paper.
:param features:
:param logits:
:param alpha:
:return results: score list, type:
"""
cos = torch.nn.CosineSimilarity(dim=0)
results = []
for idx in range(len(features)):
if idx%100 == 0:
print('%d / %d'%(idx, len(features)))
feature_sample = features[idx]
logit_sample = logits[idx]
sample_results = []
for i in range(len(feature_sample)):
row_results = []
for j in range(len(feature_sample)):
if i != j:
sim = cos(feature_sample[i], feature_sample[j])
div = 1 - sim
row_results.append(div)
row_results = sum(row_results)
fuse_score = (alpha * logit_sample[i]) + ((1 - alpha) * row_results)
sample_results.append(fuse_score)
results.append(sample_results)
return results
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 32 * 5 * 5
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [32, 3, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 1, 0]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
# initial MAML model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
print(maml)
print('Total trainable tensors:', num_tensors(maml))
# Load validation dataset
# batchsz here means total episode number
mini_test = PathVQA_maml(args.data, mode='val', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz)
db_test = DataLoader(mini_test, 1, shuffle=False, num_workers=1, pin_memory=True)
# Extract validation features, logits for each model and save them
feature_extraction(maml, db_test, device)
# Load all features and logits
features, logits = load_feature_and_logit(args)
# Compute fuse score
results = fuse_score(features, logits)
# Show results to select suitable models
results = torch.tensor(results).mean(dim=0)
print(results)
print('------- sort', torch.sort(results, descending=True))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=3)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
argparser.add_argument('--nums_t', type=int, help='num models', default=6) # m refinement models + first model (5 + 1 = 6)
argparser.add_argument('--feature_output', type=str, help='input directory for saving feature', default='features')
args = argparser.parse_args()
main(args)
| 7,202 | 39.926136 | 141 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/VQA_RAD_half.py
|
import torch, os
import numpy as np
from VQA_RAD import VQARAD_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import shutil
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
return m, h
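# Usage sketch (hypothetical accuracies): mean_confidence_interval returns the mean and
# the half-width of a Student-t confidence interval,
#   h = SEM(accs) * t_ppf((1 + confidence) / 2, n - 1),
# so results can be reported as m +/- h, e.g.
#   >>> m, h = mean_confidence_interval(np.array([0.61, 0.58, 0.64, 0.60]))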
def unlabel_processing(dataset, unlabel_pool, threshold_fre = 47, threshold_max = 15):
"""
    Automatic annotation of collected unlabeled samples.
:param dataset:
:param unlabel_pool:
:param threshold_fre:
:param threshold_max:
    :return count:
"""
count = 0
new_path = dataset.path + '_unlabel_add'
for i in unlabel_pool:
if len(unlabel_pool[i]) > threshold_fre:
unique = set(unlabel_pool[i])
ele_count = {}
for j in unique:
ele_count[j] = unlabel_pool[i].count(j)
max_key = max(ele_count, key=ele_count.get)
max_value = ele_count[max_key]
all_values = list(ele_count.values())
if all_values.count(max_value) == 1 and max_value > threshold_max:
label = int(max_key)
if label != 9:
count += 1
class_name = dataset.label2class[label]
dst_dir = os.path.join(new_path, class_name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
dst = os.path.join(dst_dir, i.split('/')[-1])
os.rename(i, dst)
print('The number of additional unlabeled images:', count)
return count
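# Sketch with a hypothetical pool entry: an unlabeled image is auto-annotated only if it
# was queried often enough and one class wins the vote by a clear margin.
#   >>> unlabel_pool = {'data/.../img_001.jpg': [2] * 20 + [5] * 30}
# With threshold_fre=47 and threshold_max=15: 50 votes > 47, the unique maximum
# (class 5 with 30 votes) exceeds 15 and is not the excluded label 9, so the image is
# moved into the class-5 subfolder of '<dataset.path>_unlabel_add'.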
def label_processing(dataset, label_pool, threshold=0.3):
"""
    Automatic removal of samples that have high uncertainty.
:param dataset:
:param label_pool:
:param threshold:
:return count:
"""
count = 0
new_path = dataset.path + '_label_remove'
if not os.path.exists(new_path):
os.makedirs(new_path)
for i in label_pool:
if max(label_pool[i]) < threshold:
count+=1
dst = os.path.join(new_path, i.split('/')[-1])
os.rename(i, dst)
print('The number of removed labeled images:', count)
return count
def final_processing(dataset):
"""
    Refine the meta-data and the unlabeled data.
:param dataset:
:return
"""
root = dataset.path
src = root + '_unlabel_add'
list_dirs = os.listdir(src)
for dir in list_dirs:
files = os.listdir(os.path.join(src, dir))
for file in files:
shutil.move(os.path.join(src, dir, file), os.path.join(root, dir))
shutil.rmtree(src)
src = root + '_label_remove'
list_files = os.listdir(src)
for i in list_files:
shutil.move(os.path.join(src, i), root + '_unlabel')
shutil.rmtree(src)
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 64 * 8 * 8
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [64, 1, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
# initial model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
model_dir = args.input + '/VQARAD_maml%d_miccai2021_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, args.t_dst - 1)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
print('-------load model weight from:', model_path)
maml.load_state_dict(torch.load(model_path))
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
# source and destination directory
t_dst = args.t_dst
src = os.path.join(args.data, 't%d'%(t_dst - 1))
dst = os.path.join(args.data, 't%d'%(t_dst))
# copy original dataset to the destination folder
shutil.copytree(src, dst)
####################################################################
## PROCESS UNLABEL DATA
####################################################################
# batchsz here means total episode number
mini_test_unlabel = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz, unlabel=True, t=t_dst)
db_test_unlabel = DataLoader(mini_test_unlabel, 1, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
unlabel_pool = {}
for x_spt, y_spt, x_qry, y_spt_real, flatten_query_x in db_test_unlabel:
x_spt, y_spt, x_qry, y_spt_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_spt_real.squeeze(0).to(device)
results = maml.unlabel_pooling(x_spt, y_spt, x_qry, y_spt_real, flatten_query_x)
for i in results:
if i not in unlabel_pool.keys():
unlabel_pool[i] = []
unlabel_pool[i] += results[i]
end = time.time()
print('***************** time:', end - start)
unlabel_processing(mini_test_unlabel, unlabel_pool)
####################################################################
## PROCESS LABEL DATA
####################################################################
# batchsz here means total episode number
mini_test_label = VQARAD_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz, unlabel=False, t = t_dst)
db_test_label = DataLoader(mini_test_label, 1, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
label_pool = {}
for x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x in db_test_label:
x_spt, y_spt, x_qry, y_qry, y_qry_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device), y_qry_real.squeeze(0).to(device)
results = maml.label_pooling(x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x)
for i in results:
if i not in label_pool.keys():
label_pool[i] = []
label_pool[i] += results[i]
end = time.time()
print('***************** time:', end - start)
label_processing(mini_test_label, label_pool)
####################################################################
## CREATE NEW LABEL AND UNLABEL DATASET and REMOVE TEMP FOLDERS
####################################################################
final_processing(mini_test_label)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
argparser.add_argument('--n_way', type=int, help='n way', default=3)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=3)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=3)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=1)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=5)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetuning', default=10)
argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/vqarad_maml/')
argparser.add_argument('--t_dst', type=int, help='t-th step', default=1)
args = argparser.parse_args()
main(args)
| 8,795 | 38.621622 | 138 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/meta.py
|
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from learner import Learner
from copy import deepcopy
class Meta(nn.Module):
"""
Meta Learner
"""
def __init__(self, args, config):
"""
:param args:
"""
super(Meta, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
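    # Sketch (hypothetical norms): if the gradients have a combined L2 norm of 20 and
    # max_norm=10, then clip_coef = 10 / (20 + 1e-6) ~= 0.5 and every gradient tensor is
    # scaled in place by ~0.5; if the combined norm is already below max_norm, the
    # gradients are left unchanged. The returned value is the pre-clipping total norm
    # divided by the number of gradient tensors.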
def forward(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [b, setsz, c_, h, w]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, c_, h, w]
:param y_qry: [b, querysz]
:return:
"""
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
# 1. run the i-th task and compute loss for k=0
logits, _ = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q, _ = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q, _ = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
logits, _ = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q, _ = self.net(x_qry[i], fast_weights, bn_training=True)
                # loss_q is overwritten at every step; only the loss from the last update step is kept.
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()  # convert to a Python scalar
corrects[k + 1] = corrects[k + 1] + correct
# end of all tasks
# sum over all losses on query set across all tasks
loss_q = losses_q[-1] / task_num
# optimize theta parameters
self.meta_optim.zero_grad()
loss_q.backward()
# print('meta update')
# for p in self.net.parameters()[:5]:
# print(torch.norm(p).item())
self.meta_optim.step()
accs = np.array(corrects) / (querysz * task_num)
return accs
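    # Usage sketch (shapes as in the docstring above, values hypothetical): each inner
    # step is a plain SGD update on the support loss,
    #   fast_weights = theta - update_lr * grad(L_support(theta)),
    # while the averaged query loss of the last step is backpropagated through
    # self.meta_optim (Adam) to update theta itself.
    #   >>> accs = maml(x_spt, y_spt, x_qry, y_qry)  # accs[k]: query accuracy after k inner steps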
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
querysz = x_qry.size(0)
corrects = [0 for _ in range(self.update_step_test + 1)]
        # in order not to ruin the state of running_mean/variance and bn_weight/bias,
        # we fine-tune on a copy of the model instead of self.net
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits, _ = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q, _ = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q, _ = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits, _ = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q, _ = net(x_qry, fast_weights, bn_training=True)
            # loss_q is overwritten at every step; only the loss from the last update step is kept.
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry).sum().item()  # convert to a Python scalar
corrects[k + 1] = corrects[k + 1] + correct
del net
accs = np.array(corrects) / querysz
return accs
def unlabel_pooling(self, x_spt, y_spt, x_qry, y_spt_real, flatten_query_x):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
querysz = x_qry.size(0)
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits, _ = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits, _ = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
with torch.no_grad():
logits_q, _ = net(x_qry, fast_weights, bn_training=True)
logits_q_softmax = F.softmax(logits_q, dim=1)
pred_q = logits_q_softmax.argmax(dim=1)
del net
# TODO - IGNORE
results = dict()
for i, path in enumerate(flatten_query_x):
# results[path[0]] = [y_spt_real[pred_q[i]].item(), logits_q_softmax[i][pred_q[i]].item()]
if path[0] not in results.keys():
results[path[0]] = []
results[path[0]] += [y_spt_real[pred_q[i]].item()]
return results
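    # Sketch of the returned structure (hypothetical paths/labels): for each query image
    # of the episode, unlabel_pooling records the predicted "real" class id keyed by the
    # image path, e.g. {'data/.../img_001.jpg': [5]}. The calling script (e.g.
    # VQA_RAD_half.py) accumulates these lists across episodes into unlabel_pool, which
    # is later thresholded by unlabel_processing() to auto-annotate confident images.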
def label_pooling(self, x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
querysz = x_qry.size(0)
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits, _ = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits, _ = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
with torch.no_grad():
logits_q, _ = net(x_qry, fast_weights, bn_training=True)
logits_q_softmax = F.softmax(logits_q, dim=1)
pred_q = logits_q_softmax.argmax(dim=1)
del net
results = dict()
# TODO-IGNORE
for i, path in enumerate(flatten_query_x):
if y_qry_real[i] == 20:
continue
if path[0] not in results.keys():
results[path[0]] = []
results[path[0]].append(logits_q_softmax[i][y_qry[i]].item())
return results
def feature_validation(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
:return:
"""
assert len(x_spt.shape) == 4
querysz = x_qry.size(0)
net = deepcopy(self.net)
# 1. run the i-th task and compute loss for k=0
logits, _ = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
for k in range(1, self.update_step_test):
# 1. run the i-th task and compute loss for k=1~K-1
logits, _ = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
with torch.no_grad():
logits_q, feature = net(x_qry, fast_weights, bn_training=True)
logits_q_softmax = F.softmax(logits_q, dim=1)
# pred_q = logits_q_softmax.argmax(dim=1)
del net
# TODO-IGNORE
results = []
for i in range(len(x_qry)):
results.append(logits_q_softmax[i][y_qry[i]].item())
return results, feature
def main():
pass
if __name__ == '__main__':
main()
| 12,796 | 34.350829 | 110 |
py
|
MICCAI21_MMQ
|
MICCAI21_MMQ-main/mmq_maml/pathVQA_maml_half.py
|
import torch, os
import numpy as np
from pathVQA_maml import PathVQA_maml
import scipy.stats
from torch.utils.data import DataLoader
import argparse
import time
from meta import Meta
import shutil
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2, n - 1)
return m, h
def unlabel_processing(dataset, unlabel_pool, threshold_fre = 27, threshold_max = 9):
"""
    Automatic annotation of collected unlabeled samples.
:param dataset:
:param unlabel_pool:
:param threshold_fre:
:param threshold_max:
:return count:
"""
count = 0
new_path = dataset.path + '_unlabel_add'
for i in unlabel_pool:
if len(unlabel_pool[i]) > threshold_fre:
unique = set(unlabel_pool[i])
ele_count = {}
for j in unique:
ele_count[j] = unlabel_pool[i].count(j)
max_key = max(ele_count, key=ele_count.get)
max_value = ele_count[max_key]
all_values = list(ele_count.values())
if all_values.count(max_value) == 1 and max_value > threshold_max:
label = int(max_key)
if label != 20:
count += 1
class_name = dataset.label2class[label]
dst_dir = os.path.join(new_path, class_name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
dst = os.path.join(dst_dir, i.split('/')[-1])
os.rename(i, dst)
print('The number of additional unlabeled images:', count)
return count
def label_processing(dataset, label_pool, threshold=0.3):
"""
    Automatic removal of samples that have high uncertainty.
:param dataset:
:param label_pool:
:param threshold:
:return count:
"""
count = 0
new_path = dataset.path + '_label_remove'
if not os.path.exists(new_path):
os.makedirs(new_path)
for i in label_pool:
if max(label_pool[i]) < threshold:
count+=1
dst = os.path.join(new_path, i.split('/')[-1])
os.rename(i, dst)
print('The number of removed labeled images:', count)
return count
def final_processing(dataset):
"""
    Refine the meta-data and the unlabeled data.
:param dataset:
:return
"""
root = dataset.path
src = root + '_unlabel_add'
list_dirs = os.listdir(src)
for dir in list_dirs:
files = os.listdir(os.path.join(src, dir))
for file in files:
shutil.move(os.path.join(src, dir, file), os.path.join(root, dir))
shutil.rmtree(src)
src = root + '_label_remove'
list_files = os.listdir(src)
for i in list_files:
shutil.move(os.path.join(src, i), root + '_unlabel')
shutil.rmtree(src)
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
# add other class (n_way + 1)
args.n_way += 1
print(args)
# hidden layer dimension config
if args.imgsz == 84:
dim_hidden_linear = 32 * 5 * 5
elif args.imgsz == 128:
dim_hidden_linear = 32 * 11 * 11
# model config
config = [
('conv2d', [32, 3, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 1, 0]),
('flatten', []),
('linear', [args.n_way, dim_hidden_linear])
]
# initial MAML model
device = torch.device('cuda')
maml = Meta(args, config).to(device)
model_dir = args.input + '/maml%d_miccai2021_optimization_newmethod_%dway_%dshot_t%d' % (args.imgsz, args.n_way, args.k_spt, args.t_dst - 1)
model_path = os.path.join(model_dir, 'model_epoch%d.pth' % args.epoch)
print('-------load model weight from:', model_path)
maml.load_state_dict(torch.load(model_path))
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
# source and destination directory
t_dst = args.t_dst
src = os.path.join(args.data, 't%d'%(t_dst - 1))
dst = os.path.join(args.data, 't%d'%(t_dst))
# Copy original dataset to the destination folder
shutil.copytree(src, dst)
####################################################################
## PROCESS UNLABEL DATA
####################################################################
# batchsz here means total episode number
mini_test_unlabel = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz, unlabel=True, t=t_dst)
db_test_unlabel = DataLoader(mini_test_unlabel, 1, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
unlabel_pool = {}
for x_spt, y_spt, x_qry, y_spt_real, flatten_query_x in db_test_unlabel:
x_spt, y_spt, x_qry, y_spt_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_spt_real.squeeze(0).to(device)
results = maml.unlabel_pooling(x_spt, y_spt, x_qry, y_spt_real, flatten_query_x)
for i in results:
if i not in unlabel_pool.keys():
unlabel_pool[i] = []
unlabel_pool[i] += results[i]
end = time.time()
print('***************** time:', end - start)
unlabel_processing(mini_test_unlabel, unlabel_pool)
####################################################################
## PROCESS LABEL DATA
####################################################################
# batchsz here means total episode number
mini_test_label = PathVQA_maml(args.data, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=600, resize=args.imgsz, unlabel=False, t = t_dst)
db_test_label = DataLoader(mini_test_label, 1, shuffle=True, num_workers=1, pin_memory=True)
start = time.time()
label_pool = {}
for x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x in db_test_label:
x_spt, y_spt, x_qry, y_qry, y_qry_real = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device), y_qry_real.squeeze(0).to(device)
results = maml.label_pooling(x_spt, y_spt, x_qry, y_qry, y_qry_real, flatten_query_x)
for i in results:
if i not in label_pool.keys():
label_pool[i] = []
label_pool[i] += results[i]
end = time.time()
print('***************** time:', end - start)
label_processing(mini_test_label, label_pool)
####################################################################
## CREATE NEW LABEL AND UNLABEL DATASET and REMOVE TEMP FOLDERS
####################################################################
final_processing(mini_test_label)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=0)
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=84)
argparser.add_argument('--imgc', type=int, help='imgc', default=3)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for finetuning', default=10)
argparser.add_argument('--input', type=str, help='input directory for saving models', default='saved_models')
argparser.add_argument('--data', type=str, help='data directory', default='data/pathvqa_maml/')
argparser.add_argument('--t_dst', type=int, help='t-th step', default=1)
args = argparser.parse_args()
main(args)
| 8,907 | 38.591111 | 144 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/test.py
|
"""General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates the model and dataset given the options. It will hard-code some parameters.
It then runs inference for '--num_test' images and saves the results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results for only one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
In contrast, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
try:
import wandb
except ImportError:
print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# initialize logger
if opt.use_wandb:
wandb_run = wandb.init(project=opt.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
wandb_run._label(repo='CycleGAN-and-pix2pix')
# create a website
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
if opt.load_iter > 0: # load_iter is 0 by default
web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, use_wandb=opt.use_wandb)
webpage.save() # save the HTML
| 4,545 | 55.123457 | 130 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/train.py
|
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates the model, dataset, and visualizer given the options.
It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves the models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
total_iters = 0 # the total number of training iterations
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
model.update_learning_rate() # update learning rates in the beginning of every epoch.
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
| 4,933 | 62.25641 | 186 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/options/train_options.py
|
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
"""This class includes training options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# visdom and HTML visualization parameters
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
self.isTrain = True
return parser
| 3,447 | 83.097561 | 210 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/options/base_options.py
|
import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# wandb parameters
parser.add_argument('--use_wandb', action='store_true', help='if specified, then init wandb logging')
parser.add_argument('--wandb_project_name', type=str, default='CycleGAN-and-pix2pix', help='specify wandb project name')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
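    # For example (based on the option setters defined elsewhere in this repo): with
    # '--model pix2pix', Pix2PixModel.modify_commandline_options switches the defaults to
    # norm='batch', netG='unet_256', dataset_mode='aligned' and, at train time, adds
    # '--lambda_L1'; the dataset-specific setter is then applied the same way before the
    # final parse_args() call above.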
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
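        # e.g. '--gpu_ids 0,1' yields opt.gpu_ids == [0, 1] and selects GPU 0 as the
        # current CUDA device, while '--gpu_ids -1' yields an empty list and the model
        # runs on CPU (hypothetical flag values for illustration).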
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
| 8,327 | 58.485714 | 235 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/options/__init__.py
|
"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
| 136 | 67.5 | 135 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/options/test_options.py
|
from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared options
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm have different behavior during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
parser.set_defaults(model='test')
# To avoid cropping, the load_size should be the same as crop_size
parser.set_defaults(load_size=parser.get_default('crop_size'))
self.isTrain = False
return parser
| 1,158 | 47.291667 | 108 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/base_model.py
|
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
old_lr = self.optimizers[0].param_groups[0]['lr']
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate %.7f -> %.7f' % (old_lr, lr))
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| 10,407 | 44.056277 | 260 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/colorization_model.py
|
from .pix2pix_model import Pix2PixModel
import torch
from skimage import color # used for lab2rgb
import numpy as np
class ColorizationModel(Pix2PixModel):
"""This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
    The model training requires the '--dataset_mode colorization' dataset.
It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, we use 'colorization' dataset for this model.
See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
"""
Pix2PixModel.modify_commandline_options(parser, is_train)
parser.set_defaults(dataset_mode='colorization')
return parser
def __init__(self, opt):
"""Initialize the class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
For visualization, we set 'visual_names' as 'real_A' (input real image),
'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image)
We convert the Lab image 'real_B' (inherited from Pix2pixModel) to a RGB image 'real_B_rgb'.
we convert the Lab image 'fake_B' (inherited from Pix2pixModel) to a RGB image 'fake_B_rgb'.
"""
# reuse the pix2pix model
Pix2PixModel.__init__(self, opt)
# specify the images to be visualized.
self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
def lab2rgb(self, L, AB):
"""Convert an Lab tensor image to a RGB numpy output
Parameters:
L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
Returns:
rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
"""
AB2 = AB * 110.0
L2 = (L + 1.0) * 50.0
Lab = torch.cat([L2, AB2], dim=1)
Lab = Lab[0].data.cpu().float().numpy()
Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
rgb = color.lab2rgb(Lab) * 255
return rgb
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
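# Hedged usage sketch added for illustration; it is not part of the original file.
# It assumes an already-constructed ColorizationModel instance `model` and dummy
# Lab tensors in the [-1, 1] range documented above:
#
#     L = torch.rand(1, 1, 64, 64) * 2 - 1    # fake L channel
#     AB = torch.rand(1, 2, 64, 64) * 2 - 1   # fake ab channels
#     rgb = model.lab2rgb(L, AB)              # numpy RGB image with values in [0, 255]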
| 3,013 | 42.681159 | 141 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/pix2pix_model.py
|
import torch
from .base_model import BaseModel
from . import networks
class Pix2PixModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG(self.real_A) # G(A)
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G() # calculate gradients for G
self.optimizer_G.step() # update G's weights
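# Hedged usage sketch added for illustration; it is not part of the original file.
# It assumes `opt` comes from TrainOptions().parse() and `dataset` from
# data.create_dataset(opt), mirroring the repository's train.py:
#
#     model = Pix2PixModel(opt)
#     model.setup(opt)                  # load and print networks; create schedulers
#     for data in dataset:              # each item is a dict with 'A', 'B' and path keys
#         model.set_input(data)
#         model.optimize_parameters()   # one discriminator update, then one generator update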
| 6,519 | 49.9375 | 162 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/networks.py
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
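# Hedged example added for illustration: with the 'linear' policy the learning rate
# stays at its initial value for the first opt.n_epochs epochs and then decays
# linearly to zero over the following opt.n_epochs_decay epochs. A stand-in option
# object (argparse.Namespace instead of BaseOptions) is assumed:
#
#     dummy_opt = argparse.Namespace(lr_policy='linear', epoch_count=1,
#                                    n_epochs=100, n_epochs_decay=100)
#     dummy_optim = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=2e-4)
#     scheduler = get_scheduler(dummy_optim, dummy_opt)   # LambdaLR wrapping lambda_rule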
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (float) - - label for a real image
target_fake_label (float) - - label for a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
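# Hedged usage sketch added for illustration: in a WGAN-GP critic update the penalty
# is typically added to the critic loss before the backward pass. `netD`, `real`,
# `fake` and `device` are assumed to be defined by the caller:
#
#     loss_D = netD(fake.detach()).mean() - netD(real).mean()
#     gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device, lambda_gp=10.0)
#     (loss_D + gp).backward()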
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
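# Hedged shape-check sketch added for illustration: with num_downs=8 (the 'unet_256'
# configuration) a 256x256 input is reduced to a 1x1 bottleneck and restored to its
# original resolution:
#
#     netG = UnetGenerator(3, 3, 8)                 # default ngf=64, batch norm
#     out = netG(torch.randn(1, 3, 256, 256))       # out.shape == (1, 3, 256, 256)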
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
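if __name__ == '__main__':
    # Hedged smoke test added for illustration; it is not part of the original
    # networks.py. It assumes a CPU-only run (gpu_ids=[]) and only checks that the
    # default pix2pix generator/discriminator and the vanilla GAN loss fit together.
    x = torch.randn(1, 3, 256, 256)                     # fake input image batch
    netG = define_G(3, 3, 64, 'unet_256', gpu_ids=[])   # U-Net generator with batch norm
    netD = define_D(3 + 3, 64, 'basic', gpu_ids=[])     # conditional PatchGAN (input + output channels)
    fake = netG(x)                                      # same spatial size as the input
    pred = netD(torch.cat([x, fake], 1))                # one prediction per overlapping patch
    loss = GANLoss('vanilla')(pred, True)
    print(fake.shape, pred.shape, loss.item())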
| 28,408 | 45.04376 | 167 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/template_model.py
|
"""Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from .base_model import BaseModel
from . import networks
class TemplateModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
if is_train:
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['G'] # get_current_losses will look up the attribute self.loss_G
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['data_A', 'data_B', 'output']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
self.model_names = ['G']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
if self.isTrain: # only defined during training time
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
# We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
self.criterionLoss = torch.nn.L1Loss()
# define and initialize optimizers. You can define one optimizer for each network.
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer]
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
self.output = self.netG(self.data_A) # generate output image given the input data_A
def backward(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# calculate the intermediate results if necessary; here self.output has been computed during function <forward>
# calculate loss given the input and intermediate results
self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
self.optimizer.zero_grad() # clear network G's existing gradients
self.backward() # calculate gradients for network G
self.optimizer.step() # update gradients for network G
| 5,951 | 58.52 | 177 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/__init__.py
|
"""This package contains modules related to objective functions, optimizations, and network architectures.
To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate loss, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
In the function <__init__>, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""
import importlib
from models.base_model import BaseModel
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
In the file, the class called <ModelName>Model() will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
"""
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BaseModel):
model = cls
if model is None:
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
exit(0)
return model
def get_option_setter(model_name):
"""Return the static method <modify_commandline_options> of the model class."""
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_options
def create_model(opt):
"""Create a model given the option.
This function instantiates the model class specified by the option 'opt.model'.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
"""
model = find_model_using_name(opt.model)
instance = model(opt)
print("model [%s] was created" % type(instance).__name__)
return instance
| 3,072 | 44.191176 | 250 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/test_model.py
|
from .base_model import BaseModel
from . import networks
class TestModel(BaseModel):
""" This TesteModel can be used to generate CycleGAN results for only one direction.
This model will automatically set '--dataset_mode single', which only loads the images from one collection.
See the test instruction for more details.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
The model can only be used during test time. It requires '--dataset_mode single'.
You need to specify the network using the option '--model_suffix'.
"""
assert not is_train, 'TestModel cannot be used during training time'
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
assert(not opt.isTrain)
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = []
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real', 'fake']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
self.model_names = ['G' + opt.model_suffix] # only generator is needed.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see <BaseModel.load_networks>
setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self.
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
We need to use 'single_dataset' dataset mode. It only loads images from one domain.
"""
self.real = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
"""Run forward pass."""
self.fake = self.netG(self.real) # G(real)
def optimize_parameters(self):
"""No optimization for test model."""
pass
| 3,151 | 44.028571 | 160 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/models/cycle_gan_model.py
|
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
self.backward_D_B() # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
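# Hedged loss-composition note added for illustration; it is not part of the original
# file. With the default weights (lambda_A = lambda_B = 10, lambda_identity = 0.5),
# the generator objective minimized in backward_G is, schematically:
#
#     loss_G = GAN(D_A(G_A(A))) + GAN(D_B(G_B(B)))
#              + 10 * ||G_B(G_A(A)) - A||_1 + 10 * ||G_A(G_B(B)) - B||_1
#              + 0.5 * 10 * (||G_A(B) - B||_1 + ||G_B(A) - A||_1)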
| 10,557 | 53.14359 | 362 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/util/image_pool.py
|
import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
With probability 0.5, the buffer will return the input images.
With probability 0.5, the buffer will return images previously stored in the buffer,
and insert the current images to the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
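if __name__ == '__main__':
    # Hedged demo added for illustration; it is not part of the original file.
    # It fills a pool of size 2 and shows that, once the buffer is full, query()
    # randomly mixes the current image with a previously stored one.
    pool = ImagePool(pool_size=2)
    for step in range(4):
        batch = torch.full((1, 3, 4, 4), float(step))   # fake batch tagged with its step index
        returned = pool.query(batch)
        print('step %d -> returned value %.0f' % (step, returned[0, 0, 0, 0].item()))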
| 2,226 | 39.490909 | 140 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/util/html.py
|
import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
"""This HTML class allows us to save images and write texts into a single HTML file.
It consists of functions such as <add_header> (add a text header to the HTML file),
<add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
"""
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir/images/
title (str) -- the webpage name
refresh (int) -- how often the website refreshes itself; if 0, no refreshing
"""
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
"""Return the directory that stores images"""
return self.img_dir
def add_header(self, text):
"""Insert a header to the HTML file
Parameters:
text (str) -- the header text
"""
with self.doc:
h3(text)
def add_images(self, ims, txts, links, width=400):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperlinks; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
def save(self):
"""save the current content to the HMTL file"""
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__': # we show an example usage here.
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims, txts, links = [], [], []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
| 3,223 | 36.057471 | 157 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/util/visualizer.py
|
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
try:
import wandb
except ImportError:
print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256, use_wandb=False):
"""Save images to the disk.
Parameters:
webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
ims_dict = {}
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
if use_wandb:
ims_dict[label] = wandb.Image(im)
webpage.add_images(ims, txts, links, width=width)
if use_wandb:
wandb.log(ims_dict)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
self.use_wandb = opt.use_wandb
self.wandb_project_name = opt.wandb_project_name
self.current_epoch = 0
self.ncols = opt.display_ncols
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_wandb:
self.wandb_run = wandb.init(project=self.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
self.wandb_run._label(repo='CycleGAN-and-pix2pix')
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - if save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_wandb:
columns = [key for key, _ in visuals.items()]
columns.insert(0, 'epoch')
result_table = wandb.Table(columns=columns)
table_row = [epoch]
ims_dict = {}
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
wandb_image = wandb.Image(image_numpy)
table_row.append(wandb_image)
ims_dict[label] = wandb_image
self.wandb_run.log(ims_dict)
if epoch != self.current_epoch:
self.current_epoch = epoch
result_table.add_data(*table_row)
self.wandb_run.log({"Result": result_table})
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)  # convert the tensor stored for this label
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_wandb:
self.wandb_run.log(losses)
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
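# Illustrative training-loop sketch (not part of the original file). `opt`, `model` and
# `dataset` are assumed to come from the options/models/data packages, as in train.py:
#
#     visualizer = Visualizer(opt)
#     for epoch in range(1, opt.n_epochs + 1):
#         visualizer.reset()
#         for i, data in enumerate(dataset):
#             model.set_input(data)
#             model.optimize_parameters()
#             visualizer.display_current_results(model.get_current_visuals(), epoch, True)
#             visualizer.print_current_losses(epoch, i, model.get_current_losses(), 0.1, 0.01)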
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/util/util.py
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
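# Illustrative sketch (not part of the original file): tensor2im expects an NCHW tensor
# scaled to [-1, 1], which is what the dataset transforms in this repo produce.
#
#     import torch
#     fake = torch.rand(1, 3, 256, 256) * 2 - 1  # a batch of one RGB image in [-1, 1]
#     im = tensor2im(fake)                        # H x W x 3 uint8 array in [0, 255]
#     save_image(im, 'fake.png')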
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/util/__init__.py
"""This package includes a miscellaneous collection of useful helper functions."""
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/util/get_data.py
from __future__ import print_function
import os
import tarfile
import requests
from warnings import warn
from zipfile import ZipFile
from bs4 import BeautifulSoup
from os.path import abspath, isdir, join, basename
class GetData(object):
"""A Python script for downloading CycleGAN or pix2pix datasets.
Parameters:
technique (str) -- One of: 'cyclegan' or 'pix2pix'.
verbose (bool) -- If True, print additional information.
Examples:
>>> from util.get_data import GetData
>>> gd = GetData(technique='cyclegan')
>>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh'
and 'scripts/download_cyclegan_model.sh'.
"""
def __init__(self, technique='cyclegan', verbose=True):
url_dict = {
'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
}
self.url = url_dict.get(technique.lower())
self._verbose = verbose
def _print(self, text):
if self._verbose:
print(text)
@staticmethod
def _get_options(r):
soup = BeautifulSoup(r.text, 'lxml')
options = [h.text for h in soup.find_all('a', href=True)
if h.text.endswith(('.zip', 'tar.gz'))]
return options
def _present_options(self):
r = requests.get(self.url)
options = self._get_options(r)
print('Options:\n')
for i, o in enumerate(options):
print("{0}: {1}".format(i, o))
choice = input("\nPlease enter the number of the "
"dataset above you wish to download:")
return options[int(choice)]
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, "wb") as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError("Unknown File Type: {0}.".format(base))
self._print("Unpacking Data...")
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
def get(self, save_path, dataset=None):
"""
Download a dataset.
Parameters:
save_path (str) -- A directory to save the data to.
dataset (str) -- (optional). A specific dataset to download.
Note: this must include the file extension.
If None, options will be presented for you
to choose from.
Returns:
save_path_full (str) -- the absolute path to the downloaded data.
"""
if dataset is None:
selected_dataset = self._present_options()
else:
selected_dataset = dataset
save_path_full = join(save_path, selected_dataset.split('.')[0])
if isdir(save_path_full):
warn("\n'{0}' already exists. Voiding Download.".format(
save_path_full))
else:
self._print('Downloading Data...')
url = "{0}/{1}".format(self.url, selected_dataset)
self._download_data(url, save_path=save_path)
return abspath(save_path_full)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/scripts/test_before_push.py
# Simple script to make sure basic usage
# such as training, testing, saving and loading
# runs without errors.
import os
def run(command):
print(command)
exit_status = os.system(command)
if exit_status > 0:
exit(1)
if __name__ == '__main__':
# download mini datasets
if not os.path.exists('./datasets/mini'):
run('bash ./datasets/download_cyclegan_dataset.sh mini')
if not os.path.exists('./datasets/mini_pix2pix'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')
# pretrained cyclegan model
if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1 --no_dropout')
# pretrained pix2pix model
if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
if not os.path.exists('./datasets/facades'):
run('bash ./datasets/download_pix2pix_dataset.sh facades')
run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')
# cyclegan train/test
run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')
# pix2pix train/test
run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1')
run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
# template train/test
run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1')
run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1')
# colorization train/test (optional)
if not os.path.exists('./datasets/mini_colorization'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization')
run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1')
run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1')
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/scripts/edges/batch_hed.py
# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
# Step 1: download the hed repo: https://github.com/s9xie/hed
# Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/
# Step 3: put this script under {caffe_root}/examples/hed/
# Step 4: run the following script:
# python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
# The code sometimes crashes after computation is done. Error looks like "Check failed: ... driver shutting down". You can just kill the job.
# For large images, it may run out of GPU memory, so resize the images before running this script.
# Step 5: run the MATLAB post-processing script "PostprocessHED.m"
import caffe
import numpy as np
from PIL import Image
import os
import argparse
import sys
import scipy.io as sio
def parse_args():
    parser = argparse.ArgumentParser(description='batch processing: photos->edges')
parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
args = parser.parse_args()
return args
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
# Make sure that caffe is on the python path:
caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/
sys.path.insert(0, caffe_root + 'python')
if not os.path.exists(args.hed_mat_dir):
print('create output directory %s' % args.hed_mat_dir)
os.makedirs(args.hed_mat_dir)
imgList = os.listdir(args.images_dir)
nImgs = len(imgList)
print('#images = %d' % nImgs)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
# load net
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# pad border
border = args.border
for i in range(nImgs):
if i % 500 == 0:
print('processing image %d/%d' % (i, nImgs))
im = Image.open(os.path.join(args.images_dir, imgList[i]))
in_ = np.array(im, dtype=np.float32)
in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')
in_ = in_[:, :, 0:3]
in_ = in_[:, :, ::-1]
in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
in_ = in_.transpose((2, 0, 1))
# remove the following two lines if testing with cpu
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
# get rid of the border
fuse = fuse[(border + 35):(-border + 35), (border + 35):(-border + 35)]
# save hed file to the disk
name, ext = os.path.splitext(imgList[i])
sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/scripts/eval_cityscapes/evaluate.py
import os
import caffe
import argparse
import numpy as np
import scipy.misc
from PIL import Image
from util import segrun, fast_hist, get_scores
from cityscapes import cityscapes
parser = argparse.ArgumentParser()
parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored")
parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
args = parser.parse_args()
def main():
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
if args.save_output_images > 0:
output_image_dir = args.output_dir + 'image_outputs/'
if not os.path.isdir(output_image_dir):
os.makedirs(output_image_dir)
CS = cityscapes(args.cityscapes_dir)
n_cl = len(CS.classes)
label_frames = CS.list_label_frames(args.split)
caffe.set_device(args.gpu_id)
caffe.set_mode_gpu()
net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
caffe.TEST)
hist_perframe = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 10 == 0:
print('Evaluating: %d/%d' % (i, len(label_frames)))
city = idx.split('_')[0]
# idx is city_shot_frame
label = CS.load_label(args.split, city, idx)
im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
im = np.array(Image.open(im_file))
im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
# im = np.array(Image.fromarray(im).resize((label.shape[1], label.shape[2]))) # Note: scipy.misc.imresize is deprecated, but we still use it for reproducibility.
out = segrun(net, CS.preprocess(im))
hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
if args.save_output_images > 0:
label_im = CS.palette(label)
pred_im = CS.palette(out)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)
mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
f.write('Mean class accuracy: %f\n' % mean_class_acc)
f.write('Mean class IoU: %f\n' % mean_class_iou)
f.write('************ Per class numbers below ************\n')
for i, cl in enumerate(CS.classes):
while len(cl) < 15:
cl = cl + ' '
f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))
main()
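# Example invocation (sketch; the paths are placeholders, the flags are the ones defined above):
#   python ./scripts/eval_cityscapes/evaluate.py \
#       --cityscapes_dir /path/to/cityscapes \
#       --result_dir ./results/cyclegan/test_latest/images \
#       --output_dir ./evaluation_output --gpu_id 0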
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/scripts/eval_cityscapes/cityscapes.py
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import sys
import os
import glob
import numpy as np
from PIL import Image
class cityscapes:
def __init__(self, data_path):
# data_path something like /data2/cityscapes
self.dir = data_path
self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence',
'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain',
'sky', 'person', 'rider', 'car', 'truck',
'bus', 'train', 'motorcycle', 'bicycle']
self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32)
# import cityscapes label helper and set up label mappings
sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir))
labels = __import__('labels')
self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs
self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples
def get_dset(self, split):
'''
List images as (city, id) for the specified split
TODO(shelhamer) generate splits from cityscapes itself, instead of
relying on these separately made text files.
'''
if split == 'train':
dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines()
else:
dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines()
return [(item.split('/')[0], item.split('/')[1]) for item in dataset]
def load_image(self, split, city, idx):
im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx))
return im
def assign_trainIds(self, label):
"""
Map the given label IDs to the train IDs appropriate for training
Use the label mapping provided in labels.py from the cityscapes scripts
"""
label = np.array(label, dtype=np.float32)
if sys.version_info[0] < 3:
for k, v in self.id2trainId.iteritems():
label[label == k] = v
else:
for k, v in self.id2trainId.items():
label[label == k] = v
return label
def load_label(self, split, city, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx))
label = self.assign_trainIds(label) # get proper labels for eval
label = np.array(label, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
def preprocess(self, im):
"""
Preprocess loaded image (by load_image) for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
in_ = np.array(im, dtype=np.float32)
in_ = in_[:, :, ::-1]
in_ -= self.mean
in_ = in_.transpose((2, 0, 1))
return in_
def palette(self, label):
'''
Map trainIds to colors as specified in labels.py
'''
if label.ndim == 3:
label = label[0]
color = np.empty((label.shape[0], label.shape[1], 3))
if sys.version_info[0] < 3:
for k, v in self.trainId2color.iteritems():
color[label == k, :] = v
else:
for k, v in self.trainId2color.items():
color[label == k, :] = v
return color
def make_boundaries(label, thickness=None):
"""
Input is an image label, output is a numpy array mask encoding the boundaries of the objects
Extract pixels at the true boundary by dilation - erosion of label.
Don't just pick the void label as it is not exclusive to the boundaries.
"""
assert(thickness is not None)
import skimage.morphology as skm
void = 255
mask = np.logical_and(label > 0, label != void)[0]
selem = skm.disk(thickness)
boundaries = np.logical_xor(skm.dilation(mask, selem),
skm.erosion(mask, selem))
return boundaries
def list_label_frames(self, split):
"""
Select labeled frames from a split for evaluation
collected as (city, shot, idx) tuples
"""
def file2idx(f):
"""Helper to convert file path into frame ID"""
city, shot, frame = (os.path.basename(f).split('_')[:3])
return "_".join([city, shot, frame])
frames = []
cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))]
for c in cities:
files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c)))
frames.extend([file2idx(f) for f in files])
return frames
def collect_frame_sequence(self, split, idx, length):
"""
Collect sequence of frames preceding (and including) a labeled frame
as a list of Images.
Note: 19 preceding frames are provided for each labeled frame.
"""
SEQ_LEN = length
city, shot, frame = idx.split('_')
frame = int(frame)
frame_seq = []
for i in range(frame - SEQ_LEN, frame + 1):
frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format(
self.dir, city, shot, i)
frame_seq.append(Image.open(frame_path))
return frame_seq
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/scripts/eval_cityscapes/util.py
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import numpy as np
def get_out_scoremap(net):
return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
def feed_net(net, in_):
"""
Load prepared input into net.
"""
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
def segrun(net, in_):
feed_net(net, in_)
net.forward()
return get_out_scoremap(net)
def fast_hist(a, b, n):
k = np.where((a >= 0) & (a < n))[0]
bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2)
if len(bc) != n**2:
# ignore this example if dimension mismatch
return 0
return bc.reshape(n, n)
def get_scores(hist):
# Mean pixel accuracy
acc = np.diag(hist).sum() / (hist.sum() + 1e-12)
# Per class accuracy
cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12)
# Per class IoU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu
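# Illustrative sketch (not part of the original file): the confusion matrix is accumulated
# per frame with fast_hist and then summarized with get_scores. The label maps below are
# random placeholders.
#
#     import numpy as np
#     n_cl = 19
#     hist = np.zeros((n_cl, n_cl))
#     gt = np.random.randint(0, n_cl, size=(256, 256))
#     pred = np.random.randint(0, n_cl, size=(256, 256))
#     hist += fast_hist(gt.flatten(), pred.flatten(), n_cl)
#     mean_acc, mean_cl_acc, mean_iou, per_cl_acc, per_cl_iou = get_scores(hist)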
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/datasets/combine_A_and_B.py
import os
import numpy as np
import cv2
import argparse
from multiprocessing import Pool
def image_write(path_A, path_B, path_AB):
im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
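# Example invocation (sketch; directory names are placeholders, the flags are defined below).
# fold_A and fold_B are expected to contain matching split subfolders (e.g. train/, test/)
# with identically named images:
#   python datasets/combine_A_and_B.py \
#       --fold_A /path/to/dataA --fold_B /path/to/dataB --fold_AB /path/to/dataAB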
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
parser.add_argument('--no_multiprocessing', dest='no_multiprocessing', help='If used, chooses single CPU execution instead of parallel execution', action='store_true',default=False)
args = parser.parse_args()
for arg in vars(args):
print('[%s] = ' % arg, getattr(args, arg))
splits = os.listdir(args.fold_A)
if not args.no_multiprocessing:
pool=Pool()
for sp in splits:
img_fold_A = os.path.join(args.fold_A, sp)
img_fold_B = os.path.join(args.fold_B, sp)
img_list = os.listdir(img_fold_A)
if args.use_AB:
img_list = [img_path for img_path in img_list if '_A.' in img_path]
num_imgs = min(args.num_imgs, len(img_list))
print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
img_fold_AB = os.path.join(args.fold_AB, sp)
if not os.path.isdir(img_fold_AB):
os.makedirs(img_fold_AB)
print('split = %s, number of images = %d' % (sp, num_imgs))
for n in range(num_imgs):
name_A = img_list[n]
path_A = os.path.join(img_fold_A, name_A)
if args.use_AB:
name_B = name_A.replace('_A.', '_B.')
else:
name_B = name_A
path_B = os.path.join(img_fold_B, name_B)
if os.path.isfile(path_A) and os.path.isfile(path_B):
name_AB = name_A
if args.use_AB:
name_AB = name_AB.replace('_A.', '.') # remove _A
path_AB = os.path.join(img_fold_AB, name_AB)
if not args.no_multiprocessing:
pool.apply_async(image_write, args=(path_A, path_B, path_AB))
else:
im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
if not args.no_multiprocessing:
pool.close()
pool.join()
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/datasets/prepare_cityscapes_dataset.py
import os
import glob
from PIL import Image
help_msg = """
The dataset can be downloaded from https://cityscapes-dataset.com.
Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
gtFine contains the semantics segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
The processed images will be placed at --output_dir.
Example usage:
python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
"""
def load_resized_img(path):
return Image.open(path).convert('RGB').resize((256, 256))
def check_matching_pair(segmap_path, photo_path):
segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
assert segmap_identifier == photo_identifier, \
"[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
save_phase = 'test' if phase == 'val' else 'train'
savedir = os.path.join(output_dir, save_phase)
os.makedirs(savedir, exist_ok=True)
os.makedirs(savedir + 'A', exist_ok=True)
os.makedirs(savedir + 'B', exist_ok=True)
print("Directory structure prepared at %s" % output_dir)
segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
segmap_paths = glob.glob(segmap_expr)
segmap_paths = sorted(segmap_paths)
photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
photo_paths = glob.glob(photo_expr)
photo_paths = sorted(photo_paths)
assert len(segmap_paths) == len(photo_paths), \
"%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
check_matching_pair(segmap_path, photo_path)
segmap = load_resized_img(segmap_path)
photo = load_resized_img(photo_path)
# data for pix2pix where the two images are placed side-by-side
sidebyside = Image.new('RGB', (512, 256))
sidebyside.paste(segmap, (256, 0))
sidebyside.paste(photo, (0, 0))
savepath = os.path.join(savedir, "%d.jpg" % i)
sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
# data for cyclegan where the two images are stored at two distinct directories
savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
photo.save(savepath, format='JPEG', subsampling=0, quality=100)
savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
if i % (len(segmap_paths) // 10) == 0:
print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gtFine_dir', type=str, required=True,
help='Path to the Cityscapes gtFine directory.')
parser.add_argument('--leftImg8bit_dir', type=str, required=True,
help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
parser.add_argument('--output_dir', type=str, required=True,
default='./datasets/cityscapes',
help='Directory the output images will be written to.')
opt = parser.parse_args()
print(help_msg)
print('Preparing Cityscapes Dataset for val phase')
process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
print('Preparing Cityscapes Dataset for train phase')
process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
print('Done')
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/datasets/make_dataset_aligned.py
import os
from PIL import Image
def get_file_paths(folder):
image_file_paths = []
for root, dirs, filenames in os.walk(folder):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.endswith('.png') or filename.endswith('.jpg'):
image_file_paths.append(file_path)
break # prevent descending into subfolders
return image_file_paths
def align_images(a_file_paths, b_file_paths, target_path):
if not os.path.exists(target_path):
os.makedirs(target_path)
for i in range(len(a_file_paths)):
img_a = Image.open(a_file_paths[i])
img_b = Image.open(b_file_paths[i])
assert(img_a.size == img_b.size)
aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
aligned_image.paste(img_a, (0, 0))
aligned_image.paste(img_b, (img_a.size[0], 0))
aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset-path',
dest='dataset_path',
help='Which folder to process (it should have subfolders testA, testB, trainA and trainB'
)
args = parser.parse_args()
dataset_folder = args.dataset_path
print(dataset_folder)
test_a_path = os.path.join(dataset_folder, 'testA')
test_b_path = os.path.join(dataset_folder, 'testB')
test_a_file_paths = get_file_paths(test_a_path)
test_b_file_paths = get_file_paths(test_b_path)
assert(len(test_a_file_paths) == len(test_b_file_paths))
test_path = os.path.join(dataset_folder, 'test')
train_a_path = os.path.join(dataset_folder, 'trainA')
train_b_path = os.path.join(dataset_folder, 'trainB')
train_a_file_paths = get_file_paths(train_a_path)
train_b_file_paths = get_file_paths(train_b_path)
assert(len(train_a_file_paths) == len(train_b_file_paths))
train_path = os.path.join(dataset_folder, 'train')
align_images(test_a_file_paths, test_b_file_paths, test_path)
align_images(train_a_file_paths, train_b_file_paths, train_path)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/colorization_dataset.py
import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color # require skimage
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
class ColorizationDataset(BaseDataset):
"""This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
This dataset is required by pix2pix-based colorization model ('--model colorization')
"""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, the number of channels for input image is 1 (L) and
the number of channels for output image is 2 (ab). The direction is from A to B
"""
parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
self.transform = get_transform(self.opt, convert=False)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - the L channel of an image
B (tensor) - - the ab channels of the same image
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
path = self.AB_paths[index]
im = Image.open(path).convert('RGB')
im = self.transform(im)
im = np.array(im)
lab = color.rgb2lab(im).astype(np.float32)
lab_t = transforms.ToTensor()(lab)
A = lab_t[[0], ...] / 50.0 - 1.0
B = lab_t[[1, 2], ...] / 110.0
return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
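# Illustrative sketch (not part of the original file): undoing the normalization above to
# recover an RGB image from the (A, B) pair returned by __getitem__. `lab_pair_to_rgb` is a
# hypothetical helper, not part of this repo:
#
#     import numpy as np
#     import torch
#     from skimage import color
#     def lab_pair_to_rgb(A, B):
#         L = (A + 1.0) * 50.0                 # back to [0, 100]
#         ab = B * 110.0                       # back to roughly [-110, 110]
#         lab = torch.cat([L, ab], dim=0).numpy().transpose(1, 2, 0)
#         return color.lab2rgb(lab.astype(np.float64))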
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/base_dataset.py
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
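# Illustrative sketch (not part of the original file): applying the *same* random crop and
# flip to a paired A/B image, as aligned_dataset.py does. `opt` is assumed to carry the
# preprocess/load_size/crop_size/no_flip options, and A_img/B_img are PIL images:
#
#     params = get_params(opt, A_img.size)              # pick crop position and flip once
#     transform_A = get_transform(opt, params)
#     transform_B = get_transform(opt, params)
#     A, B = transform_A(A_img), transform_B(B_img)     # identical spatial augmentation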
def __transforms2pil_resize(method):
mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
transforms.InterpolationMode.NEAREST: Image.NEAREST,
transforms.InterpolationMode.LANCZOS: Image.LANCZOS,}
return mapper[method]
def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
method = __transforms2pil_resize(method)
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
method = __transforms2pil_resize(method)
ow, oh = img.size
if ow == target_size and oh >= crop_size:
return img
w = target_size
h = int(max(target_size * oh / ow, crop_size))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/unaligned_dataset.py
import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # use a fixed, ordered index for domain B to keep pairs deterministic
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/image_folder.py
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
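# Illustrative sketch (not part of the original file; the path is a placeholder):
#
#     paths = make_dataset('./datasets/maps/trainA', max_dataset_size=1000)
#     # recursively collects recognised image files under the folder, capped at 1000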
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/aligned_dataset.py
import os
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
        # read an image given a random integer index
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
# split AB image into A and B
w, h = AB.size
w2 = int(w / 2)
A = AB.crop((0, 0, w2, h))
B = AB.crop((w2, 0, w, h))
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = A_transform(A)
B = B_transform(B)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/__init__.py
"""This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
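# Illustrative sketch (not part of the original file): 'aligned' resolves to AlignedDataset
# defined in data/aligned_dataset.py; `opt` is assumed to be a parsed options object:
#
#     dataset_class = find_dataset_using_name('aligned')
#     dataset = dataset_class(opt)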
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
pytorch-CycleGAN-and-pix2pix | pytorch-CycleGAN-and-pix2pix-master/data/template_dataset.py
"""Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
class TemplateDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
# get the image paths of your dataset;
self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
# define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
self.transform = get_transform(opt)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
"""
path = 'temp' # needs to be a string
data_A = None # needs to be a tensor
data_B = None # needs to be a tensor
return {'data_A': data_A, 'data_B': data_B, 'path': path}
def __len__(self):
"""Return the total number of images."""
return len(self.image_paths)
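# Hedged illustration of Steps 1-4 from __getitem__ above, assuming the paths
# point at RGB image files (the Image import is commented out at the top of
# this template):
#
#     path = self.image_paths[index]
#     image = Image.open(path).convert('RGB')
#     data = self.transform(image)
#     return {'data_A': data, 'data_B': data, 'path': path}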
| 3,506 | 45.144737 | 156 |
py
|
pytorch-CycleGAN-and-pix2pix
|
pytorch-CycleGAN-and-pix2pix-master/data/single_dataset.py
|
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
class SingleDataset(BaseDataset):
"""This dataset class can load a set of images specified by the path --dataroot /path/to/data.
    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.transform = get_transform(opt, grayscale=(input_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A and A_paths
A(tensor) - - an image in one domain
A_paths(str) - - the path of the image
"""
A_path = self.A_paths[index]
A_img = Image.open(A_path).convert('RGB')
A = self.transform(A_img)
return {'A': A, 'A_paths': A_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.A_paths)
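# Hedged usage sketch (paths and checkpoint name are illustrative): this dataset
# is typically selected together with the one-sided test model, e.g.
#   python test.py --dataroot ./datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout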
| 1,495 | 35.487805 | 105 |
py
|
sign-topic
|
sign-topic-main/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
)
]
)
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
),
cpp_extension.CppExtension(
"alignment_train_cpu_binding",
sources=[
"examples/operators/alignment_train_cpu.cpp",
],
),
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
cpp_extension.CppExtension(
"alignment_train_cuda_binding",
sources=[
"examples/operators/alignment_train_kernel.cu",
"examples/operators/alignment_train_cuda.cpp",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core>=1.0.7,<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
"bitarray",
"torchaudio>=0.8.0",
"sentencepiece",
"fvcore"
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples)
+ get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
| 8,427 | 28.263889 | 92 |
py
|
sign-topic
|
sign-topic-main/hubconf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
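# Hedged usage sketch (the repo path and model name are illustrative; valid
# names are whatever the registered models expose via hub_models()):
#   import torch
#   en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de')
#   en2de.translate('Hello world!')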
| 2,099 | 27.378378 | 82 |
py
|
sign-topic
|
sign-topic-main/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| 366 | 23.466667 | 70 |
py
|
sign-topic
|
sign-topic-main/examples/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from fairseq.version import __version__ # noqa
except ImportError:
pass
| 264 | 25.5 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/truncated_bptt/transformer_xl_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
# Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
# which adds ``None`` values to an ``nn.ParameterList``, which is not
# supported in PyTorch. Instead we can replace this with an
# ``nn.ModuleList``, which does support ``None`` values.
try:
if all(p is None for p in self.model.crit.out_projs._parameters.values()):
self.model.crit.out_projs = torch.nn.ModuleList(
[None] * len(self.model.crit.out_projs._parameters)
)
except Exception:
pass
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
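if __name__ == "__main__":
    # Hedged sanity-check sketch (shapes and the permutation are illustrative):
    # beam reordering selects each memory tensor along its batch dimension,
    # mirroring reorder_incremental_state above.
    mem_len, bsz, d_model = 4, 3, 8
    mems = [torch.zeros(mem_len, bsz, d_model) for _ in range(2)]
    new_order = torch.tensor([2, 0, 1])  # e.g. the surviving beams, reordered
    reordered = [m.index_select(1, new_order) for m in mems]
    assert all(m.shape == (mem_len, bsz, d_model) for m in reordered)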
| 5,324 | 33.134615 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/truncated_bptt/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import transformer_xl_model, truncated_bptt_lm_task # noqa
| 245 | 34.142857 | 66 |
py
|
sign-topic
|
sign-topic-main/examples/truncated_bptt/truncated_bptt_lm_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024, metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
num_workers=0,
epoch=1,
data_buffer_size=0,
skip_remainder_batch=False,
**kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
skip_remainder_batch=skip_remainder_batch,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
        # we don't use fairseq's batching functionality, so we expect a single
        # item: an (id, List[torch.Tensor]) pair
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id] * item.size(0)),
"net_input": {"src_tokens": item,},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
bsz_per_shard, # number of items processed per GPUs per forward
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
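if __name__ == "__main__":
    # Hedged sanity-check sketch (toy data, illustrative only): reproduces the
    # 16-item example from the docstring inside __init__ for shard_id=1.
    toy = [torch.tensor([i]) for i in range(16)]
    shard1 = TruncatedBPTTDataset(data=toy, bsz_per_shard=2, shard_id=1, num_shards=3)
    assert len(shard1) == 2
    _, items = shard1[0]
    assert [t.item() for t in items] == [4, 6]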
| 9,995 | 33.951049 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .models import linformer_roberta # noqa
| 224 | 31.142857 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/modules/multihead_linear_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadLinearAttention(nn.Module):
"""Multi-headed linformer attention.
Projects the key and values down to the compressed dimension, before computing self-attention.
See "Linformer: Self-Attention with Linear Complexity" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
compressed=1,
max_seq_len=256,
shared_kv_compressed=0,
shared_compress_layer=None,
freeze_compress=0,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
# used for compress sequence to subsequence
if shared_compress_layer is None:
self.compress_seq_len = max_seq_len // compressed
self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False)
if shared_kv_compressed == 0:
self.compress_v = nn.Linear(
max_seq_len, self.compress_seq_len, bias=False
)
self.layerwise_sharing = False
else:
self.compress_k = shared_compress_layer
if shared_kv_compressed == 0:
self.compress_v = shared_compress_layer
self.layerwise_sharing = True
self.shared_kv_compressed = shared_kv_compressed
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
if freeze_compress == 1:
self.compress_k.weight.requires_grad = False
if shared_kv_compressed == 0:
self.compress_v.weight.requires_grad = False
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2))
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(
self.compress_v.weight, gain=1 / math.sqrt(2)
)
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight)
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(self.compress_v.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k_input = query.permute(1, 2, 0).contiguous() # B * C * T
k_input = (
F.linear(k_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
k = self.k_proj(k_input)
v_input = query.permute(1, 2, 0).contiguous() # B * C * T
if self.shared_kv_compressed == 0:
v_input = (
F.linear(v_input, self.compress_v.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
if self.shared_kv_compressed == 1: # use shared kv compressed linear layer
v_input = (
F.linear(v_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
v = self.v_proj(v_input)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadLinearAttention.apply_sparse_mask(
attn_weights, tgt_len, src_len, bsz
)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights,
p=self.dropout,
training=self.training,
)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| 19,151 | 38.73444 | 98 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder
from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer
class LinformerTransformerEncoder(TransformerEncoder):
"""
Implementation for a Bi-directional Linformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
LinformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(self, args, dictionary, embed_tokens):
self.compress_layer = None
super().__init__(args, dictionary, embed_tokens)
def build_encoder_layer(self, args):
if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
compress_layer = nn.Linear(
self.args.max_positions,
self.args.max_positions // self.args.compressed,
)
            # initialize parameters for the compressed layer
nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
if self.args.freeze_compress == 1:
compress_layer.weight.requires_grad = False
self.compress_layer = compress_layer
return LinformerTransformerEncoderLayer(args, self.compress_layer)
| 2,151 | 38.127273 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/modules/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.modules import TransformerEncoderLayer
from .multihead_linear_attention import MultiheadLinearAttention
class LinformerTransformerEncoderLayer(TransformerEncoderLayer):
"""
Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(self, args, shared_compress_layer):
# wrap in a list so it's not automatically registered by PyTorch
self.shared_compress_layer = [shared_compress_layer]
super().__init__(args)
self.register_buffer("version", torch.tensor(2))
def build_self_attention(self, embed_dim, args):
return MultiheadLinearAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.dropout,
self_attention=True,
q_noise=args.quant_noise_pq,
qn_block_size=args.quant_noise_pq_block_size,
compressed=args.compressed,
max_seq_len=args.max_positions,
shared_kv_compressed=args.shared_kv_compressed,
shared_compress_layer=self.shared_compress_layer[0],
freeze_compress=args.freeze_compress,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
# some old checkpoints had weight sharing implemented incorrectly
# (note: this was correct in the original paper code)
if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
state_dict[f"{prefix}version"] = torch.tensor(1)
# check compression layer sharing
if f"{prefix}shared_compress_layer.weight" in state_dict:
# reinitialize block without sharing compression layer to match
# old behavior
self.shared_compress_layer = [
torch.nn.Linear(
self.shared_compress_layer[0].weight.size(1),
self.shared_compress_layer[0].weight.size(0),
)
]
self.self_attn = self.build_self_attention(self.embed_dim, self.args)
# delete shared_compress_layer, since it's already copied to
# self_attn.compress_k.weight
del state_dict[f"{prefix}shared_compress_layer.weight"]
if f"{prefix}shared_compress_layer.bias" in state_dict:
del state_dict[f"{prefix}shared_compress_layer.bias"]
| 2,743 | 40.575758 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/models/linformer_roberta.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Linformer: Self-Attention with Linear Complexity
"""
import logging
import torch
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
init_bert_params,
roberta_base_architecture,
roberta_large_architecture,
RobertaEncoder,
RobertaModel,
)
from fairseq.utils import safe_hasattr
from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder
logger = logging.getLogger(__name__)
@register_model("linformer_roberta")
class LinformerModel(RobertaModel):
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
# add args for Linformer
parser.add_argument(
"--compressed", type=int, help="compressed ratio of sequence length"
)
parser.add_argument(
"--shared-kv-compressed",
type=int,
help="share compressed matrix between k and v, in each layer",
)
parser.add_argument(
"--shared-layer-kv-compressed",
type=int,
help="share compressed matrix between k and v and across all layers",
)
parser.add_argument(
"--freeze-compress",
type=int,
help="freeze the parameters in compressed layer",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
if not safe_hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
encoder = LinformerEncoder(args, task.source_dictionary)
return cls(args, encoder)
class LinformerEncoder(RobertaEncoder):
"""Linformer encoder."""
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.register_buffer("version", torch.tensor(2))
def build_encoder(self, args, dictionary, embed_tokens):
encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
# some old checkpoints had weight sharing implemented incorrectly
# (note: this was correct in the original paper code)
if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
state_dict[f"{prefix}version"] = torch.tensor(1)
# check if input embeddings and output embeddings were tied
if not torch.allclose(
state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"],
state_dict[f"{prefix}lm_head.weight"],
):
# they weren't tied, re-init the LM head without weight sharing
self.lm_head = self.build_lm_head(
embed_dim=self.args.encoder_embed_dim,
output_dim=len(self.dictionary),
activation_fn=self.args.activation_fn,
weight=None, # don't share weights
)
@register_model_architecture("linformer_roberta", "linformer_roberta")
def base_architecture(args):
args.compressed = getattr(args, "compressed", 4)
args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0)
args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0)
args.freeze_compress = getattr(args, "freeze_compress", 0)
roberta_base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_base")
def linformer_roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_large")
def linformer_roberta_large_architecture(args):
roberta_large_architecture(args)
base_architecture(args)
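# Hedged usage sketch (data path and flag values are illustrative): the options
# registered in add_args above map to fairseq-train command-line flags, e.g.
#   fairseq-train data-bin/wikitext-103 --task masked_lm --arch linformer_roberta_base \
#       --compressed 4 --shared-kv-compressed 1 --shared-layer-kv-compressed 1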
| 4,143 | 33.247934 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/linformer/linformer_src/models/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/wav2vec/vq-wav2vec_featurize.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
import os.path as osp
import pprint
import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader
try:
import tqdm
except ImportError:
print("Install tqdm to use --log-format=tqdm")
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, "r") as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, "r") as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard :: self.args.num_shards]
lbls = []
with open(self.data_file(split), "w") as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + "\n")
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), "w") as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files) // 32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint])
model = model[0]
self.quantize_location = getattr(cfg.model, "vq", "encoder")
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (
self.args.shard is None or self.args.shard == 0
):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
| 7,680 | 29.601594 | 99 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/wav2vec_manifest.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import argparse
import glob
import os
import random
import soundfile
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"root", metavar="DIR", help="root directory containing flac files to index"
)
parser.add_argument(
"--valid-percent",
default=0.01,
type=float,
metavar="D",
help="percentage of data to use as validation set (between 0 and 1)",
)
parser.add_argument(
"--dest", default=".", type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="flac", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--path-must-contain",
default=None,
type=str,
metavar="FRAG",
help="if set, path must contain this substring for a file to be included in the manifest",
)
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.0
if not os.path.exists(args.dest):
os.makedirs(args.dest)
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, "**/*." + args.ext)
rand = random.Random(args.seed)
valid_f = (
open(os.path.join(args.dest, "valid.tsv"), "w")
if args.valid_percent > 0
else None
)
with open(os.path.join(args.dest, "train.tsv"), "w") as train_f:
print(dir_path, file=train_f)
if valid_f is not None:
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print(
"{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest
)
if valid_f is not None:
valid_f.close()
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
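# Hedged example of the resulting train.tsv layout (paths and frame counts are
# illustrative): the first line is the resolved root directory, and every
# following line is "<relative path>\t<number of frames>", e.g.
#   /data/LibriSpeech/train-clean-100
#   103/1240/103-1240-0000.flac	225360
#   103/1240/103-1240-0001.flac	255120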
| 2,513 | 27.568182 | 98 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/wav2vec_featurize.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import numpy as np
import soundfile as sf
import torch
import tqdm
import fairseq
from torch import nn
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname])
model = model[0]
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for flashlight datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i", help="Input Directory", **kwargs)
self.add_argument("--output", "-o", help="Output Directory", **kwargs)
self.add_argument("--model", help="Path to model checkpoint", **kwargs)
self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs)
self.add_argument(
"--ext", default="wav", required=False, help="Audio file extension"
)
self.add_argument(
"--no-copy-labels",
action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.",
)
self.add_argument(
"--use-feat",
action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features",
)
self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
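# Minimal usage sketch for the wrapper above (paths are placeholders):
#   predict = Prediction("/models/wav2vec_large.pt", gpu=0)
#   wav, sr = read_audio("/data/utt0.wav")  # must be 16 kHz audio
#   z, c = predict(wav)  # z: local feature vectors, c: aggregated context vectors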
class H5Writer:
""" Write features as hdf5 file in flashlight compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
"""Given a model and a flashlight dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the flashlight dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(
self,
input_root,
output_root,
split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
self.input_path
)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(
lambda x: os.path.join(
self.output_path, x.replace("." + self.extension, ".h5context")
),
map(os.path.basename, paths),
)
for name, target_fname in self._progress(
zip(paths, fnames_context), total=len(self)
):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__
)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| 7,020 | 27.084 | 135 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/wav2vec/libri_labels.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to create letter (.ltr) and word (.wrd) transcript label files from a LibriSpeech tsv manifest (flashlight / wav2letter++ style labels)
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
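# Output format sketch: for a transcript "HELLO WORLD", the .wrd file stores the
# words verbatim while the .ltr file stores space-separated letters with "|"
# marking word boundaries, e.g. "H E L L O | W O R L D |".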
if __name__ == "__main__":
main()
| 1,875 | 31.912281 | 97 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/w2vu_generate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class DecoderType(Enum):
VITERBI = auto()
KENLM = auto()
FAIRSEQ = auto()
KALDI = auto()
@dataclass
class UnsupGenerateConfig(FairseqDataclass):
fairseq: FairseqConfig = FairseqConfig()
lm_weight: float = field(
default=2.0,
metadata={"help": "language model weight"},
)
w2l_decoder: DecoderType = field(
default=DecoderType.VITERBI,
metadata={"help": "type of decoder to use"},
)
kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
lexicon: Optional[str] = field(
default=None,
metadata={
"help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning"
},
)
lm_model: Optional[str] = field(
default=None,
metadata={"help": "path to language model (kenlm or fairseq)"},
)
unit_lm: bool = field(
default=False,
metadata={"help": "whether to use unit lm"},
)
beam_threshold: float = field(
default=50.0,
metadata={"help": "beam score threshold"},
)
beam_size_token: float = field(
default=100.0,
metadata={"help": "max tokens per beam"},
)
beam: int = field(
default=5,
metadata={"help": "decoder beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of results to return"},
)
word_score: float = field(
default=1.0,
metadata={"help": "word score to add at end of word"},
)
unk_weight: float = field(
default=-math.inf,
metadata={"help": "unknown token weight"},
)
sil_weight: float = field(
default=0.0,
metadata={"help": "silence token weight"},
)
targets: Optional[str] = field(
default=None,
metadata={"help": "extension of ground truth labels to compute UER"},
)
results_path: Optional[str] = field(
default=None,
metadata={"help": "where to store results"},
)
post_process: Optional[str] = field(
default=None,
metadata={"help": "how to post process results"},
)
vocab_usage_power: float = field(
default=2,
metadata={"help": "for unsupervised param tuning"},
)
viterbi_transcript: Optional[str] = field(
default=None,
metadata={"help": "for unsupervised param tuning"},
)
min_lm_ppl: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
min_vt_uer: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
blank_weight: float = field(
default=0,
metadata={"help": "value to add or set for blank emission"},
)
blank_mode: str = field(
default="set",
metadata={
"help": "can be add or set, how to modify blank emission with blank weight"
},
)
sil_is_blank: bool = field(
default=False,
metadata={"help": "if true, <SIL> token is same as blank token"},
)
unsupervised_tuning: bool = field(
default=False,
metadata={
"help": "if true, returns a score based on unsupervised param selection metric instead of UER"
},
)
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
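# Illustrative hydra-style invocation (a sketch; config names and paths are
# assumptions, not taken from this file):
#   python w2vu_generate.py --config-dir config/generate --config-name viterbi \
#       fairseq.task.data=/path/to/precomputed_feats \
#       fairseq.common_eval.path=/path/to/checkpoint.pt \
#       fairseq.dataset.gen_subset=valid results_path=/path/to/results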
def get_dataset_itr(cfg, task):
return task.get_batch_iterator(
dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
max_tokens=cfg.fairseq.dataset.max_tokens,
max_sentences=cfg.fairseq.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
num_shards=cfg.fairseq.dataset.num_shards,
shard_id=cfg.fairseq.dataset.shard_id,
num_workers=cfg.fairseq.dataset.num_workers,
data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
retval = []
word_preds = []
transcriptions = []
dec_scores = []
for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
if torch.is_tensor(hypo["tokens"]):
tokens = hypo["tokens"].int().cpu()
tokens = tokens[tokens >= tgt_dict.nspecial]
hyp_pieces = tgt_dict.string(tokens)
else:
hyp_pieces = " ".join(hypo["tokens"])
if "words" in hypo and len(hypo["words"]) > 0:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, cfg.post_process)
to_write = {}
if res_files is not None:
to_write[res_files["hypo.units"]] = hyp_pieces
to_write[res_files["hypo.words"]] = hyp_words
tgt_words = ""
if target_tokens is not None:
if isinstance(target_tokens, str):
tgt_pieces = tgt_words = target_tokens
else:
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, cfg.post_process)
if res_files is not None:
to_write[res_files["ref.units"]] = tgt_pieces
to_write[res_files["ref.words"]] = tgt_words
if not cfg.fairseq.common_eval.quiet:
logger.info(f"HYPO {i}:" + hyp_words)
if tgt_words:
logger.info("TARGET:" + tgt_words)
if "am_score" in hypo and "lm_score" in hypo:
logger.info(
f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
)
elif "score" in hypo:
logger.info(f"DECODER SCORE: {hypo['score']}")
logger.info("___________________")
hyp_words_arr = hyp_words.split()
tgt_words_arr = tgt_words.split()
retval.append(
(
editdistance.eval(hyp_words_arr, tgt_words_arr),
len(hyp_words_arr),
len(tgt_words_arr),
hyp_pieces,
hyp_words,
)
)
word_preds.append(hyp_words_arr)
transcriptions.append(to_write)
        dec_scores.append(-hypo.get("score", 0))  # negate because kaldi returns NLL
if len(retval) > 1:
best = None
for r, t in zip(retval, transcriptions):
if best is None or r[0] < best[0][0]:
best = r, t
for dest, tran in best[1].items():
print(tran, file=dest)
dest.flush()
return best[0]
assert len(transcriptions) == 1
for dest, tran in transcriptions[0].items():
print(tran, file=dest)
return retval[0]
def prepare_result_files(cfg: UnsupGenerateConfig):
def get_res_file(file_prefix):
if cfg.fairseq.dataset.num_shards > 1:
file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
path = os.path.join(
cfg.results_path,
"{}{}.txt".format(
cfg.fairseq.dataset.gen_subset,
file_prefix,
),
)
return open(path, "w", buffering=1)
if not cfg.results_path:
return None
return {
"hypo.words": get_res_file(""),
"hypo.units": get_res_file("_units"),
"ref.words": get_res_file("_ref"),
"ref.units": get_res_file("_ref_units"),
"hypo.nbest.words": get_res_file("_nbest_words"),
}
def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.eval()
if cfg.fairseq.common.fp16:
model.half()
if use_cuda:
model.cuda()
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "features" in sample["net_input"]:
sample["net_input"]["dense_x_only"] = True
num_feats += (
sample["net_input"]["features"].shape[0]
* sample["net_input"]["features"].shape[1]
)
hypos = task.inference_step(generator, models, sample, None)
return hypos, num_feats
def main(cfg: UnsupGenerateConfig, model=None):
if (
cfg.fairseq.dataset.max_tokens is None
and cfg.fairseq.dataset.batch_size is None
):
cfg.fairseq.dataset.max_tokens = 1024000
use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu
task = tasks.setup_task(cfg.fairseq.task)
overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides)
if cfg.fairseq.task._name == "unpaired_audio_text":
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
"blank_is_sil": cfg.sil_is_blank,
"no_softmax": True,
"segmentation": {
"type": "NONE",
},
}
else:
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
}
if model is None:
# Load ensemble
logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
cfg.fairseq.common_eval.path.split("\\"),
arg_overrides=overrides,
task=task,
suffix=cfg.fairseq.checkpoint.checkpoint_suffix,
strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count,
)
optimize_models(cfg, use_cuda, models)
else:
models = [model]
saved_cfg = cfg.fairseq
with open_dict(saved_cfg.task):
saved_cfg.task.shuffle = False
saved_cfg.task.sort_by_length = False
gen_result = generate(cfg, models, saved_cfg, use_cuda)
wer = None
if gen_result.lengths_t > 0:
wer = gen_result.errs_t * 100.0 / gen_result.lengths_t
logger.info(f"WER: {wer}")
lm_ppl = float("inf")
if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0:
hyp_len = gen_result.lengths_hyp_t
lm_ppl = math.pow(
10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences)
)
logger.info(f"LM PPL: {lm_ppl}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
gen_result.num_sentences,
gen_result.gen_timer.n,
gen_result.gen_timer.sum,
gen_result.num_sentences / gen_result.gen_timer.sum,
1.0 / gen_result.gen_timer.avg,
)
)
vt_diff = None
if gen_result.vt_length_t > 0:
vt_diff = gen_result.vt_err_t / gen_result.vt_length_t
vt_diff = max(cfg.min_vt_uer, vt_diff)
lm_ppl = max(cfg.min_lm_ppl, lm_ppl)
    if not cfg.unsupervised_tuning:
weighted_score = wer
else:
weighted_score = math.log(lm_ppl) * (vt_diff or 1.0)
res = (
f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, "
f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, "
f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, "
f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, "
f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}"
)
logger.info(res)
# print(res)
return task, weighted_score
@hydra.main(
config_path=os.path.join("../../..", "fairseq", "config"), config_name="config"
)
def hydra_main(cfg):
with open_dict(cfg):
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=False, enum_to_str=False)
)
OmegaConf.set_struct(cfg, True)
logger.info(cfg)
utils.import_user_module(cfg.fairseq.common)
_, score = main(cfg)
if cfg.is_ax:
return score, None
return score
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=UnsupGenerateConfig)
hydra_main()
if __name__ == "__main__":
cli_main()
| 22,210 | 30.371469 | 129 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/models/wav2vec_u.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum, auto
import math
import numpy as np
from typing import Tuple, List, Optional, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from fairseq import checkpoint_utils, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
SamePad,
TransposeLast,
)
class SegmentationType(Enum):
NONE = auto()
RANDOM = auto()
UNIFORM_RANDOM = auto()
UNIFORM_RANDOM_JOIN = auto()
JOIN = auto()
@dataclass
class SegmentationConfig(FairseqDataclass):
type: SegmentationType = SegmentationType.NONE
subsample_rate: float = 0.25
mean_pool: bool = True
mean_pool_join: bool = False
remove_zeros: bool = False
@dataclass
class Wav2vec_UConfig(FairseqDataclass):
discriminator_kernel: int = 3
discriminator_dilation: int = 1
discriminator_dim: int = 256
discriminator_causal: bool = True
discriminator_linear_emb: bool = False
discriminator_depth: int = 1
discriminator_max_pool: bool = False
discriminator_act_after_linear: bool = False
discriminator_dropout: float = 0.0
discriminator_spectral_norm: bool = False
discriminator_weight_norm: bool = False
generator_kernel: int = 4
generator_dilation: int = 1
generator_stride: int = 1
generator_bias: bool = False
generator_dropout: float = 0.0
blank_weight: float = 0
blank_mode: str = "add"
blank_is_sil: bool = False
no_softmax: bool = False
smoothness_weight: float = 0.0
smoothing: float = 0.0
smoothing_one_sided: bool = False
gradient_penalty: float = 0.0
probabilistic_grad_penalty_slicing: bool = False
code_penalty: float = 0.0
gumbel: bool = False
hard_gumbel: bool = True
temp: Tuple[float, float, float] = (2, 0.1, 0.99995)
input_dim: int = 128
segmentation: SegmentationConfig = SegmentationConfig()
class Segmenter(nn.Module):
cfg: SegmentationConfig
def __init__(self, cfg: SegmentationConfig):
super().__init__()
self.cfg = cfg
self.subsample_rate = cfg.subsample_rate
def pre_segment(self, dense_x, dense_padding_mask):
return dense_x, dense_padding_mask
def logit_segment(self, logits, padding_mask):
return logits, padding_mask
class RandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
target_num = math.ceil(dense_x.size(1) * self.subsample_rate)
ones = torch.ones(dense_x.shape[:-1], device=dense_x.device)
indices, _ = ones.multinomial(target_num).sort(dim=-1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1))
dense_x = dense_x.gather(1, indices_ld)
dense_padding_mask = dense_padding_mask.gather(1, index=indices)
return dense_x, dense_padding_mask
class UniformRandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
bsz, tsz, fsz = dense_x.shape
target_num = math.ceil(tsz * self.subsample_rate)
rem = tsz % target_num
if rem > 0:
dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem])
dense_padding_mask = F.pad(
dense_padding_mask, [0, target_num - rem], value=True
)
dense_x = dense_x.view(bsz, target_num, -1, fsz)
dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1)
if self.cfg.mean_pool:
dense_x = dense_x.mean(dim=-2)
dense_padding_mask = dense_padding_mask.all(dim=-1)
else:
ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device)
indices = ones.multinomial(1)
indices = indices.unsqueeze(-1).expand(-1, target_num, -1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, -1, fsz)
dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz)
dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape(
bsz, -1
)
return dense_x, dense_padding_mask
class JoinSegmenter(Segmenter):
def logit_segment(self, logits, padding_mask):
preds = logits.argmax(dim=-1)
if padding_mask.any():
preds[padding_mask] = -1 # mark pad
uniques = []
bsz, tsz, csz = logits.shape
for p in preds:
uniques.append(
p.cpu().unique_consecutive(return_inverse=True, return_counts=True)
)
new_tsz = max(u[0].numel() for u in uniques)
new_logits = logits.new_zeros(bsz, new_tsz, csz)
new_pad = padding_mask.new_zeros(bsz, new_tsz)
for b in range(bsz):
u, idx, c = uniques[b]
keep = u != -1
if self.cfg.remove_zeros:
keep.logical_and_(u != 0)
if self.training and not self.cfg.mean_pool_join:
u[0] = 0
u[1:] = c.cumsum(0)[:-1]
m = c > 1
r = torch.rand(m.sum())
o = (c[m] * r).long()
u[m] += o
new_logits[b, : u.numel()] = logits[b, u]
else:
new_logits[b].index_add_(
dim=0, index=idx.to(new_logits.device), source=logits[b]
)
new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device)
new_sz = keep.sum()
if not keep.all():
kept_logits = new_logits[b, : c.numel()][keep]
new_logits[b, :new_sz] = kept_logits
if new_sz < new_tsz:
pad = new_tsz - new_sz
new_logits[b, -pad:] = 0
new_pad[b, -pad:] = True
return new_logits, new_pad
class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter):
pass
SEGMENT_FACTORY = {
SegmentationType.NONE: Segmenter,
SegmentationType.RANDOM: RandomSegmenter,
SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter,
SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter,
SegmentationType.JOIN: JoinSegmenter,
}
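# Usage sketch (mirrors Wav2vec_U.__init__ further below; values are illustrative):
#   seg_cfg = SegmentationConfig(type=SegmentationType.JOIN, subsample_rate=0.25)
#   segmenter = SEGMENT_FACTORY[seg_cfg.type](seg_cfg)
#   features, padding_mask = segmenter.pre_segment(features, padding_mask)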
class Discriminator(nn.Module):
def __init__(self, dim, cfg: Wav2vec_UConfig):
super().__init__()
inner_dim = cfg.discriminator_dim
kernel = cfg.discriminator_kernel
dilation = cfg.discriminator_dilation
self.max_pool = cfg.discriminator_max_pool
if cfg.discriminator_causal:
padding = kernel - 1
else:
padding = kernel // 2
def make_conv(in_d, out_d, k, p=0, has_dilation=True):
conv = nn.Conv1d(
in_d,
out_d,
kernel_size=k,
padding=p,
dilation=dilation if has_dilation else 1,
)
if cfg.discriminator_spectral_norm:
conv = nn.utils.spectral_norm(conv)
elif cfg.discriminator_weight_norm:
conv = nn.utils.weight_norm(conv)
return conv
inner_net = [
nn.Sequential(
make_conv(inner_dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
nn.Dropout(cfg.discriminator_dropout),
nn.GELU(),
)
for _ in range(cfg.discriminator_depth - 1)
] + [
make_conv(inner_dim, 1, kernel, padding, has_dilation=False),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_linear_emb:
emb_net = [make_conv(dim, inner_dim, 1)]
else:
emb_net = [
make_conv(dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_act_after_linear:
emb_net.append(nn.GELU())
self.net = nn.Sequential(
*emb_net,
nn.Dropout(cfg.discriminator_dropout),
*inner_net,
)
def forward(self, x, padding_mask):
x = x.transpose(1, 2) # BTC -> BCT
x = self.net(x)
x = x.transpose(1, 2)
x_sz = x.size(1)
if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1:
padding_mask = padding_mask[:, : x.size(1)]
x[padding_mask] = float("-inf") if self.max_pool else 0
x_sz = x_sz - padding_mask.sum(dim=-1)
x = x.squeeze(-1)
if self.max_pool:
x, _ = x.max(dim=-1)
else:
x = x.sum(dim=-1)
x = x / x_sz
return x
class Generator(nn.Module):
def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig):
super().__init__()
self.cfg = cfg
self.output_dim = output_dim
self.stride = cfg.generator_stride
self.dropout = nn.Dropout(cfg.generator_dropout)
padding = cfg.generator_kernel // 2
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(
input_dim,
output_dim,
kernel_size=cfg.generator_kernel,
stride=cfg.generator_stride,
dilation=cfg.generator_dilation,
padding=padding,
bias=cfg.generator_bias,
),
TransposeLast(),
)
def forward(self, dense_x, tokens, dense_padding_mask):
dense_x = self.dropout(dense_x)
dense_x = self.proj(dense_x)
if self.stride > 1:
dense_padding_mask = dense_padding_mask[:, :: self.stride]
if dense_padding_mask.size(1) != dense_x.size(1):
new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1])
diff = new_padding.size(1) - dense_padding_mask.size(1)
assert (
diff > 0
), f"{new_padding.shape}, {dense_padding_mask.shape}, {dense_x.shape}, {diff}"
if diff > 0:
new_padding[:, diff:] = dense_padding_mask
else:
assert diff < 0
new_padding = dense_padding_mask[:, :diff]
dense_padding_mask = new_padding
result = {}
token_x = None
if tokens is not None:
token_x = dense_x.new_zeros(tokens.numel(), self.output_dim)
token_x.scatter_(1, tokens.view(-1, 1).long(), 1)
token_x = token_x.view(tokens.shape + (self.output_dim,))
result["dense_x"] = dense_x
result["token_x"] = token_x
result["dense_padding_mask"] = dense_padding_mask
return result
@register_model("wav2vec_u", dataclass=Wav2vec_UConfig)
class Wav2vec_U(BaseFairseqModel):
def calc_gradient_penalty(self, real_data, fake_data):
b_size = min(real_data.size(0), fake_data.size(0))
t_size = min(real_data.size(1), fake_data.size(1))
if self.cfg.probabilistic_grad_penalty_slicing:
def get_slice(data, dim, target_size):
size = data.size(dim)
diff = size - target_size
if diff <= 0:
return data
start = np.random.randint(0, diff + 1)
return data.narrow(dim=dim, start=start, length=target_size)
real_data = get_slice(real_data, 0, b_size)
real_data = get_slice(real_data, 1, t_size)
fake_data = get_slice(fake_data, 0, b_size)
fake_data = get_slice(fake_data, 1, t_size)
else:
real_data = real_data[:b_size, :t_size]
fake_data = fake_data[:b_size, :t_size]
alpha = torch.rand(real_data.size(0), 1, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.to(real_data.device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = self.discriminator(interpolates, None)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
return gradient_penalty
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.update_num = num_updates
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def discrim_step(self, num_updates):
return num_updates % 2 == 1
def get_groups_for_update(self, num_updates):
return "discriminator" if self.discrim_step(num_updates) else "generator"
def __init__(self, cfg: Wav2vec_UConfig, target_dict):
super().__init__()
self.cfg = cfg
self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0
self.smoothness_weight = cfg.smoothness_weight
output_size = len(target_dict)
self.pad = target_dict.pad()
self.eos = target_dict.eos()
self.smoothing = cfg.smoothing
self.smoothing_one_sided = cfg.smoothing_one_sided
self.no_softmax = cfg.no_softmax
self.gumbel = cfg.gumbel
self.hard_gumbel = cfg.hard_gumbel
self.last_acc = None
self.gradient_penalty = cfg.gradient_penalty
self.code_penalty = cfg.code_penalty
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
assert self.blank_index != target_dict.unk()
self.discriminator = Discriminator(output_size, cfg)
for p in self.discriminator.parameters():
p.param_group = "discriminator"
self.pca_A = self.pca_b = None
d = cfg.input_dim
self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)
self.generator = Generator(d, output_size, cfg)
for p in self.generator.parameters():
p.param_group = "generator"
for p in self.segmenter.parameters():
p.param_group = "generator"
self.max_temp, self.min_temp, self.temp_decay = cfg.temp
self.curr_temp = self.max_temp
self.update_num = 0
@classmethod
def build_model(cls, cfg, task):
return cls(cfg, task.target_dictionary)
def get_logits(
self,
net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
normalize: bool = False,
):
logits = net_output["logits"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., self.blank_index] += self.blank_weight
elif self.blank_mode == "set":
logits[..., self.blank_index] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
padding = net_output["padding_mask"]
        if padding.any():
            logits[padding] = float("-inf")
            # chained boolean indexing (logits[padding][...]) would write to a copy;
            # index the class dimension first so padded frames keep +inf on blank
            logits[..., self.blank_index][padding] = float("inf")
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits.transpose(0, 1)
def get_normalized_probs(
self,
net_output: Tuple[
torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
],
log_probs: bool,
sample: Optional[Dict[str, torch.Tensor]] = None,
):
logits = self.get_logits(net_output)
probs = super().get_normalized_probs(logits, log_probs, sample)
# BTC -> TBC for ctc
probs = probs.transpose(0, 1)
return probs
def normalize(self, dense_x):
bsz, tsz, csz = dense_x.shape
if dense_x.numel() == 0:
raise Exception(dense_x.shape)
_, k = dense_x.max(-1)
hard_x = (
dense_x.new_zeros(bsz * tsz, csz)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(-1, csz)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
code_perplexity = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
)
avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0)
prob_perplexity = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
)
if not self.no_softmax:
if self.training and self.gumbel:
dense_x = F.gumbel_softmax(
dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel
).type_as(dense_x)
else:
dense_x = dense_x.softmax(-1)
return dense_x, code_perplexity, prob_perplexity
def forward(
self,
features,
padding_mask,
random_label=None,
dense_x_only=False,
segment=True,
):
if segment:
features, padding_mask = self.segmenter.pre_segment(features, padding_mask)
orig_size = features.size(0) * features.size(1) - padding_mask.sum()
gen_result = self.generator(features, random_label, padding_mask)
orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"]
orig_dense_padding_mask = gen_result["dense_padding_mask"]
if segment:
dense_x, dense_padding_mask = self.segmenter.logit_segment(
orig_dense_x, orig_dense_padding_mask
)
else:
dense_x = orig_dense_x
dense_padding_mask = orig_dense_padding_mask
dense_logits = dense_x
prob_perplexity = None
code_perplexity = None
if not (self.no_softmax and dense_x_only):
dense_x, code_perplexity, prob_perplexity = self.normalize(dense_logits)
if dense_x_only or self.discriminator is None:
return {
"logits": dense_x,
"padding_mask": dense_padding_mask,
}
token_padding_mask = random_label == self.pad
dense_y = self.discriminator(dense_x, dense_padding_mask)
token_y = self.discriminator(token_x, token_padding_mask)
sample_size = features.size(0)
d_step = self.discrim_step(self.update_num)
fake_smooth = self.smoothing
real_smooth = self.smoothing
if self.smoothing_one_sided:
fake_smooth = 0
zero_loss = None
smoothness_loss = None
code_pen = None
if d_step:
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_ones(dense_y.shape) - fake_smooth,
reduction="sum",
)
loss_token = F.binary_cross_entropy_with_logits(
token_y,
token_y.new_zeros(token_y.shape) + real_smooth,
reduction="sum",
)
if self.training and self.gradient_penalty > 0:
grad_pen = self.calc_gradient_penalty(token_x, dense_x)
grad_pen = grad_pen.sum() * self.gradient_penalty
else:
grad_pen = None
else:
grad_pen = None
loss_token = None
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_zeros(dense_y.shape) + fake_smooth,
reduction="sum",
)
num_vars = dense_x.size(-1)
if prob_perplexity is not None:
code_pen = (num_vars - prob_perplexity) / num_vars
code_pen = code_pen * sample_size * self.code_penalty
if self.smoothness_weight > 0:
smoothness_loss = F.mse_loss(
dense_logits[:, :-1], dense_logits[:, 1:], reduction="none"
)
smoothness_loss[dense_padding_mask[:, 1:]] = 0
smoothness_loss = (
smoothness_loss.mean() * sample_size * self.smoothness_weight
)
result = {
"losses": {
"grad_pen": grad_pen,
"code_pen": code_pen,
"smoothness": smoothness_loss,
},
"temp": self.curr_temp,
"code_ppl": code_perplexity,
"prob_ppl": prob_perplexity,
"d_steps": int(d_step),
"sample_size": sample_size,
}
suff = "_d" if d_step else "_g"
result["losses"]["dense" + suff] = loss_dense
result["losses"]["token" + suff] = loss_token
return result
| 20,954 | 31.844828 | 90 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .wav2vec_u import Wav2vec_U
__all__ = [
"Wav2vec_U",
]
| 244 | 19.416667 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/ltr_to_wrd.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
for line in sys.stdin:
print(line.replace(" ", "").replace("|", " ").strip())
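# Example: the letter-level line "h e l l o | w o r l d |" becomes "hello world".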
if __name__ == "__main__":
main()
| 359 | 20.176471 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys
import faiss
import torch.nn.functional as F
from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader
def get_parser():
parser = argparse.ArgumentParser(description="apply clusters")
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='split to process', required=True)
parser.add_argument('--labels', help='split to process', default="phn")
parser.add_argument('--path', help='path to pca and centroids', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
# fmt: on
return parser
def get_iterator(args):
label_path = osp.join(args.data, f"{args.split}.{args.labels}")
if osp.exists(label_path):
lp = open(label_path, "r")
else:
lp = None
with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [line.rstrip() for line in lines if len(line) > 0]
if lp is not None:
lbls = [line.rstrip() for line in lp]
else:
lbls = [None] * len(files)
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname, lbl in zip(files, lbls):
file = osp.join(root, fname.split("\t")[0])
feats = reader.get_feats(file)
yield feats.data, fname, lbl
return iterate, num, root
def main():
parser = get_parser()
args = parser.parse_args()
spec = osp.basename(args.path)
try:
faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0]
except:
print(spec)
raise
print("Faiss Spec:", faiss_spec, file=sys.stderr)
if faiss_spec.pca:
A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda()
b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda()
print("Loaded PCA", file=sys.stderr)
centroids = np.load(osp.join(args.path, "centroids.npy"))
print("Loaded centroids", centroids.shape, file=sys.stderr)
res = faiss.StandardGpuResources()
index_flat = (
faiss.IndexFlatL2(centroids.shape[1])
if not faiss_spec.sphere
else faiss.IndexFlatIP(centroids.shape[1])
)
faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
faiss_index.add(centroids)
generator, num, root = get_iterator(args)
iterator = generator()
had_labels = False
label_path = osp.join(args.path, f"{args.split}.{args.labels}")
with torch.no_grad():
with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open(
osp.join(args.path, f"{args.split}.tsv"), "w"
) as pp, open(label_path, "w") as lp:
print(root, file=pp)
for f, fname, lbl in tqdm.tqdm(iterator, total=num):
if faiss_spec.pca:
f = torch.mm(f, A) + b
if faiss_spec.norm:
f = F.normalize(f, p=2, dim=-1)
f = f.cpu().numpy()
_, z = faiss_index.search(f, 1)
print(" ".join(str(x.item()) for x in z), file=fp)
print(fname, file=pp)
if lbl is not None:
print(lbl, file=lp)
had_labels = True
if not had_labels:
os.remove(label_path)
if __name__ == "__main__":
main()
| 4,015 | 30.131783 | 129 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/merge_clusters.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import random
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--cluster-dir', help='where the clusters are')
parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
cluster_path = osp.join(args.cluster_dir, args.split + ".src")
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
sizes = []
offsets = []
offset = 0
with open(source_path + ".lengths", "r") as len_f:
for line in len_f:
length = int(line.rstrip())
sizes.append(length)
offsets.append(offset)
offset += length
clusters = []
with open(cluster_path, "r") as cf:
for line in cf:
line = line.rstrip()
items = line.split()
items = list(map(int, items))
clusters.append(items)
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
def merge(feats, clust):
feats = torch.from_numpy(feats.copy())
clust = torch.LongTensor(clust)
_, counts = clust.unique_consecutive(return_counts=True)
curr = 0
merged = []
for c in counts:
c = c.item()
start = curr
end = curr + c
curr += c
if args.pooling == "mean":
new_x = feats[start:end].mean(dim=0)
elif args.pooling == "sample":
new_x = feats[start + int(random.random() * c)]
else:
raise NotImplementedError()
merged.append(new_x)
return torch.stack(merged, dim=0).numpy()
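    # Illustrative example: for clust = [7, 7, 3, 3, 3, 9] the six frames collapse
    # into three pooled rows (one per consecutive run of identical cluster ids).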
with open(save_path + ".lengths", "w") as l_f:
for size, offset, clust in tqdm.tqdm(
zip(sizes, offsets, clusters), total=len(sizes)
):
end = size + offset
feats = features[offset:end]
feats = merge(feats, clust)
print(len(feats), file=l_f)
npaa.append(feats)
if __name__ == "__main__":
main()
| 3,543 | 29.817391 | 110 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fasttext as ft
import os
import regex
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="reads text from stdin and outputs normalized, lid-filtered version to stdout"
)
parser.add_argument(
"--fasttext-model",
help="path to fasttext model",
default="lid.187.bin",
)
parser.add_argument("--lang", help="language id", required=True)
parser.add_argument(
"--lid-threshold",
type=float,
help="threshold for this lang id probability",
default=0.4,
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
lg = args.lang.lower()
lg_label = f"__label__{lg}"
thresh = args.lid_threshold
if os.path.exists(args.fasttext_model):
model = ft.load_model(args.fasttext_model)
else:
print(
f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. "
f"To enable language filtering, please download the latest language id model "
f"from https://fasttext.cc/docs/en/language-identification.html",
file=sys.stderr,
)
model = None
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
if model is not None:
lid, prob = model.predict(line, k=100)
try:
target_idx = lid.index(lg_label)
except ValueError:
continue
if target_idx == 0 or prob[target_idx] >= thresh:
print(line)
else:
print(line)
if __name__ == "__main__":
main()
| 1,997 | 26.369863 | 114 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/remove_silence.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reads speech intervals from a .vads file, removes the silences outside those intervals, and saves the trimmed audio under the output folder.
paths=shards/train.tsv
vads=shards/train.vads
python remove_silence.py --paths $paths --vads $vads
"""
import os
import argparse
import torch
import torchaudio
import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="", type=str)
parser.add_argument("--vads", default="", type=str)
parser.add_argument("--out", type=str)
params = parser.parse_args()
# load paths
paths = []
with open(params.tsv) as f:
root = next(f).rstrip()
for line in f:
paths.append(os.path.join(root, line.rstrip().split("\t")[0]))
# load vads
list_intervals = []
with open(params.vads) as f:
for line in f:
interval = [
[int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split()
]
list_intervals.append(interval)
# load audio and keep only intervals (i.e. remove silences)
for i in tqdm.trange(len(paths)):
data, _ = torchaudio.load(paths[i])
if len(list_intervals[i]) > 0:
data_filtered = torch.cat(
[data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]]
).unsqueeze(0)
else:
data_filtered = data
# YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH
# outpath = params.out + '/'.join(paths[i].split('/')[-1])
outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:])
if not os.path.isdir("/".join(outpath.split("/")[:-1])):
os.makedirs("/".join(outpath.split("/")[:-1]))
if not os.path.exists(outpath):
torchaudio.save(outpath, data_filtered, sample_rate=16000)
else:
print(outpath, "exists!")
| 1,927 | 29.125 | 128 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/normalize_text.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import regex
import sys
def main():
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
for line in sys.stdin:
line = line.strip()
line = filter_r.sub(" ", line)
line = " ".join(line.split())
print(line)
if __name__ == "__main__":
main()
| 489 | 20.304348 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import sys
def get_parser():
parser = argparse.ArgumentParser(
description="converts words to phones adding optional silences around in between words"
)
parser.add_argument(
"--sil-prob",
"-s",
type=float,
default=0,
help="probability of inserting silence between each word",
)
parser.add_argument(
"--surround",
action="store_true",
help="if set, surrounds each example with silence",
)
parser.add_argument(
"--lexicon",
help="lexicon to convert to phones",
required=True,
)
return parser
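# Example usage (a sketch; the lexicon file name is a placeholder and holds one
# "WORD PH1 PH2 ..." entry per line):
#   cat train.wrd | python phonemize_with_sil.py -s 0.25 --surround \
#       --lexicon lexicon.txt > train.phn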
def main():
parser = get_parser()
args = parser.parse_args()
sil_prob = args.sil_prob
surround = args.surround
sil = "<SIL>"
wrd_to_phn = {}
with open(args.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
assert len(items) > 1, line
assert items[0] not in wrd_to_phn, items
wrd_to_phn[items[0]] = items[1:]
for line in sys.stdin:
words = line.strip().split()
if not all(w in wrd_to_phn for w in words):
continue
phones = []
if surround:
phones.append(sil)
sample_sil_probs = None
if sil_prob > 0 and len(words) > 1:
sample_sil_probs = np.random.random(len(words) - 1)
for i, w in enumerate(words):
phones.extend(wrd_to_phn[w])
if (
sample_sil_probs is not None
and i < len(sample_sil_probs)
and sample_sil_probs[i] < sil_prob
):
phones.append(sil)
if surround:
phones.append(sil)
print(" ".join(phones))
if __name__ == "__main__":
main()
| 2,045 | 23.357143 | 95 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/filter_lexicon.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from fairseq.data import Dictionary
def get_parser():
parser = argparse.ArgumentParser(
description="filters a lexicon given a unit dictionary"
)
parser.add_argument("-d", "--unit-dict", help="unit dictionary", required=True)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
d = Dictionary.load(args.unit_dict)
symbols = set(d.symbols)
for line in sys.stdin:
items = line.rstrip().split()
skip = len(items) < 2
for x in items[1:]:
if x not in symbols:
skip = True
break
if not skip:
print(line, end="")
if __name__ == "__main__":
main()
| 939 | 21.926829 | 83 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/wrd_to_ltr.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def main():
for line in sys.stdin:
print(" ".join(list(line.strip().replace(" ", "|"))) + " |")
if __name__ == "__main__":
main()
| 365 | 20.529412 | 68 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/filter_tsv.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", required=True, type=str)
parser.add_argument("--no-skip", action="store_true")
parser.add_argument("--keep", action="store_true")
params = parser.parse_args()
def get_fname(line):
p = os.path.basename(line.split("\t")[0])
p = os.path.splitext(p)[0]
return p
# filenames to exclude
seen = set()
with open(params.tsv) as f:
if not params.no_skip:
root = next(f).rstrip()
for line in f:
seen.add(get_fname(line))
for i, line in enumerate(sys.stdin):
exists = get_fname(line) in seen
keep = (exists and params.keep) or (not exists and not params.keep)
if i == 0 or keep:
print(line, end="")
| 955 | 24.157895 | 71 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from g2p_en import G2p
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--compact",
action="store_true",
help="if set, compacts phones",
)
args = parser.parse_args()
compact = args.compact
wrd_to_phn = {}
g2p = G2p()
for line in sys.stdin:
words = line.strip().split()
phones = []
for w in words:
if w not in wrd_to_phn:
wrd_to_phn[w] = g2p(w)
if compact:
wrd_to_phn[w] = [
p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w]
]
phones.extend(wrd_to_phn[w])
try:
print(" ".join(phones))
except:
print(wrd_to_phn, words, phones, file=sys.stderr)
raise
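# Illustrative example (actual output depends on the g2p_en model): piping
# "hello world" through this script with --compact prints something like
# "HH AH L OW W ER L D", i.e. ARPAbet phones with stress digits stripped.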
if __name__ == "__main__":
main()
| 1,104 | 23.021739 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/apply_pca.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
    data_path = source_path + "_unfiltered" if args.unfiltered else source_path
    print(f"data path: {data_path}")
    features = np.load(data_path + ".npy", mmap_mode="r")
pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
    copyfile(data_path + ".lengths", save_path + ".lengths")
if osp.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if osp.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
batches = math.ceil(features.shape[0] / args.batch_size)
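    # project features batch-by-batch on the GPU (x @ A + b) and append the results to the output .npy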
with torch.no_grad():
for b in tqdm.trange(batches):
start = b * args.batch_size
end = start + args.batch_size
x = torch.from_numpy(features[start:end]).cuda()
x = torch.matmul(x, pca_A) + pca_b
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| 2,496 | 31.428571 | 114 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import os
import os.path as osp
import random
import numpy as np
import tqdm
import torch
from collections import namedtuple
import faiss
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--sample-pct', '-r', type=float, help='percentage of timesteps to sample', default=0)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--faiss-specs', '-f', type=str,
help='faiss index specs; separated by space '
'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
'PCAx if exists first apply PCA '
'NORM if exists, normalize the vector by L2 norm '
'CLUSx must exist, cluster to x clusters '
                             'SPHERICAL if exists, apply spherical kmeans',
default='l2')
# fmt: on
return parser
faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])
def parse_faiss_specs(specs_str):
specs = []
for ss in specs_str.split():
comps = ss.split("_")
pca = 0
norm = False
n_clus = 0
sphere = False
for c in comps:
if c.startswith("PCA"):
pca = int(c[3:])
elif c == "NORM":
norm = True
elif c.startswith("CLUS"):
n_clus = int(c[4:])
elif c == "SPHERICAL":
sphere = True
assert n_clus > 0
specs.append(
faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
)
return specs
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)
self.layer = layer
if "cfg" in state:
w2v_args = state["cfg"]
task = fairseq.tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
else:
w2v_args = state["args"]
task = fairseq.tasks.setup_task(w2v_args)
model = task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
model.cuda()
self.model = model
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float().cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
def get_iterator(args):
with open(args.data, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
if getattr(args, "sample_pct", 0) > 0:
files = random.sample(files, int(args.sample_pct * len(files)))
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
feats = reader.get_feats(fname)
yield feats.cpu().numpy()
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
faiss_specs = parse_faiss_specs(args.faiss_specs)
print("Faiss Specs:", faiss_specs)
feat_path = osp.join(args.save_dir, "features")
if osp.exists(feat_path + ".npy"):
feats = np.load(feat_path + ".npy")
else:
generator, num = get_iterator(args)
iterator = generator()
feats = []
for f in tqdm.tqdm(iterator, total=num):
feats.append(f)
del iterator
del generator
feats = np.concatenate(feats)
print(feats.shape)
os.makedirs(args.save_dir, exist_ok=True)
# np.save(feat_path, feats)
gc.collect()
torch.cuda.empty_cache()
reload = False
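    # faiss.normalize_L2 works in place; when no PCA copy was made, the raw feature array must be reloaded before processing the next spec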
for spec in faiss_specs:
print("Processing spec", spec)
if reload:
print("Reloading...")
del feats
gc.collect()
feats = np.load(feat_path + ".npy")
save_path = osp.join(args.save_dir, spec.spec_str)
os.makedirs(save_path, exist_ok=True)
d = feats.shape[-1]
x = feats
if spec.pca > 0:
print("Computing PCA")
pca = faiss.PCAMatrix(d, spec.pca)
pca.train(x)
d = spec.pca
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
np.save(osp.join(save_path, "pca_A"), A.T)
np.save(osp.join(save_path, "pca_b"), b)
print("Applying PCA")
x = pca.apply_py(x)
if spec.norm:
reload = spec.pca <= 0
print("Normalizing")
faiss.normalize_L2(x)
print("Computing kmeans")
kmeans = faiss.Kmeans(
d,
spec.n_clus,
niter=50,
verbose=True,
spherical=spec.sphere,
max_points_per_centroid=feats.shape[0],
gpu=True,
nredo=3,
)
kmeans.train(x)
np.save(osp.join(save_path, "centroids"), kmeans.centroids)
del kmeans
del x
gc.collect()
if __name__ == "__main__":
main()
| 6,315 | 28.933649 | 129 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/pca.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import faiss
def get_parser():
parser = argparse.ArgumentParser(
description="compute a pca matrix given an array of numpy features"
)
# fmt: off
parser.add_argument('data', help='numpy file containing features')
parser.add_argument('--output', help='where to save the pca matrix', required=True)
parser.add_argument('--dim', type=int, help='dim for pca reduction', required=True)
parser.add_argument('--eigen-power', type=float, default=0, help='eigen power, -0.5 for whitening')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
print("Reading features")
x = np.load(args.data, mmap_mode="r")
print("Computing PCA")
pca = faiss.PCAMatrix(x.shape[-1], args.dim, args.eigen_power)
pca.train(x)
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
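    # save the transposed projection so features can later be transformed as x @ A + b (as done in apply_pca.py)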
os.makedirs(args.output, exist_ok=True)
prefix = str(args.dim)
if args.eigen_power != 0:
prefix += f"_{args.eigen_power}"
np.save(osp.join(args.output, f"{prefix}_pca_A"), A.T)
np.save(osp.join(args.output, f"{prefix}_pca_b"), b)
if __name__ == "__main__":
main()
| 1,471 | 26.259259 | 103 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/mean_pool.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="mean pools representations by compressing uniform splits of the data"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to')
    parser.add_argument('--remove-extra', action='store_true', help="if true, removes extra states that can't be pooled, otherwise pads with 0s")
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
with open(source_path + ".lengths", "r") as lf:
lengths = lf.readlines()
fsz = features.shape[-1]
start = 0
with torch.no_grad():
with open(save_path + ".lengths", "w") as lengths_out:
for length in tqdm.tqdm(lengths):
length = int(length)
end = start + length
feats = features[start:end]
start += length
x = torch.from_numpy(feats).cuda()
target_num = math.ceil(length * args.subsample_rate)
rem = length % target_num
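                # make the frame count divisible into target_num pools: either trim trailing frames (--remove-extra) or pad by repeating the last frame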
if rem > 0:
if args.remove_extra:
to_rem = target_num - rem
target_num -= 1
x = x[:-to_rem]
else:
to_add = target_num - rem
x = F.pad(x, [0, 0, 0, to_add])
x[-to_add:] = x[-to_add - 1]
x = x.view(target_num, -1, fsz)
x = x.mean(dim=-2)
print(target_num, file=lengths_out)
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| 3,187 | 30.88 | 144 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/copy_labels.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end="")
| 298 | 26.181818 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/vads.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from copy import deepcopy
from scipy.signal import lfilter
import numpy as np
from tqdm import tqdm
import soundfile as sf
import os.path as osp
def get_parser():
parser = argparse.ArgumentParser(description="compute vad segments")
parser.add_argument(
"--rvad-home",
"-r",
help="path to rvad home (see https://github.com/zhenghuatan/rVADfast)",
required=True,
)
return parser
def rvad(speechproc, path):
winlen, ovrlen, pre_coef, nfilter, nftt = 0.025, 0.01, 0.97, 20, 512
ftThres = 0.5
vadThres = 0.4
opts = 1
data, fs = sf.read(path)
assert fs == 16_000, "sample rate must be 16khz"
ft, flen, fsh10, nfr10 = speechproc.sflux(data, fs, winlen, ovrlen, nftt)
# --spectral flatness --
pv01 = np.zeros(ft.shape[0])
pv01[np.less_equal(ft, ftThres)] = 1
pitch = deepcopy(ft)
pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
# --filtering--
ENERGYFLOOR = np.exp(-50)
b = np.array([0.9770, -0.9770])
a = np.array([1.0000, -0.9540])
fdata = lfilter(b, a, data, axis=0)
# --pass 1--
noise_samp, noise_seg, n_noise_samp = speechproc.snre_highenergy(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk
)
# sets noisy segments to zero
for j in range(n_noise_samp):
fdata[range(int(noise_samp[j, 0]), int(noise_samp[j, 1]) + 1)] = 0
vad_seg = speechproc.snre_vad(
fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres
)
return vad_seg, data
def main():
parser = get_parser()
args = parser.parse_args()
sys.path.append(args.rvad_home)
import speechproc
stride = 160
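    # rVAD uses 10 ms frames; at a 16 kHz sample rate that is 160 samples per frame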
lines = sys.stdin.readlines()
root = lines[0].rstrip()
for fpath in tqdm(lines[1:]):
path = osp.join(root, fpath.split()[0])
vads, wav = rvad(speechproc, path)
start = None
vad_segs = []
for i, v in enumerate(vads):
if start is None and v == 1:
start = i * stride
elif start is not None and v == 0:
vad_segs.append((start, i * stride))
start = None
if start is not None:
vad_segs.append((start, len(wav)))
print(" ".join(f"{v[0]}:{v[1]}" for v in vad_segs))
if __name__ == "__main__":
main()
| 2,569 | 24.959596 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True)
parser.add_argument('--layer', type=int, default=14, help='which layer to use')
# fmt: on
return parser
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[cp_file]
)
model = model[0]
model.eval()
model.cuda()
self.model = model
self.task = task
self.layer = layer
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).float().cuda()
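            # models trained with input normalization expect layer-normalized raw waveforms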
if self.task.cfg.normalize:
assert source.dim() == 1, source.dim()
with torch.no_grad():
source = F.layer_norm(source, source.shape)
source = source.view(1, -1)
m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer)
return m_res["x"].squeeze(0).cpu()
def get_iterator(args):
with open(osp.join(args.data, args.split) + ".tsv", "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
w2v_feats = reader.get_feats(fname)
yield w2v_feats
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
os.makedirs(args.save_dir, exist_ok=True)
def create_files(dest):
copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv")
if osp.exists(osp.join(args.data, args.split) + ".wrd"):
copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd")
if osp.exists(osp.join(args.data, args.split) + ".phn"):
copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn")
if osp.exists(dest + ".npy"):
os.remove(dest + ".npy")
npaa = NpyAppendArray(dest + ".npy")
return npaa
save_path = osp.join(args.save_dir, args.split)
npaa = create_files(save_path)
generator, num = get_iterator(args)
iterator = generator()
with open(save_path + ".lengths", "w") as l_f:
for w2v_feats in tqdm.tqdm(iterator, total=num):
print(len(w2v_feats), file=l_f)
if len(w2v_feats) > 0:
npaa.append(w2v_feats.numpy())
if __name__ == "__main__":
main()
| 3,673 | 29.616667 | 105 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/scripts/wer.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implement unsupervised metric for decoding hyperparameter selection:
$$ alpha * LM_PPL + Viterbi_UER(%) * 100 $$
"""
import argparse
import logging
import sys
import editdistance
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--hypo", help="hypo transcription", required=True)
parser.add_argument(
"-r", "--reference", help="reference transcription", required=True
)
return parser
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
if g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug(
(
f"wer = {wer * 100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
)
)
return wer
def main():
args = get_parser().parse_args()
errs = 0
count = 0
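    # UER = total edit distance between hypothesis and reference tokens divided by the number of reference tokens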
with open(args.hypo, "r") as hf, open(args.reference, "r") as rf:
for h, r in zip(hf, rf):
h = h.rstrip().split()
r = r.rstrip().split()
errs += editdistance.eval(r, h)
count += len(r)
logger.info(f"UER: {errs / count * 100:.2f}%")
if __name__ == "__main__":
main()
def load_tra(tra_path):
with open(tra_path, "r") as f:
uid_to_tra = {}
for line in f:
uid, tra = line.split(None, 1)
uid_to_tra[uid] = tra
logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
return uid_to_tra
| 2,264 | 26.289157 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py
|
import kaldi_io
import numpy as np
import os
def get_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("w2v_dir", help="wav2vec feature and text directory")
parser.add_argument("tar_root", help="output data directory in kaldi's format")
parser.add_argument("split", help="name of the subset")
parser.add_argument("--label", default="", help="if specified, copy labels too")
return parser
def main():
parser = get_parser()
args = parser.parse_args()
tar_dir = os.path.join(args.tar_root, args.split)
os.makedirs(tar_dir, exist_ok=True)
lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths")
with open(lengths_path) as f:
lengths = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengths[:-1]).tolist()
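    # utterance i occupies rows offsets[i] : offsets[i] + lengths[i] of the flat feature matrix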
feats = np.load(
os.path.join(args.w2v_dir, f"{args.split}.npy"),
mmap_mode="r"
)
assert feats.shape[0] == sum(lengths), \
f"lengths mismatch {feats.shape[0]} != {sum(lengths)}"
ark_path = os.path.join(tar_dir, "feats.ark")
scp_path = os.path.join(tar_dir, "feats.scp")
wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}"
with kaldi_io.open_or_fd(wspec, "wb") as f:
for idx, (offset, length) in enumerate(zip(offsets, lengths)):
feat = feats[offset:offset+length]
kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}")
u2s_path = os.path.join(tar_dir, "utt2spk")
s2u_path = os.path.join(tar_dir, "spk2utt")
with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u:
for idx in range(len(lengths)):
f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n")
f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n")
if bool(args.label):
lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}")
txt_path = os.path.join(tar_dir, "text")
with open(lab_path) as f_lab, open(txt_path, "w") as f_txt:
for idx, line in enumerate(f_lab):
f_txt.write(f"utt{idx:010d} {line}")
if __name__ == "__main__":
main()
| 2,137 | 36.508772 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/kaldi_self_train/st/local/copy_aligned_text.py
|
import sys
for idx, line in enumerate(sys.stdin):
print(f"utt{idx:010d} {line}", end='')
| 93 | 22.5 | 42 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/kaldi_self_train/st/local/unsup_select.py
|
"""
Implement unsupervised metric for decoding hyperparameter selection:
$$ alpha * LM_PPL + Viterbi_UER(%) * 100 $$
"""
import argparse
import logging
import math
import sys
import kenlm
import editdistance
from g2p_en import G2p
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("ref_tra", help="reference pseudo labels")
parser.add_argument("hyp_tra", help="decoded pseudo labels to be assess")
parser.add_argument("--kenlm_path", default="/checkpoint/abaevski/data/speech/libri/librispeech_lm_novox.phnc_o5.bin", help="")
parser.add_argument("--uppercase", action="store_true", help="")
parser.add_argument("--skipwords", default="", help="")
parser.add_argument("--gt_tra", default="", help="ground truth pseudo labels for computing oracle WER")
parser.add_argument("--min_vt_uer", default=0.0, type=float)
parser.add_argument("--phonemize", action="store_true", help="phonemize word hypotheses, used when reference is phone transcript")
parser.add_argument("--phonemize_lexicon", default="", type=str, help="use a lexicon for phonemizing")
return parser
def load_tra(tra_path):
with open(tra_path, "r") as f:
uid_to_tra = {}
for line in f:
toks = line.rstrip().split()
uid, tra = toks[0], " ".join(toks[1:])
uid_to_tra[uid] = tra
logger.debug(f"loaded {len(uid_to_tra)} utterances from {tra_path}")
return uid_to_tra
def load_lex(lex_path):
with open(lex_path, "r") as f:
w2p = {}
for line in f:
w, p = line.rstrip().split(None, 1)
w2p[w] = p.split()
return w2p
def compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict):
d_cnt = 0
w_cnt = 0
w_cnt_h = 0
for uid in hyp_uid_to_tra:
ref = ref_uid_to_tra[uid].split()
if g2p_dict is not None:
hyp = []
for word in hyp_uid_to_tra[uid].split():
if word in g2p_dict:
hyp = hyp + g2p_dict[word]
else:
logger.warning(f"{word} not in g2p_dict")
elif g2p is not None:
hyp = g2p(hyp_uid_to_tra[uid])
hyp = [p for p in hyp if p != "'" and p != " "]
hyp = [p[:-1] if p[-1].isnumeric() else p for p in hyp]
else:
hyp = hyp_uid_to_tra[uid].split()
logger.debug((
f"======================\n"
f"HYP: {' '.join(hyp)}\n"
f"REF: {' '.join(ref)}"
))
d_cnt += editdistance.eval(ref, hyp)
w_cnt += len(ref)
w_cnt_h += len(hyp)
wer = float(d_cnt) / w_cnt
logger.debug((
f"wer = {wer*100:.2f}%; num. of ref words = {w_cnt}; "
f"num. of hyp words = {w_cnt_h}; num. of sentences = {len(ref_uid_to_tra)}"
))
return wer
def compute_lm_ppl(hyp_uid_to_tra, score_fn):
lm_score = 0.
w_cnt = 0
for hyp in hyp_uid_to_tra.values():
cur_score = score_fn(hyp)
cur_cnt = len(hyp.split()) + 1 # plus one for </s>
lm_score += cur_score
w_cnt += cur_cnt
logger.debug((
f"======================\n"
f"score sum/avg = {cur_score:.2f}/{cur_score/cur_cnt:.2f}\n"
f"hyp = {hyp}"
))
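    # kenlm returns log10 probabilities, so perplexity is 10 ** (-average log-probability per token)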
lm_ppl = math.pow(10, -lm_score / w_cnt)
logger.debug(f"lm ppl = {lm_ppl:.2f}; num. of words = {w_cnt}")
return lm_ppl
def main():
args = get_parser().parse_args()
logger.debug(f"Args: {args}")
ref_uid_to_tra = load_tra(args.ref_tra)
hyp_uid_to_tra = load_tra(args.hyp_tra)
assert not bool(set(hyp_uid_to_tra.keys()) - set(ref_uid_to_tra.keys()))
lm = kenlm.Model(args.kenlm_path)
skipwords = set(args.skipwords.split(","))
def compute_lm_score(s):
s = " ".join(w for w in s.split() if w not in skipwords)
s = s.upper() if args.uppercase else s
return lm.score(s)
g2p, g2p_dict = None, None
if args.phonemize:
if args.phonemize_lexicon:
g2p_dict = load_lex(args.phonemize_lexicon)
else:
g2p = G2p()
wer = compute_wer(ref_uid_to_tra, hyp_uid_to_tra, g2p, g2p_dict)
lm_ppl = compute_lm_ppl(hyp_uid_to_tra, compute_lm_score)
gt_wer = -math.inf
if args.gt_tra:
gt_uid_to_tra = load_tra(args.gt_tra)
gt_wer = compute_wer(gt_uid_to_tra, hyp_uid_to_tra, None, None)
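    # selection metric: log LM perplexity scaled by the Viterbi UER (floored at --min_vt_uer)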
score = math.log(lm_ppl) * max(wer, args.min_vt_uer)
logging.info(f"{args.hyp_tra}: score={score:.4f}; wer={wer*100:.2f}%; lm_ppl={lm_ppl:.4f}; gt_wer={gt_wer*100:.2f}%")
if __name__ == "__main__":
main()
| 4,767 | 34.058824 | 134 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/data/random_input_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List
from fairseq.data import BaseWrapperDataset, data_utils
class RandomInputDataset(BaseWrapperDataset):
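    # wraps a dataset and replaces one of its input fields with a randomly drawn item from another dataset (e.g. pairing audio with random unpaired text)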
def __init__(
self,
dataset,
random_input_dataset,
input_key_path: List[str],
add_to_input,
pad_idx,
):
super().__init__(dataset)
self.random_input_dataset = random_input_dataset
if isinstance(input_key_path, str):
input_key_path = [input_key_path]
assert len(input_key_path) > 0
self.input_key_path = input_key_path
self.add_to_input = add_to_input
self.pad_idx = pad_idx
def get_target(self, item):
target_loc = item
for p in self.input_key_path[:-1]:
target_loc = target_loc[p]
return self.input_key_path[-1], target_loc
def get_target_value(self, item):
k, target_loc = self.get_target(item)
return target_loc[k]
def __getitem__(self, index):
item = self.dataset[index]
k, target_loc = self.get_target(item)
target_loc[k] = random.choice(self.random_input_dataset)
return item
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
random_inputs = data_utils.collate_tokens(
[self.get_target_value(s) for s in samples if s["id"] in indices],
pad_idx=self.pad_idx,
left_pad=False,
)
k, target_loc = self.get_target(
collated if not self.add_to_input else collated["net_input"]
)
target_loc[k] = random_inputs
return collated
| 1,905 | 29.253968 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .extracted_features_dataset import ExtractedFeaturesDataset
from .random_input_dataset import RandomInputDataset
__all__ = [
"ExtractedFeaturesDataset",
"RandomInputDataset",
]
| 370 | 25.5 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/data/extracted_features_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
def __init__(
self,
path,
split,
min_length=3,
max_length=None,
labels=None,
label_dict=None,
shuffle=True,
sort_by_length=True,
):
super().__init__()
self.min_length = min_length
self.max_length = max_length
self.shuffle = shuffle
self.sort_by_length = sort_by_length
self.label_dict = label_dict
if labels is not None:
assert label_dict is not None
self.sizes = []
self.offsets = []
self.labels = []
path = os.path.join(path, split)
data_path = path
self.data = np.load(data_path + ".npy", mmap_mode="r")
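        # features are stored as a single flat [total_frames, dim] array; the .lengths file gives per-utterance frame counts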
offset = 0
skipped = 0
if not os.path.exists(path + f".{labels}"):
labels = None
with open(data_path + ".lengths", "r") as len_f, open(
path + f".{labels}", "r"
) if labels is not None else contextlib.ExitStack() as lbl_f:
for line in len_f:
length = int(line.rstrip())
lbl = None if labels is None else next(lbl_f).rstrip().split()
if length >= min_length and (
max_length is None or length <= max_length
):
self.sizes.append(length)
self.offsets.append(offset)
if lbl is not None:
self.labels.append(lbl)
offset += length
self.sizes = np.asarray(self.sizes)
self.offsets = np.asarray(self.offsets)
logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")
def __getitem__(self, index):
offset = self.offsets[index]
end = self.sizes[index] + offset
feats = torch.from_numpy(self.data[offset:end].copy()).float()
res = {"id": index, "features": feats}
if len(self.labels) > 0:
res["target"] = self.label_dict.encode_line(
self.labels[index],
line_tokenizer=lambda x: x,
append_eos=False,
)
return res
def __len__(self):
return len(self.sizes)
def collater(self, samples):
if len(samples) == 0:
return {}
features = [s["features"] for s in samples]
sizes = [len(s) for s in features]
target_size = max(sizes)
collated_features = features[0].new_zeros(
len(features), target_size, features[0].size(-1)
)
padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
for i, (f, size) in enumerate(zip(features, sizes)):
collated_features[i, :size] = f
padding_mask[i, size:] = True
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"features": collated_features, "padding_mask": padding_mask},
}
if len(self.labels) > 0:
target = data_utils.collate_tokens(
[s["target"] for s in samples],
pad_idx=self.label_dict.pad(),
left_pad=False,
)
res["target"] = target
return res
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
if self.sort_by_length:
order.append(self.sizes)
return np.lexsort(order)[::-1]
else:
return order[0]
| 4,170 | 27.765517 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from dataclasses import dataclass, field
import logging
import math
import os
from typing import Optional
import torch
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from ..data import ExtractedFeaturesDataset, RandomInputDataset
from fairseq.data import (
Dictionary,
data_utils,
StripTokenDataset,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed.utils import get_data_parallel_world_size
from omegaconf import MISSING
from examples.speech_recognition.kaldi.kaldi_decoder import (
KaldiDecoder,
KaldiDecoderConfig,
)
logger = logging.getLogger(__name__)
@dataclass
class DecodingConfig(FairseqDataclass):
kenlm_path: Optional[str] = None
lm_weight: float = 0
blank_weight: float = 0
@dataclass
class UnpairedAudioTextConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory containing audio"}
)
text_data: str = field(
default=MISSING, metadata={"help": "path to data directory containing text"}
)
max_length: Optional[int] = None
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
unfiltered: bool = field(
default=False, metadata={"help": "load data with _unfiltered suffix"}
)
ctc_eval: bool = field(
default=False, metadata={"help": "eval UER as if computed by CTC"}
)
sort_by_length: bool = field(
default=True, metadata={"help": "sort examples by length of audio timesteps"}
)
shuffle: bool = field(default=True, metadata={"help": "shuffle examples"})
append_eos: bool = field(default=False, metadata={"help": "append eos"})
uppercase: Optional[bool] = field(
default=False, metadata={"help": "uppercase for LM score computation"}
)
skipwords: Optional[str] = field(
default="",
metadata={
"help": "comma-separated words to be removed for LM score computation"
},
)
kenlm_path: Optional[str] = None
vocab_usage_power: float = 2
word_decoder_config: Optional[KaldiDecoderConfig] = None
word_kenlm_path: Optional[str] = None
decoding_config: DecodingConfig = DecodingConfig()
@register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig)
class UnpairedAudioText(FairseqTask):
""" """
cfg: UnpairedAudioTextConfig
def __init__(
self,
cfg: UnpairedAudioTextConfig,
source_dictionary=None,
target_dictionary=None,
):
super().__init__(cfg)
self._target_dictionary = target_dictionary
self._source_dictionary = source_dictionary
self.num_symbols = (
len([s for s in target_dictionary.symbols if not s.startswith("madeup")])
- target_dictionary.nspecial
)
self.sil_id = (
target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1
)
self.kenlm = None
if cfg.kenlm_path is not None:
import kenlm
self.kenlm = kenlm.Model(cfg.kenlm_path)
self.word_kenlm = None
if cfg.word_kenlm_path is not None:
import kenlm
self.word_kenlm = kenlm.Model(cfg.word_kenlm_path)
self.uppercase = cfg.uppercase
self.skipwords = set(cfg.skipwords.split(","))
def str_postprocess(s):
s = " ".join(w for w in s.split() if w not in self.skipwords)
s = s.upper() if self.uppercase else s
return s
self.str_postprocess = str_postprocess
self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s))
self.compute_word_score = None
if cfg.word_decoder_config is not None:
self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10)
def compute_word_score(logits, padding):
res = self.kaldi_decoder.decode(logits, padding)
for r in res:
r = r.result()
assert len(r) == 1
r = r[0]
yield r["score"], r["words"]
self.compute_word_score = compute_word_score
@classmethod
def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (UnpairedAudioTextConfig): configuration of this task
"""
dict_path = os.path.join(cfg.text_data, "dict.txt")
if os.path.exists(dict_path):
target_dictionary = Dictionary.load(dict_path)
else:
dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt")
target_dictionary = Dictionary.load(dict_path)
return cls(cfg, target_dictionary=target_dictionary)
def optimizer_step(self, optimizer, model, update_num):
if hasattr(model, "get_groups_for_update"):
groups = model.get_groups_for_update(update_num)
optimizer.step(groups={groups})
else:
optimizer.step()
def valid_step(self, sample, model, criterion):
res = model(
**sample["net_input"],
dense_x_only=True,
)
dense_x = res["logits"]
padding_mask = res["padding_mask"]
word_scores = None
if self.compute_word_score is not None:
word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu())
z = dense_x.argmax(-1)
z[padding_mask] = self.target_dictionary.pad()
vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool)
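        # track which non-special symbols appear in predictions; used for the vocabulary-usage-weighted LM perplexity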
import editdistance
c_err = 0
c_len = 0
pred_c_len = 0
lm_score_sum = 0
for i, (x, t, id) in enumerate(
zip(
z,
sample["target"] if "target" in sample else [None] * len(z),
sample["id"],
)
):
if t is not None:
t = t[(t >= self.target_dictionary.nspecial)]
x = x[
(x >= self.target_dictionary.nspecial)
& (x < (self.num_symbols + self.target_dictionary.nspecial))
]
if self.sil_id >= 0:
x = x[x != self.sil_id]
vocab_seen[x - self.target_dictionary.nspecial] = True
pred_units_arr = x
if self.cfg.ctc_eval:
pred_units_arr = pred_units_arr.unique_consecutive()
pred_units_arr = pred_units_arr[pred_units_arr != 0]
if id == 0:
if t is not None:
logger.info(f"REF: {self.target_dictionary.string(t)}")
logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}")
if self.kenlm is not None:
if t is not None:
ref_lm_s = self.compute_lm_score(
self.target_dictionary.string(t)
)
logger.info(
f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}"
)
hyp_lm_s = self.compute_lm_score(
self.target_dictionary.string(pred_units_arr)
)
logger.info(
f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}"
)
pred_units_arr = pred_units_arr.tolist()
pred_c_len += len(pred_units_arr)
if t is not None:
t = t.tolist()
c_err += editdistance.eval(pred_units_arr, t)
c_len += len(t)
else:
c_len = pred_c_len
if self.kenlm is not None:
pred_str = self.target_dictionary.string(pred_units_arr)
lm_score = self.compute_lm_score(pred_str)
lm_score_sum += lm_score
kaldi_score_sum = 0
word_lm_sum = 0
num_words = 0
if word_scores is not None:
for score, words in word_scores:
kaldi_score_sum += score
num_words += len(words)
if self.word_kenlm is not None:
word_lm_sum += self.kenlm.score(" ".join(words))
try:
world_size = get_data_parallel_world_size()
except:
world_size = 1
logging_output = {
"loss": c_err,
"_num_char_errors": c_err,
"_num_chars": c_len,
"_num_pred_chars": pred_c_len,
"ntokens": c_len,
"nsentences": z.size(0),
"sample_size": c_len,
"_world_size": world_size,
"_lm_score_sum": lm_score_sum,
"_kaldi_score_sum": kaldi_score_sum,
"_word_lm_sum": word_lm_sum,
"_num_words": num_words,
"_vocab_seen": vocab_seen,
}
return c_err, c_len, logging_output
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
has_unpaired_text = os.path.exists(
os.path.join(self.cfg.text_data, f"{split}.idx")
)
self.datasets[split] = ExtractedFeaturesDataset(
path=data_path,
split=split,
min_length=3,
max_length=task_cfg.max_length,
labels=None if has_unpaired_text else task_cfg.labels,
label_dict=self.target_dictionary,
shuffle=getattr(task_cfg, "shuffle", True),
sort_by_length=task_cfg.sort_by_length,
)
logger.info(f"split {split} has unpaired text? {has_unpaired_text}")
if has_unpaired_text:
text_dataset = data_utils.load_indexed_dataset(
os.path.join(self.cfg.text_data, split), self.target_dictionary
)
text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos())
self.datasets[split] = RandomInputDataset(
self.datasets[split],
text_dataset,
["random_label"],
add_to_input=True,
pad_idx=self.target_dictionary.pad(),
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
num_pred_chars = sum(
log.get("_num_pred_chars", zero) for log in logging_outputs
)
lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs)
vocab_seen = (
sum(log.get("_vocab_seen", zero) for log in logging_outputs)
.bool()
.sum()
.item()
)
kaldi_score_sum = sum(
log.get("_kaldi_score_sum", zero) for log in logging_outputs
)
word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs)
metrics.log_scalar_sum("_num_char_errors", num_char_errors)
metrics.log_scalar_sum("_num_chars", num_chars)
metrics.log_scalar_sum("_num_word_errors", num_word_errors)
metrics.log_scalar_sum("_num_words", num_words)
metrics.log_scalar_sum("lm_score_sum", lm_score_sum)
metrics.log_scalar_sum("num_pred_chars", num_pred_chars)
if self.cfg.word_kenlm_path is not None:
metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum)
metrics.log_scalar_sum("word_lm_sum", word_lm_sum)
if num_chars > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
if lm_score_sum < 0 and vocab_seen > 0:
metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols)
metrics.log_derived(
"weighted_lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
metrics.log_derived(
"lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
),
)
else:
metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf"))
if num_words > 0:
if word_lm_sum != 0:
metrics.log_derived(
"word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
),
)
metrics.log_derived(
"weighted_word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
if self.cfg.word_kenlm_path is not None:
metrics.log_derived(
"kaldi_score",
lambda meters: meters["kaldi_score_sum"].sum
/ meters["nsentences"].sum,
)
def build_model(self, cfg: FairseqDataclass):
model = super().build_model(cfg)
return model
| 15,435 | 33.455357 | 102 |
py
|
sign-topic
|
sign-topic-main/examples/wav2vec/unsupervised/tasks/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .unpaired_audio_text import UnpairedAudioText
__all__ = [
"UnpairedAudioText",
]
| 270 | 21.583333 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/criss/save_encoder.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
from fairseq.utils import safe_hasattr
def get_avg_pool(
models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False
):
model = EnsembleModel(models)
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32)
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype(
np.float32
)
encoder_mask = np.expand_dims(encoder_mask.T, axis=2)
if has_langtok:
encoder_mask = encoder_mask[1:, :, :]
np_encoder_outs = np_encoder_outs[1, :, :]
masked_encoder_outs = encoder_mask * np_encoder_outs
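    # mean-pool encoder states over non-padded timesteps to obtain one embedding per sentence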
avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0)
return avg_pool
def main(args):
assert args.path is not None, "--path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
args.beam = 1
utils.import_user_module(args)
if args.max_tokens is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(":"),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_positions=utils.resolve_max_positions(
task.max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
num_sentences = 0
source_sentences = []
shard_id = 0
all_avg_pool = None
encoder_has_langtok = (
safe_hasattr(task.args, "encoder_langtok")
and task.args.encoder_langtok is not None
and safe_hasattr(task.args, "lang_tok_replacing_bos_eos")
and not task.args.lang_tok_replacing_bos_eos
)
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
if sample is None:
print("Skipping None")
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
with torch.no_grad():
avg_pool = get_avg_pool(
models,
sample,
prefix_tokens,
src_dict,
args.post_process,
has_langtok=encoder_has_langtok,
)
if all_avg_pool is not None:
all_avg_pool = np.concatenate((all_avg_pool, avg_pool))
else:
all_avg_pool = avg_pool
if not isinstance(sample["id"], list):
sample_ids = sample["id"].tolist()
else:
sample_ids = sample["id"]
for i, sample_id in enumerate(sample_ids):
# Remove padding
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.post_process)
else:
src_str = ""
if not args.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str))
source_sentences.append(f"{sample_id}\t{src_str}")
num_sentences += sample["nsentences"]
if all_avg_pool.shape[0] >= 1000000:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}",
"w",
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}",
"w",
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
all_avg_pool = None
source_sentences = []
shard_id += 1
if all_avg_pool is not None:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w"
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w"
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
return None
def cli_main():
parser = options.get_generation_parser()
parser.add_argument(
"--encoder-save-dir",
default="",
type=str,
metavar="N",
help="directory to save encoder outputs",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 7,473 | 33.762791 | 90 |
py
|
sign-topic
|
sign-topic-main/examples/criss/sentence_retrieval/encoder_analysis.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import numpy as np
DIM = 1024
def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
target_ids = [tid for tid in target_embs]
source_mat = np.stack(source_embs.values(), axis=0)
normalized_source_mat = source_mat / np.linalg.norm(
source_mat, axis=1, keepdims=True
)
target_mat = np.stack(target_embs.values(), axis=0)
normalized_target_mat = target_mat / np.linalg.norm(
target_mat, axis=1, keepdims=True
)
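    # cosine similarity: dot products between L2-normalized source and target embeddings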
sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
if return_sim_mat:
return sim_mat
neighbors_map = {}
for i, sentence_id in enumerate(source_embs):
idx = np.argsort(sim_mat[i, :])[::-1][:k]
neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
return neighbors_map
def load_embeddings(directory, LANGS):
sentence_embeddings = {}
sentence_texts = {}
for lang in LANGS:
sentence_embeddings[lang] = {}
sentence_texts[lang] = {}
lang_dir = f"{directory}/{lang}"
embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
for embed_file in embedding_files:
shard_id = embed_file.split(".")[-1]
embeddings = np.fromfile(embed_file, dtype=np.float32)
num_rows = embeddings.shape[0] // DIM
embeddings = embeddings.reshape((num_rows, DIM))
with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
for idx, line in enumerate(sentence_file):
sentence_id, sentence = line.strip().split("\t")
sentence_texts[lang][sentence_id] = sentence
sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
return sentence_embeddings, sentence_texts
def compute_accuracy(directory, LANGS):
sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
top_1_accuracy = {}
top1_str = " ".join(LANGS) + "\n"
for source_lang in LANGS:
top_1_accuracy[source_lang] = {}
top1_str += f"{source_lang} "
for target_lang in LANGS:
top1 = 0
top5 = 0
neighbors_map = compute_dist(
sentence_embeddings[source_lang], sentence_embeddings[target_lang]
)
for sentence_id, neighbors in neighbors_map.items():
if sentence_id == neighbors[0]:
top1 += 1
if sentence_id in neighbors[:5]:
top5 += 1
n = len(sentence_embeddings[target_lang])
top1_str += f"{top1/n} "
top1_str += "\n"
print(top1_str)
print(top1_str, file=open(f"{directory}/accuracy", "w"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Analyze encoder outputs")
parser.add_argument("directory", help="Source language corpus")
parser.add_argument("--langs", help="List of langs")
args = parser.parse_args()
langs = args.langs.split(",")
compute_accuracy(args.directory, langs)
| 3,278 | 34.258065 | 82 |
py
|