SLT-FAI-main/sentence_transformers/models/T5.py

from torch import nn
from transformers import T5Model, T5Tokenizer
import json
from typing import List, Dict, Optional
import os
import numpy as np
import logging
class T5(nn.Module):
"""DEPRECATED: Please use models.Transformer instead.
T5 model to generate token embeddings.
Each token is mapped to an output vector from T5.
"""
def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, task_identifier: str = 'stsb sentence1: ', model_args: Dict = {}, tokenizer_args: Dict = {}):
super(T5, self).__init__()
self.config_keys = ['max_seq_length', 'do_lower_case', 'task_identifier']
self.do_lower_case = do_lower_case
if max_seq_length > 512:
logging.warning("T5 only allows a max_seq_length of 512. Value will be set to 512")
max_seq_length = 512
self.max_seq_length = max_seq_length
if self.do_lower_case is not None:
tokenizer_args['do_lower_case'] = do_lower_case
self.t5model = T5Model.from_pretrained(model_name_or_path, **model_args)
self.tokenizer = T5Tokenizer.from_pretrained(model_name_or_path, **tokenizer_args)
self.task_identifier = task_identifier
def forward(self, features):
"""Returns token_embeddings, cls_token"""
output_states = self.t5model.encoder(input_ids=features['input_ids'], attention_mask=features['attention_mask'])
output_tokens = output_states[0]
cls_tokens = output_tokens[:, 0, :] # first token, used as a CLS stand-in (T5 has no CLS token)
features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens})
if len(output_states) > 1:
features.update({'all_layer_embeddings': output_states[1]})
return features
def get_word_embedding_dimension(self) -> int:
return self.t5model.config.hidden_size
def tokenize(self, text: str) -> List[int]:
"""
Tokenizes a text and maps tokens to token-ids
"""
return self.tokenizer.encode(self.task_identifier+text)
def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
"""
Convert tokenized sentence in its embedding ids, segment ids and mask
:param tokens:
a tokenized sentence
:param pad_seq_length:
the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
:return: embedding ids, segment ids and mask for the sentence
"""
pad_seq_length = min(pad_seq_length, self.max_seq_length)
return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str):
self.t5model.save_pretrained(output_path)
self.tokenizer.save_pretrained(output_path)
with open(os.path.join(output_path, 'sentence_T5_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'sentence_T5_config.json')) as fIn:
config = json.load(fIn)
return T5(model_name_or_path=input_path, **config)
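
A minimal usage sketch for this (deprecated) module, assuming the package layout above; the checkpoint name and pooling setup are illustrative, not taken from this file:

from sentence_transformers import SentenceTransformer
from sentence_transformers.models import T5, Pooling

# Wrap a pretrained T5 encoder and mean-pool its token embeddings.
t5 = T5('t5-small', max_seq_length=128)
pooling = Pooling(t5.get_word_embedding_dimension(), pooling_mode_mean_tokens=True)
model = SentenceTransformer(modules=[t5, pooling])
embeddings = model.encode(['A sentence to embed.'])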

SLT-FAI-main/sentence_transformers/models/RoBERTa.py

from . import Transformer
class RoBERTa(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/CamemBERT.py

from . import Transformer
class CamemBERT(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/BERT.py

from . import Transformer
class BERT(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/XLNet.py

from . import Transformer
class XLNet(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/WordWeights.py

import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import logging
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
:param vocab:
Vocabulary of the tokenizer
:param word_weights:
Mapping of tokens to a float weight value. Word embeddings are multiplied by this float value. The tokens in word_weights need not match the vocab exactly (it may contain more or fewer entries).
:param unknown_word_weight:
Weight for words in vocab that do not appear in the word_weights lookup, for example rare words for which no weight exists.
"""
super(WordWeights, self).__init__()
self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight']
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logging.info("{} of {} words without a weighting value. Set weight to {}".format(num_unknown_words, len(vocab), unknown_word_weight))
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({'weight': torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features['attention_mask']
token_embeddings = features['token_embeddings']
#Compute a weight value for each token
token_weights_raw = self.emb_layer(features['input_ids']).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
#Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({'token_embeddings': token_embeddings, 'token_weights_sum': token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return WordWeights(**config)
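
For context, a hedged sketch of feeding idf values into this layer; the corpus statistics below are invented for illustration:

import math
from sentence_transformers.models import WordWeights

# Toy vocab and document frequencies; in practice these come from your corpus.
vocab = ['the', 'movie', 'was', 'great']
doc_freq = {'the': 1000, 'movie': 120, 'was': 800, 'great': 40}
num_docs = 1000
idf = {word: math.log(num_docs / df) for word, df in doc_freq.items()}

word_weights = WordWeights(vocab=vocab, word_weights=idf, unknown_word_weight=1.0)
# Typically inserted between a WordEmbeddings layer and a Pooling layer, e.g.
# SentenceTransformer(modules=[word_embeddings, word_weights, pooling]).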

SLT-FAI-main/sentence_transformers/models/WKPooling.py

import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import numpy as np
class WKPooling(nn.Module):
"""
Pooling based on the paper: "SBERT-WK: A Sentence Embedding Method By Dissecting BERT-based Word Models"
https://arxiv.org/pdf/2002.06652.pdf
Note: SBERT-WK uses QR decomposition. torch QR decomposition is currently extremely slow when run on GPU.
Hence, the tensor is first transferred to the CPU before it is applied. This makes this pooling method rather slow
"""
def __init__(self, word_embedding_dimension, layer_start: int = 4, context_window_size: int = 2):
super(WKPooling, self).__init__()
self.config_keys = ['word_embedding_dimension', 'layer_start', 'context_window_size']
self.word_embedding_dimension = word_embedding_dimension
self.pooling_output_dimension = word_embedding_dimension
self.layer_start = layer_start
self.context_window_size = context_window_size
def forward(self, features: Dict[str, Tensor]):
ft_all_layers = features['all_layer_embeddings']
org_device = ft_all_layers[0].device
all_layer_embedding = torch.stack(ft_all_layers).transpose(1,0)
all_layer_embedding = all_layer_embedding[:, self.layer_start:, :, :] # Start from 4th layers output
# torch.qr is slow on GPU (see https://github.com/pytorch/pytorch/issues/22573). So compute it on CPU until issue is fixed
all_layer_embedding = all_layer_embedding.cpu()
attention_mask = features['attention_mask'].cpu().numpy()
unmask_num = np.array([sum(mask) for mask in attention_mask]) - 1 # Not considering the last item
embedding = []
# One sentence at a time
for sent_index in range(len(unmask_num)):
sentence_feature = all_layer_embedding[sent_index, :, :unmask_num[sent_index], :]
one_sentence_embedding = []
# Process each token
for token_index in range(sentence_feature.shape[1]):
token_feature = sentence_feature[:, token_index, :]
# 'Unified Word Representation'
token_embedding = self.unify_token(token_feature)
one_sentence_embedding.append(token_embedding)
one_sentence_embedding = torch.stack(one_sentence_embedding)
sentence_embedding = self.unify_sentence(sentence_feature, one_sentence_embedding)
embedding.append(sentence_embedding)
output_vector = torch.stack(embedding).to(org_device)
features.update({'sentence_embedding': output_vector})
return features
def unify_token(self, token_feature):
"""
Unify Token Representation
"""
window_size = self.context_window_size
alpha_alignment = torch.zeros(token_feature.size()[0], device=token_feature.device)
alpha_novelty = torch.zeros(token_feature.size()[0], device=token_feature.device)
for k in range(token_feature.size()[0]):
left_window = token_feature[k - window_size:k, :]
right_window = token_feature[k + 1:k + window_size + 1, :]
window_matrix = torch.cat([left_window, right_window, token_feature[k, :][None, :]])
Q, R = torch.qr(window_matrix.T)
r = R[:, -1]
alpha_alignment[k] = torch.mean(self.norm_vector(R[:-1, :-1], dim=0), dim=1).matmul(R[:-1, -1]) / torch.norm(r[:-1])
alpha_alignment[k] = 1 / (alpha_alignment[k] * window_matrix.size()[0] * 2)
alpha_novelty[k] = torch.abs(r[-1]) / torch.norm(r)
# Sum Norm
alpha_alignment = alpha_alignment / torch.sum(alpha_alignment) # Normalization Choice
alpha_novelty = alpha_novelty / torch.sum(alpha_novelty)
alpha = alpha_novelty + alpha_alignment
alpha = alpha / torch.sum(alpha) # Normalize
out_embedding = torch.mv(token_feature.t(), alpha)
return out_embedding
def norm_vector(self, vec, p=2, dim=0):
"""
Implements the normalize() function from sklearn
"""
vec_norm = torch.norm(vec, p=p, dim=dim)
return vec.div(vec_norm.expand_as(vec))
def unify_sentence(self, sentence_feature, one_sentence_embedding):
"""
Unify Sentence By Token Importance
"""
sent_len = one_sentence_embedding.size()[0]
var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
for token_index in range(sent_len):
token_feature = sentence_feature[:, token_index, :]
sim_map = self.cosine_similarity_torch(token_feature)
var_token[token_index] = torch.var(sim_map.diagonal(-1))
var_token = var_token / torch.sum(var_token)
sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)
return sentence_embedding
def cosine_similarity_torch(self, x1, x2=None, eps=1e-8):
x2 = x1 if x2 is None else x2
w1 = x1.norm(p=2, dim=1, keepdim=True)
w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
return torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps)
def get_sentence_embedding_dimension(self):
return self.pooling_output_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return WKPooling(**config)
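
A sketch of how WKPooling might slot into a pipeline. It assumes the word embedding model exposes 'all_layer_embeddings', which for the Transformer module usually means enabling hidden-state output; the checkpoint name is a placeholder:

from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Transformer, WKPooling

# WKPooling consumes per-layer token embeddings, so all hidden states are needed.
word_model = Transformer('bert-base-uncased', model_args={'output_hidden_states': True})
wk_pooling = WKPooling(word_model.get_word_embedding_dimension(), layer_start=4)
model = SentenceTransformer(modules=[word_model, wk_pooling])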

SLT-FAI-main/sentence_transformers/models/Normalize.py

from torch import Tensor
from torch import nn
from typing import Dict
import torch.nn.functional as F
class Normalize(nn.Module):
"""
This layer normalizes embeddings to unit length
"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({'sentence_embedding': F.normalize(features['sentence_embedding'], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
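
A tiny self-contained check of what the layer does; the tensor shapes are arbitrary:

import torch
from sentence_transformers.models import Normalize

# Unit-normalizing makes cosine similarity equivalent to a plain dot product.
layer = Normalize()
features = {'sentence_embedding': torch.randn(4, 768)}
out = layer(features)['sentence_embedding']
print(out.norm(p=2, dim=1))  # each row now has L2 norm ~1.0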

SLT-FAI-main/sentence_transformers/models/ALBERT.py

from . import Transformer
class ALBERT(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/Dense.py

import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
from ..util import fullname, import_from_string
class Dense(nn.Module):
"""Feed-forward function with activiation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networs (DAN).
:param in_features: Size of the input dimension
:param out_features: Output size
:param bias: Add a bias vector
:param activation_function: Pytorch activation function applied on output
"""
def __init__(self, in_features: int, out_features: int, bias: bool = True, activation_function=nn.Tanh()):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
def forward(self, features: Dict[str, Tensor]):
features.update({'sentence_embedding': self.activation_function(self.linear(features['sentence_embedding']))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump({'in_features': self.in_features, 'out_features': self.out_features, 'bias': self.bias, 'activation_function': fullname(self.activation_function)}, fOut)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
config['activation_function'] = import_from_string(config['activation_function'])()
model = Dense(**config)
model.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu')))
return model
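
A short sketch of the layer in isolation; the 768-to-256 sizes are made up:

import torch
from torch import nn
from sentence_transformers.models import Dense

# Project 768-dim pooled sentence embeddings down to 256 dims with a Tanh.
dense = Dense(in_features=768, out_features=256, activation_function=nn.Tanh())
features = {'sentence_embedding': torch.randn(8, 768)}
print(dense(features)['sentence_embedding'].shape)  # torch.Size([8, 256])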

SLT-FAI-main/sentence_transformers/models/BoW.py

import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(self, vocab: List[str], word_weights: Dict[str, float] = {}, unknown_word_weight: float = 1, cumulative_term_frequency: bool = True):
super(BoW, self).__init__()
vocab = list(set(vocab)) #Ensure vocab is unique
self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight', 'cumulative_term_frequency']
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
#Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logging.info("{} out of {} words without a weighting value. Set weight to {}".format(num_unknown_words, len(vocab), unknown_word_weight))
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
#Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, text: str) -> List[int]:
return self.tokenizer.tokenize(text)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
return {'sentence_embedding': torch.tensor([vector], dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return BoW(**config)
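
A toy end-to-end run of the module; the three-word vocab is invented (note the constructor dedupes the vocab with set(), so token ids need not follow the input order):

from sentence_transformers.models import BoW

bow = BoW(vocab=['good', 'bad', 'movie'], word_weights={'good': 2.0}, unknown_word_weight=1.0)
token_ids = bow.tokenize('good movie')                      # vocab indices
features = bow.get_sentence_features(token_ids, pad_seq_length=0)
print(features['sentence_embedding'].shape)                 # torch.Size([1, 3]), one slot per vocab entry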

SLT-FAI-main/sentence_transformers/models/__init__.py

from .Transformer import Transformer
from .ALBERT import ALBERT
from .BERT import BERT
from .BoW import BoW
from .CNN import CNN
from .CamemBERT import CamemBERT
from .Dense import Dense
from .DistilBERT import DistilBERT
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .RoBERTa import RoBERTa
from .T5 import T5
from .WKPooling import WKPooling
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
from .XLMRoBERTa import XLMRoBERTa
from .XLNet import XLNet
from .MLP3 import MLP3

SLT-FAI-main/sentence_transformers/models/Pooling.py

import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
class Pooling(nn.Module):
"""Performs pooling (max or mean) on the token embeddings.
Using pooling, it generates a fixed-sized sentence embedding from a variable-sized sentence. This layer also allows using the CLS token if it is returned by the underlying word embedding model.
You can concatenate multiple poolings together.
:param word_embedding_dimension: Dimensions for the word embeddings
:param pooling_mode_cls_token: Use the first token (CLS token) as text representations
:param pooling_mode_max_tokens: Use max in each dimension over all tokens.
:param pooling_mode_mean_tokens: Perform mean-pooling
:param pooling_mode_mean_sqrt_len_tokens: Perform mean-pooling, but divide by sqrt(input_length).
"""
def __init__(self,
word_embedding_dimension: int,
pooling_mode_cls_token: bool = False,
pooling_mode_max_tokens: bool = False,
pooling_mode_mean_tokens: bool = True,
pooling_mode_mean_sqrt_len_tokens: bool = False,
pooling_mode_mean_last_2_tokens: bool = False,
pooling_mode_mean_first_last_tokens: bool = False, # same as bert-flow, see https://github.com/bohanli/BERT-flow/issues/11
pooling_mode_pad_max_tokens: bool = False,
pooling_mode_pad_mean_tokens: bool = False,
):
super(Pooling, self).__init__()
self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens', 'pooling_mode_mean_last_2_tokens', 'pooling_mode_mean_first_last_tokens', 'pooling_mode_pad_max_tokens', 'pooling_mode_pad_mean_tokens'] # list all modes so save()/load() round-trips
self.word_embedding_dimension = word_embedding_dimension
self.pooling_mode_cls_token = pooling_mode_cls_token
self.pooling_mode_mean_tokens = pooling_mode_mean_tokens
self.pooling_mode_max_tokens = pooling_mode_max_tokens
self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens
self.pooling_mode_mean_last_2_tokens = pooling_mode_mean_last_2_tokens
self.pooling_mode_mean_first_last_tokens = pooling_mode_mean_first_last_tokens
self.pooling_mode_pad_max_tokens = pooling_mode_pad_max_tokens
self.pooling_mode_pad_mean_tokens = pooling_mode_pad_mean_tokens
pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])
self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)
def forward(self, features: Dict[str, Tensor]):
token_embeddings = features['token_embeddings']
cls_token = features['cls_token_embeddings']
attention_mask = features['attention_mask']
# assert features["all_layer_embeddings"][-1].sum() == features["token_embeddings"].sum()
## Pooling strategy
output_vectors = []
if self.pooling_mode_cls_token:
output_vectors.append(cls_token)
if self.pooling_mode_max_tokens:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value
max_over_time = torch.max(token_embeddings, 1)[0]
output_vectors.append(max_over_time)
if self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
#If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present
if 'token_weights_sum' in features:
sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())
else:
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
if self.pooling_mode_mean_tokens:
output_vectors.append(sum_embeddings / sum_mask)
if self.pooling_mode_mean_sqrt_len_tokens:
output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))
if self.pooling_mode_mean_last_2_tokens and "all_layer_embeddings" in features: # avg of last 2 layers
if "token_checker" in self.__dict__:
token_ids = features['input_ids']
new_mask = []
for sample_token_ids in token_ids:
sample_mask = []
for token_id in sample_token_ids:
if self.token_checker(token_id.item()):
sample_mask.append(1)
else:
sample_mask.append(0)
new_mask.append(sample_mask)
attention_mask = torch.tensor(new_mask).to(device=attention_mask.device, dtype=attention_mask.dtype)
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
token_embeddings_last1 = features["all_layer_embeddings"][-1]
sum_embeddings_last1 = torch.sum(token_embeddings_last1 * input_mask_expanded, 1)
sum_embeddings_last1 = sum_embeddings_last1 / sum_mask
token_embeddings_last2 = features["all_layer_embeddings"][-2]
sum_embeddings_last2 = torch.sum(token_embeddings_last2 * input_mask_expanded, 1)
sum_embeddings_last2 = sum_embeddings_last2 / sum_mask
output_vectors.append((sum_embeddings_last1+sum_embeddings_last2) / 2)
if self.pooling_mode_mean_first_last_tokens and "all_layer_embeddings" in features: # avg of the first and the last layers
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
token_embeddings_first = features["all_layer_embeddings"][0] # first layer
sum_embeddings_first = torch.sum(token_embeddings_first * input_mask_expanded, 1)
sum_embeddings_first = sum_embeddings_first / sum_mask
token_embeddings_last = features["all_layer_embeddings"][-1]
sum_embeddings_last = torch.sum(token_embeddings_last * input_mask_expanded, 1)
sum_embeddings_last = sum_embeddings_last / sum_mask
output_vectors.append((sum_embeddings_first + sum_embeddings_last) / 2)
if self.pooling_mode_pad_max_tokens:
max_output, _ = torch.max(token_embeddings[:, 1:, :], dim=1)
output_vectors.append(max_output)
features.update({'pad_max_tokens': max_output})
if self.pooling_mode_pad_mean_tokens:
mean_output = torch.mean(token_embeddings[:, 1:, :], 1)
output_vectors.append(mean_output)
features.update({'pad_mean_tokens': mean_output})
output_vector = torch.cat(output_vectors, 1)
features.update({'sentence_embedding': output_vector})
return features
def get_sentence_embedding_dimension(self):
return self.pooling_output_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return Pooling(**config)
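
The masked mean pooling implemented above reduces to a few tensor ops; a standalone sketch with random tensors standing in for model output:

import torch

token_embeddings = torch.randn(2, 5, 8)                # (batch, tokens, dim)
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
mean_pooled = (token_embeddings * mask).sum(1) / mask.sum(1).clamp(min=1e-9)
print(mean_pooled.shape)                               # torch.Size([2, 8])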

SLT-FAI-main/sentence_transformers/models/LSTM.py

import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""
Bidirectional LSTM running over word embeddings.
"""
def __init__(self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, bidirectional: bool = True):
nn.Module.__init__(self)
self.config_keys = ['word_embedding_dimension', 'hidden_dim', 'num_layers', 'dropout', 'bidirectional']
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(word_embedding_dimension, hidden_dim, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)
def forward(self, features):
token_embeddings = features['token_embeddings']
sentence_lengths = torch.clamp(features['sentence_lengths'], min=1)
packed = nn.utils.rnn.pack_padded_sequence(token_embeddings, sentence_lengths, batch_first=True, enforce_sorted=False)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({'token_embeddings': unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, 'lstm_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'lstm_config.json'), 'r') as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
model = LSTM(**config)
model.load_state_dict(weights)
return model
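
A self-contained forward pass; the dimensions are arbitrary:

import torch
from sentence_transformers.models import LSTM

lstm = LSTM(word_embedding_dimension=100, hidden_dim=64)   # bidirectional => 128-dim output
features = {
    'token_embeddings': torch.randn(2, 7, 100),
    'sentence_lengths': torch.tensor([7, 4]),
}
out = lstm(features)['token_embeddings']
print(out.shape)                                           # torch.Size([2, 7, 128])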

SLT-FAI-main/sentence_transformers/models/MLP3.py

import torch
from torch import nn
import os
import json
from typing import Union, Tuple, List, Iterable, Dict
from torch import Tensor
class MLP3(nn.Module):
def __init__(self, hidden_dim=2048, norm=None, activation='relu'):
super().__init__()
''' page 3 baseline setting
Projection MLP. The projection MLP (in f) has BN ap-
plied to each fully-connected (fc) layer, including its out-
put fc. Its output fc has no ReLU. The hidden fc is 2048-d.
This MLP has 3 layers.
'''
self.config_keys = ['hidden_dim', 'norm', 'activation']
self.hidden_dim = hidden_dim
self.norm = norm
self.activation = activation
if activation == "relu":
activation_layer = nn.ReLU()
elif activation == "leakyrelu":
activation_layer = nn.LeakyReLU()
elif activation == "tanh":
activation_layer = nn.Tanh()
elif activation == "sigmoid":
activation_layer = nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
if norm:
if norm=='bn':
norm_layer = nn.BatchNorm1d
else:
norm_layer = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
norm_layer(hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
norm_layer(hidden_dim),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
norm_layer(hidden_dim)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
)
self.num_layers = 3
def set_layers(self, num_layers):
self.num_layers = num_layers
def forward(self, features: Dict[str, Tensor]):
x = features["token_embeddings"]
if self.num_layers == 3:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
elif self.num_layers == 2:
x = self.layer1(x)
x = self.layer3(x)
else:
raise Exception
features["token_embeddings"] = x
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'mlp3_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'mlp3_config.json')) as fIn:
config = json.load(fIn)
return MLP3(**config)
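
A quick shape check of the projection head; the 2048-dim input mirrors the SimSiam-style default quoted in the docstring, and the 2D batch is illustrative (BatchNorm1d expects (batch, features)):

import torch
from sentence_transformers.models import MLP3

mlp = MLP3(hidden_dim=2048, norm='bn', activation='relu')
features = {'token_embeddings': torch.randn(16, 2048)}
print(mlp(features)['token_embeddings'].shape)  # torch.Size([16, 2048])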

SLT-FAI-main/sentence_transformers/models/DistilBERT.py

from . import Transformer
class DistilBERT(Transformer):
"""
DEPRECATED: Please use models.Transformer instead.
"""
pass

SLT-FAI-main/sentence_transformers/models/tokenizer/WordTokenizer.py

from abc import ABC, abstractmethod
from typing import Union, Tuple, List, Iterable, Dict
ENGLISH_STOP_WORDS = ['!', '"', "''", "``", '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~', 'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another', 'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', 'around', 'as', 'at', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides', 'between', 'beyond', 'bill', 'both', 'bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant', 'co', 'con', 'could', 'couldn', 'couldnt', 'cry', 'd', 'de', 'describe', 'detail', 'did', 'didn', 'do', 'does', 'doesn', 'doing', 'don', 'done', 'down', 'due', 'during', 'each', 'eg', 'eight', 'either', 'eleven', 'else', 'elsewhere', 'empty', 'enough', 'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere', 'except', 'few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first', 'five', 'for', 'former', 'formerly', 'forty', 'found', 'four', 'from', 'front', 'full', 'further', 'get', 'give', 'go', 'had', 'hadn', 'has', 'hasn', 'hasnt', 'have', 'haven', 'having', 'he', 'hence', 'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his', 'how', 'however', 'hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed', 'interest', 'into', 'is', 'isn', 'it', 'its', 'itself', 'just', 'keep', 'last', 'latter', 'latterly', 'least', 'less', 'll', 'ltd', 'm', 'ma', 'made', 'many', 'may', 'me', 'meanwhile', 'might', 'mightn', 'mill', 'mine', 'more', 'moreover', 'most', 'mostly', 'move', 'much', 'must', 'mustn', 'my', 'myself', 'name', 'namely', 'needn', 'neither', 'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'often', 'on', 'once', 'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps', 'please', 'put', 'rather', 're', 's', 'same', 'see', 'seem', 'seemed', 'seeming', 'seems', 'serious', 'several', 'shan', 'she', 'should', 'shouldn', 'show', 'side', 'since', 'sincere', 'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime', 'sometimes', 'somewhere', 'still', 'such', 'system', 't', 'take', 'ten', 'than', 'that', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they', 'thick', 'thin', 'third', 'this', 'those', 'though', 'three', 'through', 'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward', 'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon', 'us', 've', 'very', 'via', 'was', 'wasn', 'we', 'well', 'were', 'weren', 'what', 'whatever', 'when', 'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with', 'within', 'without', 'won', 'would', 'wouldn', 'y', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves']
class WordTokenizer(ABC):
@abstractmethod
def set_vocab(self, vocab: Iterable[str]):
pass
@abstractmethod
def get_vocab(self):
pass
@abstractmethod
def tokenize(self, text: str) -> List[int]:
pass
@abstractmethod
def save(self, output_path: str):
pass
@staticmethod
@abstractmethod
def load(input_path: str):
pass

SLT-FAI-main/sentence_transformers/models/tokenizer/PhraseTokenizer.py

from typing import Union, Tuple, List, Iterable, Dict
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
import nltk
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizer respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False, ngram_separator: str = "_", max_ngram_length: int = 5):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
# Some words might be malformed in e.g. the Google News word2vec file, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logging.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logging.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str) -> List[int]:
tokens = nltk.word_tokenize(text, preserve_line=True)
#phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx:idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx:idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx:idx + ngram_len] = [ngram.lower()]
idx += 1
#Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, 'phrasetokenizer_config.json'), 'w') as fOut:
json.dump({'vocab': list(self.word2idx.keys()), 'stop_words': list(self.stop_words), 'do_lower_case': self.do_lower_case, 'ngram_separator': self.ngram_separator, 'max_ngram_length': self.max_ngram_length}, fOut)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'phrasetokenizer_config.json'), 'r') as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
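
A toy example of the phrase merging described in the docstring; the vocab is invented, and the class is imported from its module directly since the package __init__ above does not re-export it (nltk must be installed):

from sentence_transformers.models.tokenizer.PhraseTokenizer import PhraseTokenizer

tokenizer = PhraseTokenizer(vocab=['New_York', 'is', 'big'], stop_words=set())
print(tokenizer.tokenize('New York is big'))  # [0, 1, 2]: 'New York' merged into 'New_York'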

SLT-FAI-main/sentence_transformers/models/tokenizer/WhitespaceTokenizer.py

from typing import Union, Tuple, List, Iterable, Dict
import collections
import string
import os
import json
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
class WhitespaceTokenizer(WordTokenizer):
"""
Simple and fast whitespace tokenizer. Splits sentences on whitespace.
Punctuation is stripped from tokens.
"""
def __init__(self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, 'whitespacetokenizer_config.json'), 'w') as fOut:
json.dump({'vocab': list(self.word2idx.keys()), 'stop_words': list(self.stop_words), 'do_lower_case': self.do_lower_case}, fOut)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'whitespacetokenizer_config.json'), 'r') as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
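
The whitespace tokenizer in the same toy style (vocab and stop words invented):

from sentence_transformers.models.tokenizer import WhitespaceTokenizer

tok = WhitespaceTokenizer(vocab=['hello', 'world'], stop_words={'the'})
print(tok.tokenize('the hello world!'))  # [0, 1]: stop word dropped, punctuation stripped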

SLT-FAI-main/sentence_transformers/models/tokenizer/__init__.py

from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from .WhitespaceTokenizer import WhitespaceTokenizer

SLT-FAI-main/sentence_transformers/datasets/SentenceLabelDataset.py

from torch.utils.data import Dataset
from typing import List
import bisect
import torch
import logging
import numpy as np
from tqdm import tqdm
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
from multiprocessing import Pool, cpu_count
import multiprocessing
class SentenceLabelDataset(Dataset):
"""
Dataset for training with triplet loss.
This dataset takes a list of sentences grouped by their label and uses this grouping to dynamically select a
positive example from the same group and a negative example from the other sentences for a selected anchor sentence.
This dataset should be used in combination with dataset_reader.LabelSentenceReader
One iteration over this dataset selects every sentence as anchor once.
This also uses smart batching like SentenceDataset.
"""
def __init__(self, examples: List[InputExample], model: SentenceTransformer, provide_positive: bool = True,
provide_negative: bool = True,
parallel_tokenization: bool = True,
max_processes: int = 4,
chunk_size: int = 5000):
"""
Converts input examples to a SentenceLabelDataset usable to train the model with
SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader
Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
and should be used in combination with dataset_reader.LabelSentenceReader.
Labels with only one example are ignored.
smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.
:param examples:
the input examples for the training
:param model:
the Sentence BERT model for the conversion
:param provide_positive:
set this to False, if you don't need a positive example (e.g. for BATCH_HARD_TRIPLET_LOSS).
:param provide_negative:
set this to False, if you don't need a negative example (e.g. for BATCH_HARD_TRIPLET_LOSS
or MULTIPLE_NEGATIVES_RANKING_LOSS).
:param parallel_tokenization:
If true, multiple processes will be started for the tokenization
:param max_processes:
Maximum number of processes started for tokenization. Cannot be larger than cpu_count()
:param chunk_size:
chunk_size examples are sent to each process. Larger values increase overall tokenization speed
"""
self.model = model
self.groups_right_border = []
self.grouped_inputs = []
self.grouped_labels = []
self.num_labels = 0
self.max_processes = min(max_processes, cpu_count())
self.chunk_size = chunk_size
self.parallel_tokenization = parallel_tokenization
if self.parallel_tokenization:
if multiprocessing.get_start_method() != 'fork':
logging.info("Parallel tokenization is only available on Unix systems which allow to fork processes. Fall back to sequential tokenization")
self.parallel_tokenization = False
self.convert_input_examples(examples, model)
self.idxs = np.arange(len(self.grouped_inputs))
self.provide_positive = provide_positive
self.provide_negative = provide_negative
def convert_input_examples(self, examples: List[InputExample], model: SentenceTransformer):
"""
Converts input examples to a SentenceLabelDataset.
Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
and should be used in combination with dataset_reader.LabelSentenceReader.
Labels with only one example are ignored.
:param examples:
the input examples for the training
:param model:
the Sentence Transformer model for the conversion
"""
inputs = []
labels = []
label_sent_mapping = {}
too_long = 0
label_type = None
logging.info("Start tokenization")
if not self.parallel_tokenization or self.max_processes == 1 or len(examples) <= self.chunk_size:
tokenized_texts = [self.tokenize_example(example) for example in examples]
else:
logging.info("Use multi-process tokenization with {} processes".format(self.max_processes))
self.model.to('cpu')
with Pool(self.max_processes) as p:
tokenized_texts = list(p.imap(self.tokenize_example, examples, chunksize=self.chunk_size))
# Group examples and labels
# Add examples with the same label to the same dict
for ex_index, example in enumerate(tqdm(examples, desc="Convert dataset")):
if label_type is None:
if isinstance(example.label, int):
label_type = torch.long
elif isinstance(example.label, float):
label_type = torch.float
tokenized_text = tokenized_texts[ex_index][0]
if hasattr(model, 'max_seq_length') and model.max_seq_length is not None and model.max_seq_length > 0 and len(tokenized_text) > model.max_seq_length:
too_long += 1
if example.label in label_sent_mapping:
label_sent_mapping[example.label].append(ex_index)
else:
label_sent_mapping[example.label] = [ex_index]
inputs.append(tokenized_text)
labels.append(example.label)
# Group sentences, such that sentences with the same label
# are besides each other. Only take labels with at least 2 examples
distinct_labels = list(label_sent_mapping.keys())
for i in range(len(distinct_labels)):
label = distinct_labels[i]
if len(label_sent_mapping[label]) >= 2:
self.grouped_inputs.extend([inputs[j] for j in label_sent_mapping[label]])
self.grouped_labels.extend([labels[j] for j in label_sent_mapping[label]])
self.groups_right_border.append(len(self.grouped_inputs)) #At which position does this label group / bucket end?
self.num_labels += 1
self.grouped_labels = torch.tensor(self.grouped_labels, dtype=label_type)
logging.info("Num sentences: %d" % (len(self.grouped_inputs)))
logging.info("Sentences longer than max_seqence_length: {}".format(too_long))
logging.info("Number of labels with >1 examples: {}".format(len(distinct_labels)))
def tokenize_example(self, example):
if example.texts_tokenized is not None:
return example.texts_tokenized
return [self.model.tokenize(text) for text in example.texts]
def __getitem__(self, item):
if not self.provide_positive and not self.provide_negative:
return [self.grouped_inputs[item]], self.grouped_labels[item]
# Anchor element
anchor = self.grouped_inputs[item]
# Check start and end position for this label in our list of grouped sentences
group_idx = bisect.bisect_right(self.groups_right_border, item)
left_border = 0 if group_idx == 0 else self.groups_right_border[group_idx - 1]
right_border = self.groups_right_border[group_idx]
if self.provide_positive:
positive_item_idx = np.random.choice(np.concatenate([self.idxs[left_border:item], self.idxs[item + 1:right_border]]))
positive = self.grouped_inputs[positive_item_idx]
else:
positive = []
if self.provide_negative:
negative_item_idx = np.random.choice(np.concatenate([self.idxs[0:left_border], self.idxs[right_border:]]))
negative = self.grouped_inputs[negative_item_idx]
else:
negative = []
return [anchor, positive, negative], self.grouped_labels[item]
def __len__(self):
return len(self.grouped_inputs)
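
A hedged construction sketch with toy examples; the checkpoint name is a placeholder for any loaded SentenceTransformer:

from sentence_transformers import SentenceTransformer
from sentence_transformers.readers import InputExample
from sentence_transformers.datasets import SentenceLabelDataset

model = SentenceTransformer('bert-base-nli-mean-tokens')  # placeholder checkpoint
examples = [
    InputExample(texts=['a sentence'], label=0),
    InputExample(texts=['another sentence'], label=0),
    InputExample(texts=['off topic'], label=1),
    InputExample(texts=['also off topic'], label=1),
]
dataset = SentenceLabelDataset(examples, model, parallel_tokenization=False)
(anchor, positive, negative), label = dataset[0]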

SLT-FAI-main/sentence_transformers/datasets/SentencesDataset.py

from torch.utils.data import Dataset
from typing import List
import torch
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
Dataset for smart batching, i.e., each batch is padded only to its longest sequence instead of padding all
sequences to the max length.
The SentenceBertEncoder.smart_batching_collate is required for this to work.
SmartBatchingDataset does *not* work without it.
"""
def __init__(self,
examples: List[InputExample],
model: SentenceTransformer
):
"""
Create a new SentencesDataset with the tokenized texts and the labels as Tensor
:param examples
A list of sentence.transformers.readers.InputExample
:param model:
SentenceTransformerModel
"""
self.model = model
self.examples = examples
self.label_type = torch.long if isinstance(self.examples[0].label, int) else torch.float
def __getitem__(self, item):
label = torch.tensor(self.examples[item].label, dtype=self.label_type)
if self.examples[item].texts_tokenized is None:
self.examples[item].texts_tokenized = [self.model.tokenize(text) for text in self.examples[item].texts]
return self.examples[item].texts_tokenized, label
def __len__(self):
return len(self.examples)

SLT-FAI-main/sentence_transformers/datasets/EncodeDataset.py

from torch.utils.data import Dataset
from typing import List, Union
from .. import SentenceTransformer
class EncodeDataset(Dataset):
def __init__(self,
sentences: Union[List[str], List[int]],
model: SentenceTransformer,
is_tokenized: bool = True):
"""
EncodeDataset is used by SentenceTransformer.encode method. It just stores
the input texts and returns a tokenized version of it.
"""
self.model = model
self.sentences = sentences
self.is_tokenized = is_tokenized
def __getitem__(self, item):
return self.sentences[item] if self.is_tokenized else self.model.tokenize(self.sentences[item])
def __len__(self):
return len(self.sentences)

SLT-FAI-main/sentence_transformers/datasets/ParallelSentencesDataset.py

from torch.utils.data import Dataset
import logging
import gzip
from queue import Queue
from .. import SentenceTransformer
from typing import List
import random
class ParallelSentencesDataset(Dataset):
"""
This dataset reader can be used to read in parallel sentences, i.e., it reads a file of tab-separated sentences with the same
sentence in different languages. For example, the file can look like this (EN\tDE\tES):
hello world hallo welt hola mundo
second sentence zweiter satz segunda oración
The sentence in the first column will be mapped to a sentence embedding using the given embedder. For example,
the embedder can be a mono-lingual sentence embedding method for English. The sentences in the other languages will also be
mapped to this English sentence embedding.
When getting a sample from the dataset, we get one sentence with the according sentence embedding for this sentence.
teacher_model can be any class that implements an encode function. The encode function gets a list of sentences and
returns a list of sentence embeddings
"""
def __init__(self, student_model: SentenceTransformer, teacher_model: SentenceTransformer, batch_size: int = 8, use_embedding_cache: bool = True):
"""
Parallel sentences dataset reader to train student model given a teacher model
:param student_model: Student sentence embedding model that should be trained
:param teacher_model: Teacher model, that provides the sentence embeddings for the first column in the dataset file
"""
self.student_model = student_model
self.teacher_model = teacher_model
self.datasets = []
self.datasets_iterator = []
self.datasets_tokenized = []
self.dataset_indices = []
self.copy_dataset_indices = []
self.cache = []
self.batch_size = batch_size
self.use_embedding_cache = use_embedding_cache
self.embedding_cache = {}
self.num_sentences = 0
def load_data(self, filepath: str, weight: int = 100, max_sentences: int = None, max_sentence_length: int = 128):
"""
Reads in a tab-separated .txt/.csv/.tsv or .gz file. The different columns contain the different translations of the sentence in the first column
:param filepath: Filepath to the file
:param weight: If more than one dataset is loaded with load_data: with which frequency should data be sampled from this dataset?
:param max_sentences: Max number of lines to be read from filepath
:param max_sentence_length: Skip the example if one of the sentences has more characters than max_sentence_length
:return:
"""
logging.info("Load "+filepath)
parallel_sentences = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn:
count = 0
for line in fIn:
sentences = line.strip().split("\t")
if max_sentence_length is not None and max_sentence_length > 0 and max([len(sent) for sent in sentences]) > max_sentence_length:
continue
parallel_sentences.append(sentences)
count += 1
if max_sentences is not None and max_sentences > 0 and count >= max_sentences:
break
self.add_dataset(parallel_sentences, weight=weight, max_sentences=max_sentences, max_sentence_length=max_sentence_length)
def add_dataset(self, parallel_sentences: List[List[str]], weight: int = 100, max_sentences: int = None, max_sentence_length: int = 128):
sentences_map = {}
for sentences in parallel_sentences:
if max_sentence_length is not None and max_sentence_length > 0 and max([len(sent) for sent in sentences]) > max_sentence_length:
continue
source_sentence = sentences[0]
if source_sentence not in sentences_map:
sentences_map[source_sentence] = set()
for sent in sentences:
sentences_map[source_sentence].add(sent)
if max_sentences is not None and max_sentences > 0 and len(sentences_map) >= max_sentences:
break
if len(sentences_map) == 0:
return
self.num_sentences += sum([len(sentences_map[sent]) for sent in sentences_map])
dataset_id = len(self.datasets)
self.datasets.append(list(sentences_map.items()))
self.datasets_iterator.append(0)
self.datasets_tokenized.append(False)
self.dataset_indices.extend([dataset_id] * weight)
def generate_data(self):
source_sentences_list = []
target_sentences_list = []
for data_idx in self.dataset_indices:
src_sentence, trg_sentences = self.next_entry(data_idx)
source_sentences_list.append(src_sentence)
target_sentences_list.append(trg_sentences)
#Generate embeddings
src_embeddings = self.get_embeddings(source_sentences_list)
for src_embedding, trg_sentences in zip(src_embeddings, target_sentences_list):
for trg_sentence in trg_sentences:
self.cache.append([[trg_sentence], src_embedding])
random.shuffle(self.cache)
def next_entry(self, data_idx):
source, target_sentences = self.datasets[data_idx][self.datasets_iterator[data_idx]]
if not self.datasets_tokenized[data_idx]:
target_sentences = [self.student_model.tokenize(sent) for sent in target_sentences]
self.datasets[data_idx][self.datasets_iterator[data_idx]] = [source, target_sentences]
self.datasets_iterator[data_idx] += 1
if self.datasets_iterator[data_idx] >= len(self.datasets[data_idx]): #Restart iterator
self.datasets_iterator[data_idx] = 0
self.datasets_tokenized[data_idx] = True
random.shuffle(self.datasets[data_idx])
return source, target_sentences
def get_embeddings(self, sentences):
if not self.use_embedding_cache:
return self.teacher_model.encode(sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
#Use caching
new_sentences = []
for sent in sentences:
if sent not in self.embedding_cache:
new_sentences.append(sent)
if len(new_sentences) > 0:
new_embeddings = self.teacher_model.encode(new_sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
for sent, embedding in zip(new_sentences, new_embeddings):
self.embedding_cache[sent] = embedding
return [self.embedding_cache[sent] for sent in sentences]
def __len__(self):
return self.num_sentences
def __getitem__(self, idx):
if len(self.cache) == 0:
self.generate_data()
return self.cache.pop()
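
A sketch of the teacher-student setup this reader serves; the file name and model names are placeholders:

from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer
from sentence_transformers.datasets import ParallelSentencesDataset

teacher = SentenceTransformer('bert-base-nli-mean-tokens')   # placeholder
student = SentenceTransformer('xlm-roberta-base')            # placeholder
data = ParallelSentencesDataset(student_model=student, teacher_model=teacher)
data.load_data('parallel-sentences.tsv.gz')                  # tab-separated; first column is the source language
loader = DataLoader(data, shuffle=True, batch_size=32)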

SLT-FAI-main/sentence_transformers/datasets/__init__.py

from .sampler import *
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
from .EncodeDataset import EncodeDataset

SLT-FAI-main/sentence_transformers/datasets/sampler/LabelSampler.py

"""
This file contains sampler functions, that can be used to sample mini-batches with specific properties.
"""
from torch.utils.data import Sampler
import numpy as np
from ...datasets import SentenceLabelDataset
class LabelSampler(Sampler):
"""
This sampler is used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS
or MULTIPLE_NEGATIVES_RANKING_LOSS which require multiple or only one sample from one label per batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
This *DOES NOT* check if there are more labels than the batch is large or if the batch size is divisible
by the samples drawn per label.
"""
def __init__(self, data_source: SentenceLabelDataset, samples_per_label: int = 5,
with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
:param data_source:
the dataset from which samples are drawn
:param samples_per_label:
the number of consecutive, random and unique samples drawn per label
:param with_replacement:
if this is True, a previously drawn sample can be drawn again in later draws (though never multiple times
within the same drawing).
if this is False, each sample is drawn at most once until all labels have been cycled through.
"""
super().__init__(data_source)
self.data_source = data_source
self.samples_per_label = samples_per_label
self.label_range = np.arange(data_source.num_labels)
self.borders = data_source.groups_right_border
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.data_source):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.borders[label-1]
right_border = self.borders[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield element_idx
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.data_source)
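
Continuing the SentenceLabelDataset sketch from earlier (dataset and model are defined there); the batch size is illustrative and should be a multiple of samples_per_label:

from torch.utils.data import DataLoader
from sentence_transformers.datasets.sampler import LabelSampler

sampler = LabelSampler(dataset, samples_per_label=5)
loader = DataLoader(dataset, sampler=sampler, batch_size=25,
                    collate_fn=model.smart_batching_collate)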

SLT-FAI-main/sentence_transformers/datasets/sampler/__init__.py

from .LabelSampler import *
| 27 | 27 | 27 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/TripletReader.py
|
from . import InputExample
import csv
import gzip
import os
class TripletReader(object):
"""
    Reads in a triplet dataset: each line contains (at least) 3 columns, one anchor column (s1),
    one positive example (s2) and one negative example (s3)
"""
def __init__(self, dataset_folder, s1_col_idx=0, s2_col_idx=1, s3_col_idx=2, has_header=False, delimiter="\t",
quoting=csv.QUOTE_NONE):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
"""
"""
data = csv.reader(open(os.path.join(self.dataset_folder, filename), encoding="utf-8"), delimiter=self.delimiter,
quoting=self.quoting)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
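# Illustrative usage sketch (added for clarity, not part of the original module).
# The folder and file name below are hypothetical placeholders.
def _triplet_reader_demo():
    reader = TripletReader('datasets/my-triplets', has_header=True)
    examples = reader.get_examples('train.csv', max_examples=3)
    for example in examples:
        anchor, positive, negative = example.texts
        print(anchor, '|', positive, '|', negative)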
| 1,336 | 32.425 | 120 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/PairedFilesReader.py
|
from . import InputExample
import csv
import gzip
import os
class PairedFilesReader(object):
"""
    Reads in a paired dataset that is split across two parallel files: line i of each file forms one example
"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
"""
"""
fIns = []
for filepath in self.filepaths:
fIn = gzip.open(filepath, 'rt', encoding='utf-8') if filepath.endswith('.gz') else open(filepath, encoding='utf-8')
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == '':
eof = True
break
texts.append(text)
if eof:
                break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
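# Illustrative usage sketch (added for clarity, not part of the original module).
# The two file paths are hypothetical; line i of each file forms one example.
def _paired_files_reader_demo():
    reader = PairedFilesReader(['data/sentences.en.gz', 'data/sentences.de.gz'])
    examples = reader.get_examples(max_examples=5)
    print(examples[0].texts)  # -> [english line 1, german line 1]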
| 1,058 | 23.068182 | 127 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/NLIDataReader.py
|
from . import InputExample
import csv
import gzip
import os
class NLIDataReader(object):
"""
Reads in the Stanford NLI dataset and the MultiGenre NLI dataset
"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (e.g., train.gz, dev.gz, test.gz).
        Expects that self.dataset_folder contains the files s1.$filename, s2.$filename and
        labels.$filename, e.g., for the train split: s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, 's1.' + filename),
mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, 's2.' + filename),
mode="rt", encoding="utf-8").readlines()
labels = gzip.open(os.path.join(self.dataset_folder, 'labels.' + filename),
mode="rt", encoding="utf-8").readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
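# Illustrative usage sketch (added for clarity, not part of the original module).
# The dataset folder is a hypothetical placeholder; see get_examples for the
# expected s1.*/s2.*/labels.* file layout.
def _nli_reader_demo():
    reader = NLIDataReader('datasets/AllNLI')
    examples = reader.get_examples('train.gz', max_examples=10)
    print(examples[0].texts, examples[0].label)  # label is 0/1/2, see get_labels()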
| 1,690 | 34.978723 | 113 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/STSDataReader.py
|
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""
Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
    The default configuration expects a tab-separated file whose first and second columns hold the sentence pair and whose third column holds the score. By default, scores are normalized from 0...5 to 0...1
"""
def __init__(self, dataset_folder, s1_col_idx=0, s2_col_idx=1, score_col_idx=2, delimiter="\t",
quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (train.csv, dev.csv, test.csv).
"""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, 'rt', encoding='utf8') if filename.endswith('.gz') else open(filepath, encoding="utf-8") as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename+str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""
    Reader for the STS benchmark dataset, where the sentences are in columns 5 and 6 and the score is in column 4.
    Scores are normalized from 0...5 to 0...1
"""
def __init__(self, dataset_folder, s1_col_idx=5, s2_col_idx=6, score_col_idx=4, delimiter="\t",
quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5):
super().__init__(dataset_folder=dataset_folder, s1_col_idx=s1_col_idx, s2_col_idx=s2_col_idx, score_col_idx=score_col_idx, delimiter=delimiter,
quoting=quoting, normalize_scores=normalize_scores, min_score=min_score, max_score=max_score)
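# Illustrative usage sketch (added for clarity, not part of the original module).
# The dataset folder is a hypothetical placeholder.
def _sts_benchmark_reader_demo():
    reader = STSBenchmarkDataReader('datasets/stsbenchmark')
    dev_examples = reader.get_examples('sts-dev.csv', max_examples=5)
    print(dev_examples[0].label)  # gold 0...5 score normalized to 0...1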
| 2,655 | 48.185185 | 185 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/__init__.py
|
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSDataReader, STSBenchmarkDataReader
from .TripletReader import TripletReader
| 238 | 46.8 | 64 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/LabelSentenceReader.py
|
from . import InputExample
import csv
import gzip
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator='\t'):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
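# Illustrative usage sketch (added for clarity, not part of the original module).
# The folder and file name are hypothetical placeholders.
def _label_sentence_reader_demo():
    reader = LabelSentenceReader('datasets/my-classification-data')
    examples = reader.get_examples('train.tsv', max_examples=100)
    print(len(reader.label_map), examples[0].label)  # string labels are mapped to int ids on the fly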
| 1,270 | 32.447368 | 86 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/readers/InputExample.py
|
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str = '', texts: List[str] = None, texts_tokenized: List[List[int]] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
:param guid
id for the example
:param texts
the texts for the example. Note, str.strip() is called on the texts
:param texts_tokenized
Optional: Texts that are already tokenized. If texts_tokenized is passed, texts must not be passed.
:param label
the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts] if texts is not None else texts
self.texts_tokenized = texts_tokenized
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
| 1,037 | 36.071429 | 135 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/SimSiamLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
class MLP1(nn.Module):
def __init__(self, hidden_dim=2048, norm=None, activation="relu"): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if activation == "relu":
activation_layer = nn.ReLU()
elif activation == "leakyrelu":
activation_layer = nn.LeakyReLU()
elif activation == "tanh":
activation_layer = nn.Tanh()
elif activation == "sigmoid":
activation_layer = nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
if norm:
if norm=='bn':
norm_layer = nn.BatchNorm1d
else:
norm_layer = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
norm_layer(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class SimSiamLoss(nn.Module):
"""
    Implements a SimSiam-style objective ("Exploring Simple Siamese Representation Learning"):
    two views of a sentence representation are passed through a prediction MLP, and the negative
    cosine similarity between each predicted view and the stop-gradient of the other view is
    minimized. The stop-gradient branch is what prevents representational collapse.
    :param model: SentenceTransformer model
    :param sentence_embedding_dimension: Dimension of your sentence embeddings
    :param num_labels: Number of different labels (used by the auxiliary softmax classifier)
    :param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
    :param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
    :param concatenation_sent_multiplication: Add u*v for the softmax classifier?
    Example::
        from sentence_transformers import SentenceTransformer, SentencesDataset, losses
        from sentence_transformers.readers import InputExample
        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
            InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.SimSiamLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
                 concatenation_sent_max_square: bool = False,  # also concatenate the element-wise max of the two representations, squared (a suggested trick)
                 data_augmentation_strategy: str = "normal",  # data augmentation strategy; this class supports "normal" (two identical views) and "meanmax" (contrast mean- vs. max-pooled views)
                 projection_norm_type: str = "ln",
                 do_hidden_normalization: bool = True,  # whether to L2-normalize sentence representations before computing the contrastive loss
                 temperature: float = 1.0,  # temperature for the contrastive loss (only used by the cross-entropy variant)
                 mapping_to_small_space: int = None,  # whether to map sentence representations into a smaller space for the contrastive loss (as in SimCLR), and its final dimension
                 projection_hidden_dim: int = None,  # hidden dimension of the MLP, used by both options above (mapping & predictor)
                 projection_use_batch_norm: bool = None,  # whether to add BatchNorm to the MLP's hidden layer, used by both options above (mapping & predictor)
):
super(SimSiamLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.data_augmentation_strategy = data_augmentation_strategy
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.predictor = MLP1(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
scores = torch.einsum("bd,bd->b", hidden1, hidden2)
neg_cosine_loss = -1.0 * scores.mean()
return neg_cosine_loss
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
if self.data_augmentation_strategy == "meanmax": # 使用mean-max pooling的对比
rep_dicts = [self.model(sentence_feature) for sentence_feature in sentence_features]
reps_mean = [rep_dict['pad_mean_tokens'] for rep_dict in rep_dicts]
reps_max = [rep_dict['pad_max_tokens'] for rep_dict in rep_dicts]
rep_a_view1, rep_a_view2 = reps_mean[0], reps_max[0]
        elif self.data_augmentation_strategy == "normal":  # the basic version: simply obtain rep_a  # TODO: add more data augmentation strategies here
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a = reps[0]
rep_a_view1, rep_a_view2 = rep_a, rep_a
else:
raise ValueError("Invalid data augmentation strategy")
# add predictor
rep_a_view1_normal, rep_a_view1_stop = self.predictor(rep_a_view1), rep_a_view1.detach()
rep_a_view2_normal, rep_a_view2_stop = self.predictor(rep_a_view2), rep_a_view2.detach()
contrastive_loss_a = self._contrastive_loss_forward(rep_a_view1_normal, rep_a_view2_stop, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_a", contrastive_loss_a.item(), global_step=self.model.global_step)
contrastive_loss_b = self._contrastive_loss_forward(rep_a_view2_normal, rep_a_view1_stop, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_b", contrastive_loss_b.item(), global_step=self.model.global_step)
final_loss = contrastive_loss_b + contrastive_loss_a
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", final_loss.item(), global_step=self.model.global_step)
return final_loss
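# Standalone numerical sketch of the symmetric objective computed in forward()
# above (an added illustration with random tensors standing in for sentence
# embeddings, not part of the original module): each predicted view is pulled
# towards the stop-gradient of the other view via negative cosine similarity.
def _simsiam_objective_demo():
    torch.manual_seed(0)
    predictor = MLP1(hidden_dim=8, norm='ln')
    z1, z2 = torch.randn(4, 8), torch.randn(4, 8)
    p1, p2 = predictor(z1), predictor(z2)
    cos = torch.nn.functional.cosine_similarity
    return -cos(p1, z2.detach()).mean() - cos(p2, z1.detach()).mean()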
| 9,366 | 47.786458 | 169 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/CosineSimilarityLoss.py
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
"""
    CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label.
It computes the vectors u = model(input_text[0]) and v = model(input_text[1]) and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ||input_label - cos_score_transformation(cosine_sim(u,v))||_2.
    :param model: SentenceTransformer model
    :param loss_fct: Which pytorch loss function should be used to compare the cosine_similarity(u,v) with the input_label? By default, MSE: ||input_label - cosine_sim(u,v)||_2
    :param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identity function is used (i.e. no change).
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, losses
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, loss_fct = nn.MSELoss(), cos_score_transformation=nn.Identity()):
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
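# Tiny numerical sketch (added illustration, not part of the original module):
# identical embeddings have cosine similarity 1.0, so a gold label of 1.0
# yields zero MSE loss.
def _cosine_similarity_loss_demo():
    u = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    v = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    scores = torch.cosine_similarity(u, v)                  # -> tensor([1., 1.])
    return nn.MSELoss()(scores, torch.tensor([1.0, 1.0]))   # -> tensor(0.)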
| 2,213 | 50.488372 | 177 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/AdvCLSoftmaxLoss_single_stream_backup.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
def scheduler0(cur_step, global_step):
return 1.0, 1.0
def scheduler1(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7950:
return 1.0, 1.0
else:
return 0.0, 1.0
def scheduler2(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7950:
return 1.0, 1.0
else:
return 0.01, 1.0
def scheduler3(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7900:
return 1.0, 1.0
else:
return 0.0, 1.0
def scheduler4(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7900:
return 1.0, 1.0
else:
return 0.01, 1.0
def scheduler5(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.0, 0.1
def scheduler6(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.0, 0.03
def scheduler7(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.1, 0.1
def scheduler8(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.1, 0.03
def scheduler9(cur_step, global_step):
level = cur_step // 1000
rate = pow(0.5, level)
return rate, 1.0
def scheduler10(cur_step, global_step):
level = cur_step // 1000
rate = pow(0.3, level)
return rate, 1.0
def scheduler11(cur_step, global_step):
level = cur_step // 1000
rate1 = pow(0.5, level)
rate2 = pow(0.7, level)
return rate1, rate2
def scheduler12(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.464, level)
return rate, 1.0
def scheduler13(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.215, level)
return rate, 1.0
def scheduler14(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.1, level)
return rate, 1.0
def scheduler15(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.316, level)
return rate, 1.0
def scheduler16(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.1, level)
return rate, 1.0
def scheduler17(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.032, level)
return rate, 1.0
def scheduler18(cur_step, global_step):
if cur_step < int(global_step * 0.8):
return 1.0, 1.0
else:
return 0.0, 1.0
LOSS_RATE_SCHEDULERS = [
scheduler0,
scheduler1,
scheduler2,
scheduler3,
scheduler4,
scheduler5,
scheduler6,
scheduler7,
scheduler8,
scheduler9,
scheduler10,
scheduler11,
scheduler12,
scheduler13,
scheduler14,
scheduler15,
scheduler16,
scheduler17,
scheduler18
]
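# Quick sanity sketch (added illustration, not part of the original module):
# AdvCLSoftmaxLoss indexes this list with its loss_rate_scheduler argument and
# scales the main-task and contrastive losses by the two returned rates.
def _scheduler_demo():
    adv_rate, cl_rate = LOSS_RATE_SCHEDULERS[5](cur_step=9000, global_step=9814)
    assert (adv_rate, cl_rate) == (0.0, 0.1)  # scheduler5 switches after step 8814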
class MLP(torch.nn.Module):
def __init__(self,
input_dim: int,
hidden_dim: int,
output_dim: int,
hidden_activation: str = "relu",
use_bn: bool = False,
use_bias: bool = True):
super(MLP, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.linear1 = torch.nn.Linear(input_dim, hidden_dim, bias=use_bias and not use_bn)
self.linear2 = torch.nn.Linear(hidden_dim, output_dim, bias=use_bias)
if hidden_activation == "relu":
self.activation = torch.nn.ReLU()
elif hidden_activation == "leakyrelu":
self.activation = torch.nn.LeakyReLU()
elif hidden_activation == "tanh":
self.activation = torch.nn.Tanh()
elif hidden_activation == "sigmoid":
self.activation = torch.nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
self.use_bn = use_bn
if use_bn:
self.bn = torch.nn.BatchNorm1d(hidden_dim)
def forward(self, x: torch.Tensor):
hidden = self.linear1(x)
if self.use_bn:
hidden = self.bn(hidden)
activated_hidden = self.activation(hidden)
return self.linear2(activated_hidden)
class prediction_MLP(nn.Module):
def __init__(self, hidden_dim=2048, norm=None): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if norm:
if norm=='bn':
MLPNorm = nn.BatchNorm1d
else:
MLPNorm = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
def distance_to_center_mse_loss(x: torch.Tensor):
"""x: shape (batch_size, hidden_dim)"""
bsz, hidden = x.shape
center = torch.mean(x, dim=0)
to_center_dist = torch.norm(x - center, p=2, dim=-1)
return to_center_dist.pow(2).mean()
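# Numerical sketch (added illustration, not part of the original module): for a
# batch symmetric around the origin the centroid is zero, so the regularizer
# reduces to the mean squared distance of the points from the origin.
def _distance_to_center_demo():
    x = torch.tensor([[1.0, 0.0], [-1.0, 0.0]])  # centroid = (0, 0)
    assert torch.isclose(distance_to_center_mse_loss(x), torch.tensor(1.0))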
class AdvCLSoftmaxLoss(nn.Module):
"""
    Extends the softmax classification loss from our SBERT publication (https://arxiv.org/abs/1908.10084) with
    optional adversarial-training and contrastive-learning objectives. It adds a softmax classifier on top of the output of two transformer networks.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
            InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
                 concatenation_sent_max_square: bool = False,  # also concatenate the element-wise max of the two representations, squared (a suggested trick)
                 normal_loss_stop_grad: bool = False,  # whether to apply stop-gradient to the conventional (sentence-pair classification) loss
                 use_adversarial_training: bool = False,  # whether to add the adversarial loss
                 adversarial_loss_rate: float = 1.0,  # weight of the adversarial loss
                 do_noise_normalization: bool = True,  # whether to normalize the adversarial perturbation (noise)
                 noise_norm: float = 0.01,  # magnitude of the adversarial perturbation
                 normal_normal_weight: float = 0.25,  # weight of the normal-to-normal sentence-pair classification loss
                 normal_adv_weight: float = 0.25,  # weight of the normal-to-adversarial sentence-pair classification loss
                 adv_normal_weight: float = 0.25,  # weight of the adversarial-to-normal sentence-pair classification loss
                 adv_adv_weight: float = 0.25,  # weight of the adversarial-to-adversarial sentence-pair classification loss
                 adv_loss_stop_grad: bool = False,  # whether to apply stop-gradient to the adversarial loss (the series of sentence-pair classifications)
                 loss_rate_scheduler: int = 0,  # controls the relative weight of the contrastive loss and the main-task loss over training
                 use_contrastive_loss: bool = False,  # whether to add the contrastive loss
                 data_augmentation_strategy: str = "adv",  # data augmentation strategy; options: no augmentation "none", adversarial "adv", mean-vs-max pooling contrast "meanmax", shuffling token positions "shuffle", removing some embedding dimensions "cutoff", one view each from shuffle and cutoff "shuffle-cutoff"
                 cutoff_direction: str = None,  # when cutoff is used for augmentation, whether it is applied to rows or columns
                 cutoff_rate: float = None,  # when cutoff is used for augmentation, the cutoff rate (between 0 and 1, similar to dropout)
                 contrastive_loss_only: bool = False,  # train (unsupervised) with the contrastive loss only
                 no_pair: bool = False,  # do not use paired corpora, avoiding prior information
                 contrastive_loss_type: str = "nt_xent",  # form of the contrastive loss ("nt_xent" or "cosine")
                 contrastive_loss_rate: float = 1.0,  # weight of the contrastive loss
                 regularization_term_rate: float = 0.0,  # weight of the regularization term (variance of the distribution within a batch)
                 do_hidden_normalization: bool = True,  # whether to L2-normalize sentence representations before computing the contrastive loss
                 temperature: float = 1.0,  # temperature for the contrastive loss (only used by the cross-entropy variant)
                 mapping_to_small_space: int = None,  # whether to map sentence representations into a smaller space for the contrastive loss (as in SimCLR), and its final dimension
                 add_contrastive_predictor: str = None,  # whether to apply a same-dimension non-linear predictor in contrastive learning (as in SimSiam), and to which branch it is added ("normal" or "adv")
                 add_projection: bool = False,  # add a projection network in front of the predictor
                 projection_norm_type: str = None,  # norm type of the projection network in front of the predictor; one of (None, 'bn', 'ln')
                 projection_hidden_dim: int = None,  # hidden dimension of the MLP, used by both options above (mapping & predictor)
                 projection_use_batch_norm: bool = None,  # whether to add BatchNorm to the MLP's hidden layer, used by both options above (mapping & predictor)
                 contrastive_loss_stop_grad: str = None  # whether to apply stop-gradient to the contrastive loss, and to which branch ("normal" or "adv")
):
super(AdvCLSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.normal_loss_stop_grad = normal_loss_stop_grad
self.use_adversarial_training = use_adversarial_training
self.adversarial_loss_rate = adversarial_loss_rate
self.do_noise_normalization = do_noise_normalization
self.noise_norm = noise_norm
self.normal_normal_weight = normal_normal_weight
self.normal_adv_weight = normal_adv_weight
self.adv_normal_weight = adv_normal_weight
self.adv_adv_weight = adv_adv_weight
self.adv_loss_stop_grad = adv_loss_stop_grad
self.loss_rate_scheduler = loss_rate_scheduler
self.use_contrastive_loss = use_contrastive_loss
assert data_augmentation_strategy in ("none", "adv", "meanmax", "shuffle", "cutoff", "shuffle-cutoff", "shuffle+cutoff", "shuffle_embeddings")
if data_augmentation_strategy in ("cutoff", "shuffle-cutoff", "shuffle+cutoff"):
assert cutoff_direction is not None and cutoff_direction in ("row", "column", "random")
assert cutoff_rate is not None and 0.0 < cutoff_rate < 1.0
self.cutoff_direction = cutoff_direction
self.cutoff_rate = cutoff_rate
self.data_augmentation_strategy = data_augmentation_strategy
self.contrastive_loss_only = contrastive_loss_only
self.no_pair = no_pair
if no_pair:
assert use_contrastive_loss and contrastive_loss_only
assert contrastive_loss_type in ("nt_xent", "cosine")
self.contrastive_loss_type = contrastive_loss_type
self.contrastive_loss_rate = contrastive_loss_rate
self.regularization_term_rate = regularization_term_rate
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.add_projection = add_projection
if add_projection:
assert projection_norm_type in (None, "ln", "bn")
self.projection_head = prediction_MLP(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
if mapping_to_small_space is not None:
assert add_contrastive_predictor is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = "both"
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, mapping_to_small_space, use_bn=projection_use_batch_norm)
else:
self.projection_mode = "none"
if add_contrastive_predictor is not None:
assert add_contrastive_predictor in ("normal", "adv")
assert mapping_to_small_space is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = add_contrastive_predictor
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, sentence_embedding_dimension, use_bn=projection_use_batch_norm)
else:
self.projection_mode = "none"
assert contrastive_loss_stop_grad in (None, "normal", "adv")
self.contrastive_loss_stop_grad = contrastive_loss_stop_grad
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
if self.add_projection:
hidden1 = self.projection_head(hidden1)
hidden2 = self.projection_head(hidden2)
if self.projection_mode in ("both", "normal"):
hidden1 = self.projection(hidden1)
if self.projection_mode in ("both", "adv"):
hidden2 = self.projection(hidden2)
if self.contrastive_loss_type == "cosine":
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
scores = torch.einsum("bd,bd->b", hidden1, hidden2)
neg_cosine_loss = -1.0 * scores.mean()
return neg_cosine_loss
elif self.contrastive_loss_type == "nt_xent":
if hidden_norm:
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
hidden1_large = hidden1
hidden2_large = hidden2
labels = torch.arange(0, batch_size).to(device=hidden1.device)
masks = torch.nn.functional.one_hot(torch.arange(0, batch_size), num_classes=batch_size).to(device=hidden1.device, dtype=torch.float)
logits_aa = torch.matmul(hidden1, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(hidden2, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(hidden1, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_ba = torch.matmul(hidden2, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
loss = loss_a + loss_b
return loss
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        if not self.training:  # evaluation or inference stage
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
        elif not self.use_adversarial_training and not self.use_contrastive_loss:  # plain supervised training only (the baseline setting)
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
if self.normal_loss_stop_grad:
rep_b = rep_b.detach()
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
        else:  # training with adversarial training and/or a contrastive loss
total_step, cur_step = self.model.num_steps_total, self.model.global_step
adv_rate, cl_rate = LOSS_RATE_SCHEDULERS[self.loss_rate_scheduler](cur_step, total_step)
# data augmentation generation
            if self.use_adversarial_training or (self.use_contrastive_loss and self.data_augmentation_strategy == "adv"):  # generate adversarial examples whenever adversarial training is used, or contrastive learning needs them as data augmentation
# 1. normal forward
sentence_feature_a, sentence_feature_b = sentence_features
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
embedding_output_a = self.model[0].auto_model.get_most_recent_embedding_output()
rep_b = self.model(sentence_feature_b)['sentence_embedding']
embedding_output_b = self.model[0].auto_model.get_most_recent_embedding_output()
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
normal_loss = loss_fct(output, labels.view(-1))
# 2. adversarial backward
embedding_output_a.retain_grad()
embedding_output_b.retain_grad()
normal_loss.backward(retain_graph=True)
unnormalized_noise_a = embedding_output_a.grad.detach_()
unnormalized_noise_b = embedding_output_b.grad.detach_()
for p in self.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_() # clear the gradient on parameters
if self.do_noise_normalization: # do normalization
norm_a = unnormalized_noise_a.norm(p=2, dim=-1)
normalized_noise_a = unnormalized_noise_a / (norm_a.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
norm_b = unnormalized_noise_b.norm(p=2, dim=-1)
normalized_noise_b = unnormalized_noise_b / (norm_b.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
else: # no normalization
normalized_noise_a = unnormalized_noise_a
normalized_noise_b = unnormalized_noise_b
noise_a = self.noise_norm * normalized_noise_a
noise_b = self.noise_norm * normalized_noise_b
# 3. adversarial forward
noise_embedding_a = embedding_output_a + noise_a
noise_embedding_b = embedding_output_b + noise_b
self.model[0].auto_model.set_flag("data_aug_adv", True)
self.model[0].auto_model.set_flag("noise_embedding", noise_embedding_a)
adv_rep_a = self.model(sentence_feature_a)['sentence_embedding']
self.model[0].auto_model.set_flag("data_aug_adv", True)
self.model[0].auto_model.set_flag("noise_embedding", noise_embedding_b)
adv_rep_b = self.model(sentence_feature_b)['sentence_embedding']
elif self.use_contrastive_loss and self.data_augmentation_strategy == "meanmax": # 使用mean-max pooling的对比
rep_dicts = [self.model(sentence_feature) for sentence_feature in sentence_features]
reps_mean = [rep_dict['pad_mean_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_mean, rep_b_mean = reps_mean
else:
rep_a_mean, rep_b_mean = reps_mean[0], None
reps_max = [rep_dict['pad_max_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_max, rep_b_max = reps_max
else:
rep_a_max, rep_b_max = reps_max[0], None
            elif self.use_contrastive_loss and self.data_augmentation_strategy in ("shuffle", "shuffle_embeddings"):  # randomly shuffle the token order
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag(f"data_aug_{self.data_augmentation_strategy}", True)
rep_a_shuffle = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag(f"data_aug_{self.data_augmentation_strategy}", True)
rep_b_shuffle = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_shuffle = None
elif self.use_contrastive_loss and self.data_augmentation_strategy == "cutoff": # cutoff数据增强策略
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_cutoff = None
elif self.use_contrastive_loss and self.data_augmentation_strategy == "shuffle-cutoff": # 分别用shuffle和cutoff来生成两个view
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
# shuffle strategy
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
rep_a_shuffle = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
rep_b_shuffle = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b_shuffle = None
# cutoff strategy
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_cutoff = None
# for supervised loss
rep_a = rep_a_cutoff
rep_b = rep_b_cutoff
elif self.use_contrastive_loss and self.data_augmentation_strategy == "shuffle+cutoff": # 用shuffle和cutoff的组合作为一个view
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_shuffle_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_shuffle_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_shuffle_cutoff = None
            else:  # the basic version: simply obtain rep_a and rep_b  # TODO: add more data augmentation strategies here
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
if not self.no_pair:
rep_a, rep_b = reps
else:
rep_a, rep_b = reps[0], None
# loss calculation
final_loss = 0
if self.use_adversarial_training:
if self.adv_loss_stop_grad:
rep_b = rep_b.detach()
adv_rep_b = adv_rep_b.detach()
match_output_n_n = self._reps_to_output(rep_a, rep_b)
match_output_n_a = self._reps_to_output(rep_a, adv_rep_b)
match_output_a_n = self._reps_to_output(adv_rep_a, rep_b)
match_output_a_a = self._reps_to_output(adv_rep_a, adv_rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
loss_n_a = loss_fct(match_output_n_a, labels.view(-1))
loss_a_n = loss_fct(match_output_a_n, labels.view(-1))
loss_a_a = loss_fct(match_output_a_a, labels.view(-1))
adv_training_loss = self.normal_normal_weight * loss_n_n + self.normal_adv_weight * loss_n_a + \
self.adv_normal_weight * loss_a_n + self.adv_adv_weight * loss_a_a
final_loss += self.adversarial_loss_rate * adv_training_loss * adv_rate
self.model.tensorboard_writer.add_scalar(f"train_adv_loss", self.adversarial_loss_rate * adv_rate * adv_training_loss.item(), global_step=self.model.global_step)
elif not self.contrastive_loss_only:
match_output_n_n = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
final_loss += loss_n_n * adv_rate
self.model.tensorboard_writer.add_scalar(f"train_normal_loss", loss_n_n.item() * adv_rate, global_step=self.model.global_step)
if self.use_contrastive_loss:
if self.data_augmentation_strategy == "adv":
if self.contrastive_loss_stop_grad == "normal":
rep_a = rep_a.detach()
if not self.no_pair:
rep_b = rep_b.detach()
elif self.contrastive_loss_stop_grad == "adv":
adv_rep_a = adv_rep_a.detach()
if not self.no_pair:
adv_rep_b = adv_rep_b.detach()
else:
assert self.contrastive_loss_stop_grad is None
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = adv_rep_a, adv_rep_b
elif self.data_augmentation_strategy == "none":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a, rep_b
elif self.data_augmentation_strategy == "meanmax":
rep_a_view1, rep_b_view1 = rep_a_mean, rep_b_mean
rep_a_view2, rep_b_view2 = rep_a_max, rep_b_max
elif self.data_augmentation_strategy in ("shuffle", "shuffle_embeddings"):
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_shuffle, rep_b_shuffle
elif self.data_augmentation_strategy == "cutoff":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_cutoff, rep_b_cutoff
elif self.data_augmentation_strategy == "shuffle-cutoff":
rep_a_view1, rep_b_view1 = rep_a_shuffle, rep_b_shuffle
rep_a_view2, rep_b_view2 = rep_a_cutoff, rep_b_cutoff
elif self.data_augmentation_strategy == "shuffle+cutoff":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_shuffle_cutoff, rep_b_shuffle_cutoff
else:
raise ValueError("Invalid data augmentation strategy")
contrastive_loss_a = self._contrastive_loss_forward(rep_a_view1, rep_a_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_a", contrastive_loss_a.item(), global_step=self.model.global_step)
if not self.no_pair:
contrastive_loss_b = self._contrastive_loss_forward(rep_b_view1, rep_b_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
else:
contrastive_loss_b = torch.tensor(0.0)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_b", contrastive_loss_b.item(), global_step=self.model.global_step)
contrastive_loss = contrastive_loss_a + contrastive_loss_b
final_loss += self.contrastive_loss_rate * contrastive_loss * cl_rate
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", self.contrastive_loss_rate * cl_rate * contrastive_loss.item(), global_step=self.model.global_step)
if self.regularization_term_rate > 1e-10:
regularization_term = distance_to_center_mse_loss(rep_a_view1) # note: only applied for rep_a_view1
final_loss += self.regularization_term_rate * regularization_term
self.model.tensorboard_writer.add_scalar(f"contrastive_loss_regularization_term", self.regularization_term_rate * regularization_term.item(), global_step=self.model.global_step)
return final_loss
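# Compact standalone restatement of the "nt_xent" branch of
# _contrastive_loss_forward above (an added illustration with random tensors;
# the real method additionally routes hidden states through optional projection
# heads before computing the loss).
def _nt_xent_demo(temperature: float = 0.1):
    torch.manual_seed(0)
    h1 = torch.nn.functional.normalize(torch.randn(4, 8), dim=-1)
    h2 = torch.nn.functional.normalize(torch.randn(4, 8), dim=-1)
    labels = torch.arange(4)
    masks = torch.eye(4)
    logits_ab = h1 @ h2.t() / temperature                      # positives on the diagonal
    logits_aa = h1 @ h1.t() / temperature - masks * LARGE_NUM  # mask out self-similarity
    logits_ba = h2 @ h1.t() / temperature
    logits_bb = h2 @ h2.t() / temperature - masks * LARGE_NUM
    loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
    loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
    return loss_a + loss_b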
| 36,924 | 50.427577 | 226 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/MSELoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
class MSELoss(nn.Module):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
For an example, see the documentation on extending language models to new languages.
"""
def __init__(self, model):
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.model(sentence_features[0])['sentence_embedding']
return self.loss_fct(rep, labels)
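# Tiny numerical sketch (added illustration, not part of the original module):
# in the distillation setup the "labels" are teacher-model embeddings, so the
# loss is simply the MSE between student and teacher representations.
def _mse_loss_demo():
    student_rep = torch.tensor([[0.0, 1.0]])
    teacher_rep = torch.tensor([[0.0, 0.5]])  # stand-in for a teacher embedding
    return nn.MSELoss()(student_rep, teacher_rep)  # -> tensor(0.1250)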
| 888 | 39.409091 | 118 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/TripletLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
    between anchor and negative. It computes the following loss function:
    loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0).
    The margin is an important hyperparameter and needs to be tuned for the task.
For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric contains common distance metrices that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2'])]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5):
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
| 2,728 | 45.254237 | 164 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/BatchHardSoftMarginTripletLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchHardSoftMarginTripletLoss(BatchHardTripletLoss):
"""
BatchHardSoftMarginTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. The labels
    must be integers, with the same label indicating sentences from the same class. Your training dataset
    must contain at least 2 examples per label class. The margin is computed automatically.
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two embeddings. The class BatchHardTripletLossDistanceFunction contains pre-defined metrics that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchHardSoftMarginTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance):
super(BatchHardSoftMarginTripletLoss, self).__init__(model)
self.sentence_embedder = model
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.sentence_embedder(sentence_features[0])['sentence_embedding']
return self.batch_hard_triplet_soft_margin_loss(labels, rep)
# Hard Triplet Loss with Soft Margin
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
def batch_hard_triplet_soft_margin_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = self.distance_metric(embeddings)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = BatchHardTripletLoss.get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = mask_anchor_positive * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = BatchHardTripletLoss.get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss with soft margin
#tl = hardest_positive_dist - hardest_negative_dist + margin
#tl[tl < 0] = 0
tl = torch.log1p(torch.exp(hardest_positive_dist - hardest_negative_dist))
triplet_loss = tl.mean()
return triplet_loss
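# Worked numerical check of the soft-margin formula above (added illustration,
# not part of the original module): log1p(exp(d_pos - d_neg)) decays smoothly
# as the negative moves further away, instead of clipping at a fixed margin.
def _soft_margin_demo():
    d_pos, d_neg = torch.tensor(0.5), torch.tensor(2.0)
    return torch.log1p(torch.exp(d_pos - d_neg))  # -> tensor(0.2014)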
| 4,942 | 54.539326 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/AdvCLSoftmaxLoss.py
|
import json
import os
import copy
import numpy as np
import torch
from torch import nn, Tensor
from torch.autograd import Function
from typing import Union, Tuple, List, Iterable, Dict, Set, Any, Optional
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
def scheduler0(cur_step, global_step):
return 1.0, 1.0
def scheduler1(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7950:
return 1.0, 1.0
else:
return 0.0, 1.0
def scheduler2(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7950:
return 1.0, 1.0
else:
return 0.01, 1.0
def scheduler3(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7900:
return 1.0, 1.0
else:
return 0.0, 1.0
def scheduler4(cur_step, global_step):
"""global_step=9814"""
if cur_step < 7900:
return 1.0, 1.0
else:
return 0.01, 1.0
def scheduler5(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.0, 0.1
def scheduler6(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.0, 0.03
def scheduler7(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.1, 0.1
def scheduler8(cur_step, global_step):
"""global_step=9814"""
if cur_step < 8814:
return 1.0, 1.0
else:
return 0.1, 0.03
def scheduler9(cur_step, global_step):
level = cur_step // 1000
rate = pow(0.5, level)
return rate, 1.0
def scheduler10(cur_step, global_step):
level = cur_step // 1000
rate = pow(0.3, level)
return rate, 1.0
def scheduler11(cur_step, global_step):
level = cur_step // 1000
rate1 = pow(0.5, level)
rate2 = pow(0.7, level)
return rate1, rate2
def scheduler12(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.464, level)
return rate, 1.0
def scheduler13(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.215, level)
return rate, 1.0
def scheduler14(cur_step, global_step):
level = cur_step // 3000
rate = pow(0.1, level)
return rate, 1.0
def scheduler15(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.316, level)
return rate, 1.0
def scheduler16(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.1, level)
return rate, 1.0
def scheduler17(cur_step, global_step):
level = cur_step // 4000
rate = pow(0.032, level)
return rate, 1.0
def scheduler18(cur_step, global_step):
if cur_step < int(global_step * 0.8):
return 1.0, 1.0
else:
return 0.0, 1.0
LOSS_RATE_SCHEDULERS = [
scheduler0,
scheduler1,
scheduler2,
scheduler3,
scheduler4,
scheduler5,
scheduler6,
scheduler7,
scheduler8,
scheduler9,
scheduler10,
scheduler11,
scheduler12,
scheduler13,
scheduler14,
scheduler15,
scheduler16,
scheduler17,
scheduler18
]
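# Hedged usage sketch (illustrative, not part of the original training loop):
# the trainer is assumed to look up a scheduler by index and scale the
# adversarial/main-task loss and the contrastive loss by the returned rates.
def _demo_loss_rate_scheduler(idx: int = 9, cur_step: int = 2500, global_step: int = 9814):
    adv_rate, cl_rate = LOSS_RATE_SCHEDULERS[idx](cur_step, global_step)
    # e.g. scheduler9 halves the first rate every 1000 steps: 2500 // 1000 = 2 -> 0.25
    return adv_rate, cl_rate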
def get_label(tokenizer, path='./data/labels/bookcorpus/labels.json', low_rate=0.5):
    with open(path, 'r') as f:
        token_dic = json.load(f)
freq_list = [token_dic[i] for i in token_dic]
    num = freq_list.count(0)  # number of tokens never observed in the corpus
freq_list.sort()
thres = freq_list[num + int((len(freq_list) - num) * low_rate)]
index_dic = {}
freq_label = {}
for k, v in token_dic.items():
index = tokenizer.convert_tokens_to_ids(k)
index_dic[index] = v
freq_label[index] = 1 if v < thres else 0
return freq_label
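# Sketch of the expected labels-file format (an assumption inferred from how
# get_label reads it): `labels.json` maps each token string to its corpus
# frequency, e.g.
#   {"the": 120000, "meteor": 42, "[unused0]": 0}
# Zero-frequency entries only shift the quantile start; the threshold is taken
# at the `low_rate` quantile of observed frequencies, and tokens below it
# (including zero-frequency ones) receive label 1 (low-frequency), the rest 0.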
class MLP(torch.nn.Module):
def __init__(self,
input_dim: int,
hidden_dim: int,
output_dim: int,
hidden_activation: str = "relu",
use_bn: bool = False,
use_bias: bool = True):
super(MLP, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.linear1 = torch.nn.Linear(input_dim, hidden_dim, bias=use_bias and not use_bn)
self.linear2 = torch.nn.Linear(hidden_dim, output_dim, bias=use_bias)
if hidden_activation == "relu":
self.activation = torch.nn.ReLU()
elif hidden_activation == "leakyrelu":
self.activation = torch.nn.LeakyReLU()
elif hidden_activation == "tanh":
self.activation = torch.nn.Tanh()
elif hidden_activation == "sigmoid":
self.activation = torch.nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
self.use_bn = use_bn
if use_bn:
self.bn = torch.nn.BatchNorm1d(hidden_dim)
def forward(self, x: torch.Tensor):
hidden = self.linear1(x)
if self.use_bn:
hidden = self.bn(hidden)
activated_hidden = self.activation(hidden)
return self.linear2(activated_hidden)
class prediction_MLP(nn.Module):
def __init__(self, hidden_dim=2048, norm=None): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if norm:
if norm == 'bn':
MLPNorm = nn.BatchNorm1d
else:
MLPNorm = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class GradientReverseFunction(Function):
"""
重写自定义的梯度计算方式
"""
@staticmethod
def forward(ctx: Any, input: torch.Tensor, coeff: Optional[float] = 1.) -> torch.Tensor:
ctx.coeff = coeff
output = input * 1.0
return output
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[torch.Tensor, Any]:
return grad_output.neg() * ctx.coeff, None
class GRL_Layer(nn.Module):
def __init__(self):
super(GRL_Layer, self).__init__()
def forward(self, *input):
return GradientReverseFunction.apply(*input)
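# Illustrative sanity check (not used elsewhere): the GRL is the identity in
# the forward pass and flips the sign of gradients in the backward pass, which
# is what lets the discriminators below train adversarially against the encoder.
def _demo_grl():
    grl = GRL_Layer()
    x = torch.ones(3, requires_grad=True)
    grl(x).sum().backward()
    assert torch.allclose(x.grad, -torch.ones(3))  # gradients reversed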
class freqDiscrim(nn.Module):
def __init__(self, tokenizer, path='./data/labels/bookcorpus/labels.json', high_rank=0, low_rate=0.5,
hidden_dim=768, class_num=2): # bottleneck structure
super().__init__()
self.class_num = class_num
self.low_rate = low_rate
self.freq_label = get_label(tokenizer=tokenizer, path=path, low_rate=low_rate)
self.grl = GRL_Layer()
if high_rank != 0:
self.ffnn = nn.Sequential(
nn.Linear(hidden_dim, high_rank * hidden_dim),
nn.Linear(high_rank * hidden_dim, high_rank * hidden_dim),
nn.Linear(high_rank * hidden_dim, hidden_dim),
# nn.LeakyReLU(),
# *[nn.Linear(hidden_dim, hidden_dim) for _ in range(high_rank)],
nn.Linear(hidden_dim, self.class_num),
nn.ReLU(inplace=True)
)
else:
self.ffnn = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.Linear(hidden_dim, self.class_num),
nn.ReLU(inplace=True)
)
self.criterion = nn.CrossEntropyLoss()
def forward(self, hidden1, hidden2, sentence):
pred1 = self.ffnn(self.grl(hidden1)).view(-1, self.class_num).squeeze()
pred2 = self.ffnn(self.grl(hidden2)).view(-1, self.class_num).squeeze()
        label = copy.deepcopy(sentence)  # copy is already imported at module level
bsz, seq = label.shape
for x in range(bsz):
for y in range(seq):
label[x][y] = self.freq_label[label[x][y].item()]
label = label.view(-1)
loss = self.criterion(pred1, label) + self.criterion(pred2, label)
loss /= 2
return loss
class maskDiscrim(nn.Module):
def __init__(self, tokenizer, path='./data/labels/bookcorpus/labels.json',
low_rate=0.5, hidden_dim=768, class_num=2):
super().__init__()
self.freq_label = get_label(tokenizer=tokenizer, path=path, low_rate=low_rate)
self.ffnn = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.Linear(hidden_dim, class_num),
nn.ReLU(inplace=True)
)
self.criterion = nn.CrossEntropyLoss()
def forward(self, hidden, hidden_mask):
pred = self.ffnn(hidden)
pred_mask = self.ffnn(hidden_mask)
batch_size = hidden.shape[0]
label = torch.tensor([0 for _ in range(batch_size)]).to(pred.device)
label_mask = torch.tensor([1 for _ in range(batch_size)]).to(pred_mask.device)
loss = self.criterion(pred, label) + self.criterion(pred_mask, label_mask)
return loss
def distance_to_center_mse_loss(x: torch.Tensor):
"""x: shape (batch_size, hidden_dim)"""
bsz, hidden = x.shape
center = torch.mean(x, dim=0)
to_center_dist = torch.norm(x - center, p=2, dim=-1)
return to_center_dist.pow(2).mean()
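# Illustrative check of the regularizer (assumption: it should vanish when the
# batch collapses onto a single point and stay non-negative otherwise):
def _demo_center_loss():
    collapsed = torch.ones(4, 8)
    assert distance_to_center_mse_loss(collapsed).item() == 0.0
    spread = torch.randn(4, 8)
    assert distance_to_center_mse_loss(spread).item() >= 0.0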
class AdvCLSoftmaxLoss(nn.Module):
"""
This loss was used in our SBERT publication (https://arxiv.org/abs/1908.10084) to train the SentenceTransformer
model on NLI data. It adds a softmax classifier on top of the output of two transformer networks.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
                 concatenation_sent_max_square: bool = False,   # concatenate the element-wise max-square of the two sentence representations (a trick suggested by 如寐)
                 normal_loss_stop_grad: bool = False,           # whether to apply stop-gradient to the conventional loss (sentence-pair classification)
                 use_adversarial_training: bool = False,        # whether to add the adversarial loss
                 adversarial_loss_rate: float = 1.0,            # coefficient of the adversarial loss
                 do_noise_normalization: bool = True,           # whether to normalize the adversarial perturbation (noise)
                 noise_norm: float = 0.01,                      # magnitude of the adversarial perturbation
                 normal_normal_weight: float = 0.25,            # weight of the normal-to-normal sentence-pair classification loss
                 normal_adv_weight: float = 0.25,               # weight of the normal-to-adversarial sentence-pair classification loss
                 adv_normal_weight: float = 0.25,               # weight of the adversarial-to-normal sentence-pair classification loss
                 adv_adv_weight: float = 0.25,                  # weight of the adversarial-to-adversarial sentence-pair classification loss
                 adv_loss_stop_grad: bool = False,              # whether to apply stop-gradient to the adversarial loss (a series of sentence-pair classifications)
                 loss_rate_scheduler: int = 0,                  # controls the relative magnitude of the contrastive loss and the main-task loss
                 use_contrastive_loss: bool = False,            # whether to add the contrastive loss
                 data_augmentation_strategy: str = "adv",       # data augmentation strategy; options: no augmentation "none", adversarial "adv", mean- vs. max-pooling contrast "meanmax", randomly shuffling token positions "shuffle", randomly removing some embedding information "cutoff", shuffle and cutoff generating one view each "shuffle-cutoff"
                 cutoff_direction: str = None,                  # when cutoff is the augmentation method, whether it is applied to rows or columns
                 cutoff_rate: float = None,                     # when cutoff is the augmentation method, the cutoff rate (between 0 and 1, similar to dropout)
                 data_augmentation_strategy_final_1: str = None,    # one of the five final augmentation methods (none, shuffle, token-cutoff, feature-cutoff, dropout), used to generate the first view
                 data_augmentation_strategy_final_2: str = None,    # one of the five final augmentation methods (none, shuffle, token-cutoff, feature-cutoff, dropout), used to generate the second view
                 cutoff_rate_final_1: float = None,             # cutoff/dropout rate for the first view
                 cutoff_rate_final_2: float = None,             # cutoff/dropout rate for the second view
                 contrastive_loss_only: bool = False,           # train (unsupervised) with the contrastive loss only
                 no_pair: bool = False,                         # do not use paired corpora, avoiding prior information
                 contrastive_loss_type: str = "nt_xent",        # form of the contrastive loss ("nt_xent" or "cosine")
                 contrastive_loss_rate: float = 1.0,            # coefficient of the contrastive loss
                 regularization_term_rate: float = 0.0,         # weight of the regularization term (variance of the in-batch distribution)
                 do_hidden_normalization: bool = True,          # whether to normalize sentence representations before the contrastive loss
                 temperature: float = 1.0,                      # temperature in the contrastive loss; only effective for the cross-entropy (NT-Xent) variant
                 mapping_to_small_space: int = None,            # whether to map sentence representations into a smaller space for the contrastive loss (as in SimCLR), and the final dimension of that mapping
                 add_contrastive_predictor: str = None,         # whether to map sentence representations non-linearly to the same dimension (as in SimSiam), and to which branch it is added ("normal" or "adv")
                 add_projection: bool = False,                  # add a projection network before the predictor
                 projection_norm_type: str = None,              # norm type of the projection network before the predictor: one of (None, 'bn', 'ln')
                 projection_hidden_dim: int = None,             # hidden dimension of the MLP; used by both options above (mapping & predictor)
                 projection_use_batch_norm: bool = None,        # whether to add BatchNorm to the MLP hidden layer; used by both options above (mapping & predictor)
                 contrastive_loss_stop_grad: str = None,        # whether to apply stop-gradient to the contrastive loss, and to which branch ("normal" or "adv")
adv_loss_cof: float = 1.0,
mask_loss_cof: float = 1.0,
low_rate: float = 0.5,
high_rank: int = 0):
super(AdvCLSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.normal_loss_stop_grad = normal_loss_stop_grad
self.adv_loss_cof = adv_loss_cof
self.mask_loss_cof = mask_loss_cof
self.use_adversarial_training = use_adversarial_training
self.adversarial_loss_rate = adversarial_loss_rate
self.do_noise_normalization = do_noise_normalization
self.noise_norm = noise_norm
self.normal_normal_weight = normal_normal_weight
self.normal_adv_weight = normal_adv_weight
self.adv_normal_weight = adv_normal_weight
self.adv_adv_weight = adv_adv_weight
self.adv_loss_stop_grad = adv_loss_stop_grad
self.loss_rate_scheduler = loss_rate_scheduler
self.use_contrastive_loss = use_contrastive_loss
assert data_augmentation_strategy in ("none", "adv", "meanmax", "shuffle", "cutoff", "shuffle-cutoff", "shuffle+cutoff", "shuffle_embeddings", "span")
if data_augmentation_strategy in ("cutoff", "shuffle-cutoff", "shuffle+cutoff"):
assert cutoff_direction is not None and cutoff_direction in ("row", "column", "random")
assert cutoff_rate is not None and 0.0 < cutoff_rate < 1.0
self.cutoff_direction = cutoff_direction
self.cutoff_rate = cutoff_rate
if data_augmentation_strategy in ("span"):
assert cutoff_rate is not None and 0.0 < cutoff_rate < 1.0
self.cutoff_rate = cutoff_rate
self.data_augmentation_strategy = data_augmentation_strategy
self.data_augmentation_strategy_final_1 = data_augmentation_strategy_final_1
self.data_augmentation_strategy_final_2 = data_augmentation_strategy_final_2
self.cutoff_rate_final_1 = cutoff_rate_final_1
self.cutoff_rate_final_2 = cutoff_rate_final_2
self.contrastive_loss_only = contrastive_loss_only
self.no_pair = no_pair
if no_pair:
assert use_contrastive_loss and contrastive_loss_only
assert contrastive_loss_type in ("nt_xent", "cosine")
self.contrastive_loss_type = contrastive_loss_type
self.contrastive_loss_rate = contrastive_loss_rate
self.regularization_term_rate = regularization_term_rate
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.add_projection = add_projection
if add_projection:
assert projection_norm_type in (None, "ln", "bn")
self.projection_head = prediction_MLP(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
if mapping_to_small_space is not None:
assert add_contrastive_predictor is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = "both"
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, mapping_to_small_space, use_bn=projection_use_batch_norm)
else:
self.projection_mode = "none"
if add_contrastive_predictor is not None:
assert add_contrastive_predictor in ("normal", "adv")
assert mapping_to_small_space is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = add_contrastive_predictor
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, sentence_embedding_dimension, use_bn=projection_use_batch_norm)
        elif mapping_to_small_space is None:
            self.projection_mode = "none"  # keep "both" if the mapping branch above was configured
self.labels = get_label(self.model.tokenizer, low_rate=low_rate)
self.discrim = freqDiscrim(self.model.tokenizer, low_rate=low_rate, high_rank=high_rank)
self.maskdiscrim = maskDiscrim(self.model.tokenizer, low_rate=low_rate)
assert contrastive_loss_stop_grad in (None, "normal", "adv")
self.contrastive_loss_stop_grad = contrastive_loss_stop_grad
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
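    # Dimension note (illustrative): with rep dim d = 768 and the default flags
    # (u, v and |u - v|), the classifier input is 3 * d = 2304; each additional
    # concatenation option contributes another d.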
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
if self.add_projection:
hidden1 = self.projection_head(hidden1)
hidden2 = self.projection_head(hidden2)
if self.projection_mode in ("both", "normal"):
hidden1 = self.projection(hidden1)
if self.projection_mode in ("both", "adv"):
hidden2 = self.projection(hidden2)
if self.contrastive_loss_type == "cosine":
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
scores = torch.einsum("bd,bd->b", hidden1, hidden2)
neg_cosine_loss = -1.0 * scores.mean()
return neg_cosine_loss
elif self.contrastive_loss_type == "nt_xent":
if hidden_norm:
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
hidden1_large = hidden1
hidden2_large = hidden2
labels = torch.arange(0, batch_size).to(device=hidden1.device)
masks = torch.nn.functional.one_hot(torch.arange(0, batch_size), num_classes=batch_size).to(device=hidden1.device, dtype=torch.float)
logits_aa = torch.matmul(hidden1, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(hidden2, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(hidden1, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_ba = torch.matmul(hidden2, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
loss = loss_a + loss_b
return loss
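    # Shape walk-through of the NT-Xent branch above (illustrative numbers):
    # with bsz = 4, logits_ab/logits_aa are (4, 4); row i of the concatenated
    # (4, 8) logits has its positive at column i (the second view of sample i),
    # its self-similarity masked out via LARGE_NUM, and the remaining entries
    # serving as in-batch negatives, as in SimCLR.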
def _recover_to_origin_keys(self, sentence_feature: Dict[str, Tensor], ori_keys: Set[str]):
return {k: v for k, v in sentence_feature.items() if k in ori_keys}
def _data_aug(self, sentence_feature, name, ori_keys, cutoff_rate):
assert name in ("none", "shuffle", "token_cutoff", "feature_cutoff", "dropout", "span")
sentence_feature = self._recover_to_origin_keys(sentence_feature, ori_keys)
if name == "none":
pass # do nothing
elif name == "shuffle":
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
elif name == "token_cutoff":
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", "row")
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
elif name == "span":
self.model[0].auto_model.set_flag("data_aug_span", True)
self.model[0].auto_model.set_flag("data_aug_span.rate", cutoff_rate)
elif name == "feature_cutoff":
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", "column")
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
elif name == "dropout":
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", "random")
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", cutoff_rate)
rep = self.model(sentence_feature)["sentence_embedding"]
return rep, sentence_feature['token_embeddings']
    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, flag):  # flag != 0 enables adding the discriminator losses to the total loss
global sentence_feature_a, rep_a_view1, rep_a_view2, rep_b_view1, rep_b_view2,\
sentence_feature_b, rep_b, rep_a, adv_rep_b, adv_rep_a, loss_fct, rep_a_mean, \
rep_b_mean, rep_a_max, rep_b_max, rep_a_shuffle, rep_b_shuffle, rep_a_cutoff, \
rep_b_cutoff, rep_a_shuffle_cutoff, rep_b_shuffle_cutoff, token_a_view1, token_a_view2, token_b_view1, token_b_view2, ori_feature_keys
if not self.training: # eval and prediction
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
        elif not self.use_adversarial_training and not self.use_contrastive_loss:  # only the conventional supervised objective (baseline setting)
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
if self.normal_loss_stop_grad:
rep_b = rep_b.detach()
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
        else:  # training with adversarial and/or contrastive objectives
total_step, cur_step = self.model.num_steps_total, self.model.global_step
adv_rate, cl_rate = LOSS_RATE_SCHEDULERS[self.loss_rate_scheduler](cur_step, total_step)
# data augmentation generation
if self.data_augmentation_strategy_final_1 is None:
                if self.use_adversarial_training or (self.use_contrastive_loss and self.data_augmentation_strategy == "adv"):  # generate adversarial examples when adversarial training is used, or when contrastive learning needs them as augmented views
# 1. normal forward
sentence_feature_a, sentence_feature_b = sentence_features
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
embedding_output_a = self.model[0].auto_model.get_most_recent_embedding_output()
rep_b = self.model(sentence_feature_b)['sentence_embedding']
embedding_output_b = self.model[0].auto_model.get_most_recent_embedding_output()
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
normal_loss = loss_fct(output, labels.view(-1))
# 2. adversarial backward
embedding_output_a.retain_grad()
embedding_output_b.retain_grad()
normal_loss.backward(retain_graph=True)
unnormalized_noise_a = embedding_output_a.grad.detach_()
unnormalized_noise_b = embedding_output_b.grad.detach_()
for p in self.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_() # clear the gradient on parameters
if self.do_noise_normalization: # do normalization
norm_a = unnormalized_noise_a.norm(p=2, dim=-1)
normalized_noise_a = unnormalized_noise_a / (norm_a.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
norm_b = unnormalized_noise_b.norm(p=2, dim=-1)
normalized_noise_b = unnormalized_noise_b / (norm_b.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
else: # no normalization
normalized_noise_a = unnormalized_noise_a
normalized_noise_b = unnormalized_noise_b
noise_a = self.noise_norm * normalized_noise_a
noise_b = self.noise_norm * normalized_noise_b
# 3. adversarial forward
noise_embedding_a = embedding_output_a + noise_a
noise_embedding_b = embedding_output_b + noise_b
self.model[0].auto_model.set_flag("data_aug_adv", True)
self.model[0].auto_model.set_flag("noise_embedding", noise_embedding_a)
adv_rep_a = self.model(sentence_feature_a)['sentence_embedding']
self.model[0].auto_model.set_flag("data_aug_adv", True)
self.model[0].auto_model.set_flag("noise_embedding", noise_embedding_b)
adv_rep_b = self.model(sentence_feature_b)['sentence_embedding']
elif self.use_contrastive_loss and self.data_augmentation_strategy == "meanmax": # 使用mean-max pooling的对比
rep_dicts = [self.model(sentence_feature) for sentence_feature in sentence_features]
reps_mean = [rep_dict['pad_mean_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_mean, rep_b_mean = reps_mean
else:
rep_a_mean, rep_b_mean = reps_mean[0], None
reps_max = [rep_dict['pad_max_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_max, rep_b_max = reps_max
else:
rep_a_max, rep_b_max = reps_max[0], None
elif self.use_contrastive_loss and self.data_augmentation_strategy in ("shuffle", "shuffle_embeddings"): # 随机打乱词序
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag(f"data_aug_{self.data_augmentation_strategy}", True)
rep_a_shuffle = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag(f"data_aug_{self.data_augmentation_strategy}", True)
rep_b_shuffle = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_shuffle = None
elif self.use_contrastive_loss and self.data_augmentation_strategy == "cutoff": # cutoff数据增强策略
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_cutoff = None
elif self.use_contrastive_loss and self.data_augmentation_strategy == "shuffle-cutoff": # 分别用shuffle和cutoff来生成两个view
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
# shuffle strategy
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
rep_a_shuffle = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
rep_b_shuffle = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b_shuffle = None
# cutoff strategy
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_cutoff = None
# for supervised loss
rep_a = rep_a_cutoff
rep_b = rep_b_cutoff
elif self.use_contrastive_loss and self.data_augmentation_strategy == "shuffle+cutoff": # 用shuffle和cutoff的组合作为一个view
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_a_shuffle_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_shuffle", True)
self.model[0].auto_model.set_flag("data_aug_cutoff", True)
self.model[0].auto_model.set_flag("data_aug_cutoff.direction", self.cutoff_direction)
self.model[0].auto_model.set_flag("data_aug_cutoff.rate", self.cutoff_rate)
rep_b_shuffle_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_shuffle_cutoff = None
elif self.use_contrastive_loss and self.data_augmentation_strategy == "span": # 采样span作为一个view
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
if not self.no_pair:
rep_b = self.model(sentence_feature_b)['sentence_embedding']
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
else:
rep_b = None
self.model[0].auto_model.set_flag("data_aug_span", True)
self.model[0].auto_model.set_flag("data_aug_span.rate", self.cutoff_rate)
rep_a_cutoff = self.model(sentence_feature_a)['sentence_embedding']
if not self.no_pair:
self.model[0].auto_model.set_flag("data_aug_span", True)
self.model[0].auto_model.set_flag("data_aug_span.rate", self.cutoff_rate)
rep_b_cutoff = self.model(sentence_feature_b)['sentence_embedding']
else:
rep_b_cutoff = None
                else:  # the plain version: just obtain rep_a and rep_b  # TODO: add more data augmentation strategies here
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
if not self.no_pair:
rep_a, rep_b = reps
else:
rep_a, rep_b = reps[0], None
else:
if not self.no_pair:
sentence_feature_a, sentence_feature_b = sentence_features
else:
sentence_feature_a = sentence_features[0]
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a_view1, token_a_view1 = self._data_aug(sentence_feature_a, self.data_augmentation_strategy_final_1,
ori_feature_keys, self.cutoff_rate_final_1)
rep_a_view2, token_a_view2 = self._data_aug(sentence_feature_a, self.data_augmentation_strategy_final_2,
ori_feature_keys, self.cutoff_rate_final_2)
if not self.no_pair:
rep_b_view1, token_b_view1 = self._data_aug(sentence_feature_b,
self.data_augmentation_strategy_final_1,
ori_feature_keys, self.cutoff_rate_final_1)
rep_b_view2, token_b_view2 = self._data_aug(sentence_feature_b,
self.data_augmentation_strategy_final_2,
ori_feature_keys, self.cutoff_rate_final_2)
else:
rep_b_view1 = None
rep_b_view2 = None
# loss calculation
final_loss = 0
if self.use_adversarial_training:
if self.adv_loss_stop_grad:
rep_b = rep_b.detach()
adv_rep_b = adv_rep_b.detach()
match_output_n_n = self._reps_to_output(rep_a, rep_b)
match_output_n_a = self._reps_to_output(rep_a, adv_rep_b)
match_output_a_n = self._reps_to_output(adv_rep_a, rep_b)
match_output_a_a = self._reps_to_output(adv_rep_a, adv_rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
loss_n_a = loss_fct(match_output_n_a, labels.view(-1))
loss_a_n = loss_fct(match_output_a_n, labels.view(-1))
loss_a_a = loss_fct(match_output_a_a, labels.view(-1))
adv_training_loss = self.normal_normal_weight * loss_n_n + self.normal_adv_weight * loss_n_a + \
self.adv_normal_weight * loss_a_n + self.adv_adv_weight * loss_a_a
final_loss += self.adversarial_loss_rate * adv_training_loss * adv_rate
self.model.tensorboard_writer.add_scalar(f"train_adv_loss", self.adversarial_loss_rate * adv_rate * adv_training_loss.item(), global_step=self.model.global_step)
elif not self.contrastive_loss_only:
match_output_n_n = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
final_loss += loss_n_n * adv_rate
self.model.tensorboard_writer.add_scalar(f"train_normal_loss", loss_n_n.item() * adv_rate, global_step=self.model.global_step)
if self.use_contrastive_loss:
if self.data_augmentation_strategy_final_1 is None:
if self.data_augmentation_strategy == "adv":
if self.contrastive_loss_stop_grad == "normal":
rep_a = rep_a.detach()
if not self.no_pair:
rep_b = rep_b.detach()
elif self.contrastive_loss_stop_grad == "adv":
adv_rep_a = adv_rep_a.detach()
if not self.no_pair:
adv_rep_b = adv_rep_b.detach()
else:
assert self.contrastive_loss_stop_grad is None
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = adv_rep_a, adv_rep_b
elif self.data_augmentation_strategy == "none":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a, rep_b
elif self.data_augmentation_strategy == "meanmax":
rep_a_view1, rep_b_view1 = rep_a_mean, rep_b_mean
rep_a_view2, rep_b_view2 = rep_a_max, rep_b_max
elif self.data_augmentation_strategy in ("shuffle", "shuffle_embeddings"):
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_shuffle, rep_b_shuffle
elif self.data_augmentation_strategy in ("cutoff", "span"):
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_cutoff, rep_b_cutoff
elif self.data_augmentation_strategy == "shuffle-cutoff":
rep_a_view1, rep_b_view1 = rep_a_shuffle, rep_b_shuffle
rep_a_view2, rep_b_view2 = rep_a_cutoff, rep_b_cutoff
elif self.data_augmentation_strategy == "shuffle+cutoff":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a_shuffle_cutoff, rep_b_shuffle_cutoff
else:
raise ValueError("Invalid data augmentation strategy")
contrastive_loss_a = self._contrastive_loss_forward(rep_a_view1, rep_a_view2,
hidden_norm=self.do_hidden_normalization,
temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_a", contrastive_loss_a.item(),
global_step=self.model.global_step)
if not self.no_pair:
contrastive_loss_b = self._contrastive_loss_forward(rep_b_view1, rep_b_view2,
hidden_norm=self.do_hidden_normalization,
temperature=self.temperature)
else:
contrastive_loss_b = torch.tensor(0.0)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_b", contrastive_loss_b.item(),
global_step=self.model.global_step)
contrastive_loss = contrastive_loss_a + contrastive_loss_b
final_loss += self.contrastive_loss_rate * contrastive_loss * cl_rate
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", self.contrastive_loss_rate * cl_rate * contrastive_loss.item(), global_step=self.model.global_step)
if self.regularization_term_rate > 1e-10:
regularization_term = distance_to_center_mse_loss(rep_a_view1) # note: only applied for rep_a_view1
final_loss += self.regularization_term_rate * regularization_term
self.model.tensorboard_writer.add_scalar(f"contrastive_loss_regularization_term", self.regularization_term_rate * regularization_term.item(), global_step=self.model.global_step)
if self.adv_loss_cof != 0.0:
# if flag == 0:
# token_a_view1, token_a_view2 = token_a_view1.detach(), token_a_view2.detach()
if self.no_pair:
adv_loss = self.discrim(token_a_view1, token_a_view2, sentence_feature_a['input_ids'])
else:
# if flag == 0:
# token_b_view1, token_b_view2 = token_b_view1.detach(), token_b_view2.detach()
adv_loss = self.discrim(token_a_view1, token_a_view2, sentence_feature_a['input_ids']) + \
self.discrim(token_b_view1, token_b_view2, sentence_feature_b['input_ids'])
adv_loss = adv_loss / 2
if flag != 0:
final_loss += adv_loss * self.adv_loss_cof
self.model.tensorboard_writer.add_scalar(f"train_freq_loss_total", self.adv_loss_cof * adv_loss.item(),
global_step=self.model.global_step)
if self.mask_loss_cof != 0.0:
                # mask low-frequency words with probability ``mask_rate`` (hard-coded to 0.5 below)
sentence_feature_mask = copy.deepcopy(sentence_feature_a) # mask low-frequency version
batch_dim, sequence_dim = sentence_feature_mask['input_ids'].shape
for i in range(batch_dim):
for j in range(sequence_dim):
if self.labels[sentence_feature_mask['input_ids'][i, j].item()] == 0 \
and np.random.uniform() < 0.5: # mask_rate
                            sentence_feature_mask['input_ids'][i, j] = 103  # 103 = [MASK] token id in the BERT vocabulary
rep_mask, token_mask = self._data_aug(sentence_feature_mask, 'none',
ori_feature_keys, self.cutoff_rate_final_2)
mask_loss = self.maskdiscrim(rep_a_view1, rep_mask)
if flag != 0:
final_loss += mask_loss * self.mask_loss_cof
self.model.tensorboard_writer.add_scalar(f"train_mask_loss_total", self.mask_loss_cof * mask_loss.item(),
global_step=self.model.global_step)
return final_loss
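# Hedged construction sketch (argument values are illustrative, not a tuned
# recipe; assumes the default labels.json exists for the discriminators):
def _demo_build_advcl_loss(model):
    return AdvCLSoftmaxLoss(
        model=model,
        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
        num_labels=3,
        use_contrastive_loss=True,
        contrastive_loss_only=True,
        no_pair=True,
        data_augmentation_strategy_final_1="shuffle",
        data_augmentation_strategy_final_2="feature_cutoff",
        cutoff_rate_final_1=0.2,
        cutoff_rate_final_2=0.2,
        temperature=0.1,
    )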
| 50,752 | 50.947799 | 226 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/MegaBatchMarginLoss.py
|
from .. import util
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
class MegaBatchMarginLoss(nn.Module):
"""
Loss function inspired from ParaNMT paper:
https://www.aclweb.org/anthology/P18-1042/
Given a large batch (like 500 or more examples) of (anchor_i, positive_i) pairs,
find for each pair in the batch the hardest negative, i.e. find j != i such that cos_sim(anchor_i, positive_j)
is maximal. Then create from this a triplet (anchor_i, positive_i, positive_j) where positive_j
serves as the negative for this triplet.
    Then train with these triplets as in the triplet loss.
"""
    def __init__(self, model, positive_margin: float = 0.8, negative_margin: float = 0.3, use_mini_batched_version: bool = True, mini_batch_size: int = 50):
"""
:param model: SentenceTransformerModel
:param positive_margin: Positive margin, cos(anchor, positive) should be > positive_margin
:param negative_margin: Negative margin, cos(anchor, negative) should be < negative_margin
:param use_mini_batched_version: As large batch sizes require a lot of memory, we can use a mini-batched version. We break down the large batch with 500 examples to smaller batches with fewer examples.
        :param mini_batch_size: Size for the mini-batches. Should be a divisor of the batch size in your data loader.
"""
super(MegaBatchMarginLoss, self).__init__()
self.model = model
self.positive_margin = positive_margin
self.negative_margin = negative_margin
self.mini_batch_size = mini_batch_size
self.forward = self.forward_mini_batched if use_mini_batched_version else self.forward_non_mini_batched
def forward_mini_batched(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
anchor, positive = sentence_features
feature_names = list(anchor.keys())
with torch.no_grad():
self.model.eval()
all_positive_emb = self.model(positive)['sentence_embedding'].detach()
self.model.train()
diagonal_matrix = torch.eye(len(all_positive_emb), len(all_positive_emb), device=all_positive_emb.device)
#Iterate over the triplets (anchor, positive, hardest_negative) in smaller mini_batch sizes
for start_idx in range(0, len(all_positive_emb), self.mini_batch_size):
end_idx = start_idx + self.mini_batch_size
anchor_emb = self.model({key: anchor[key][start_idx:end_idx] for key in feature_names})['sentence_embedding']
# Find hard negatives. For each anchor, find the hardest negative
# Store them in the triplets (anchor, positive, hardest_negative)
hard_negative_features = {key: [] for key in feature_names}
with torch.no_grad():
cos_scores = util.pytorch_cos_sim(anchor_emb, all_positive_emb)
                negative_scores = cos_scores - 2 * diagonal_matrix[start_idx:end_idx]  # Push the positive scores on the diagonal down by 2 so that they are not selected by the max() operation
negatives_max, negatives_ids = torch.max(negative_scores, dim=1)
for hard_negative_id in negatives_ids:
for key in feature_names:
hard_negative_features[key].append(positive[key][hard_negative_id])
for key in feature_names:
hard_negative_features[key] = torch.stack(hard_negative_features[key])
#Compute differentiable negative and positive embeddings
positive_emb = self.model({key: positive[key][start_idx:end_idx] for key in feature_names})['sentence_embedding']
negative_emb = self.model(hard_negative_features)['sentence_embedding']
assert anchor_emb.shape == positive_emb.shape
assert anchor_emb.shape == negative_emb.shape
#Compute loss
pos_cosine = F.cosine_similarity(anchor_emb, positive_emb)
neg_cosine = F.cosine_similarity(anchor_emb, negative_emb)
losses = F.relu(self.positive_margin - pos_cosine) + F.relu(neg_cosine - self.negative_margin)
losses = losses.mean()
#Backpropagate unless it is the last mini batch. The last mini-batch will be back propagated by the outside train loop
if end_idx < len(cos_scores):
losses.backward()
return losses
##### Non mini-batched version ###
def forward_non_mini_batched(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
embeddings_a, embeddings_b = reps
cos_scores = util.pytorch_cos_sim(embeddings_a, embeddings_b)
positive_scores = torch.diagonal(cos_scores)
negative_scores = cos_scores - (2*torch.eye(*cos_scores.shape, device=cos_scores.device)) # Remove positive scores along the diagonal
negatives_max, _ = torch.max(negative_scores, dim=1)
losses = F.relu(self.positive_margin - positive_scores) + F.relu(negatives_max - self.negative_margin)
return losses.mean()
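# Hedged usage sketch (values illustrative): the outer DataLoader batch should
# be large (e.g. 500) so hard-negative mining has enough candidates, and
# mini_batch_size should divide it evenly.
def _demo_build_megabatch_loss(model):
    return MegaBatchMarginLoss(model, positive_margin=0.8, negative_margin=0.3,
                               use_mini_batched_version=True, mini_batch_size=50)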
| 5,229 | 51.828283 | 209 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/BatchHardTripletLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchHardTripletLossDistanceFunction:
"""
This class defines distance functions, that can be used with Batch[All/Hard/SemiHard]TripletLoss
"""
@staticmethod
def cosine_distance(embeddings):
"""
Compute the 2D matrix of cosine distances (1-cosine_similarity) between all embeddings.
"""
return 1 - util.pytorch_cos_sim(embeddings, embeddings)
@staticmethod
def eucledian_distance(embeddings, squared=False):
"""
        Compute the 2D matrix of Euclidean distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = torch.matmul(embeddings, embeddings.t())
# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
# This also provides more numerical stability (the diagonal of the result will be exactly 0).
# shape (batch_size,)
square_norm = torch.diag(dot_product)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = square_norm.unsqueeze(0) - 2.0 * dot_product + square_norm.unsqueeze(1)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances[distances < 0] = 0
if not squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = distances.eq(0).float()
distances = distances + mask * 1e-16
distances = (1.0 - mask) * torch.sqrt(distances)
return distances
class BatchHardTripletLoss(nn.Module):
"""
BatchHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then looks
for the hardest positive and the hardest negatives.
    The labels must be integers, with same label indicating sentences from the same class. Your train dataset
    must contain at least 2 examples per label class. The margin is set via the `margin` parameter (default 5).
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two embeddings. The class BatchHardTripletLossDistanceFunction contains pre-defined metrics that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchHardTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
super(BatchHardTripletLoss, self).__init__()
self.sentence_embedder = model
self.triplet_margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.sentence_embedder(sentence_features[0])['sentence_embedding']
return self.batch_hard_triplet_loss(labels, rep)
# Hard Triplet Loss
# Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
# Blog post: https://omoindrot.github.io/triplet-loss
def batch_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
        Args:
            labels: labels of the batch, of size (batch_size,)
            embeddings: tensor of shape (batch_size, embed_dim)
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = self.distance_metric(embeddings)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = BatchHardTripletLoss.get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = mask_anchor_positive * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = BatchHardTripletLoss.get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
tl = hardest_positive_dist - hardest_negative_dist + self.triplet_margin
tl[tl < 0] = 0
triplet_loss = tl.mean()
return triplet_loss
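    # Worked micro-example (illustrative): for labels [0, 0, 1], the positive
    # mask keeps only the pairs (0,1)/(1,0); each anchor's hardest positive is
    # its single same-class partner, the cross-class distances provide the
    # hardest negatives, and the loss averages relu(d(a,p) - d(a,n) + margin)
    # over the batch.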
@staticmethod
def get_triplet_mask(labels):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
            labels: integer `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
i_not_equal_j = indices_not_equal.unsqueeze(2)
i_not_equal_k = indices_not_equal.unsqueeze(1)
j_not_equal_k = indices_not_equal.unsqueeze(0)
distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k
label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
i_equal_j = label_equal.unsqueeze(2)
i_equal_k = label_equal.unsqueeze(1)
valid_labels = ~i_equal_k & i_equal_j
return valid_labels & distinct_indices
@staticmethod
def get_anchor_positive_triplet_mask(labels):
"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
Args:
            labels: integer `Tensor` with shape [batch_size]
        Returns:
            mask: boolean `Tensor` with shape [batch_size, batch_size]
"""
# Check that i and j are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
return labels_equal & indices_not_equal
@staticmethod
def get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
            labels: integer `Tensor` with shape [batch_size]
        Returns:
            mask: boolean `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
return ~(labels.unsqueeze(0) == labels.unsqueeze(1))
| 9,398 | 45.300493 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/MultipleNegativesRankingLoss.py
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesRankingLoss(nn.Module):
"""
This loss expects as input a batch consisting of sentence pairs (a_1, p_1), (a_2, p_2)..., (a_n, p_n)
where we assume that (a_i, p_i) are a positive pair and (a_i, p_j) for i!=j a negative pair.
For each a_i, it uses all other p_j as negative samples, i.e., for a_i, we have 1 positive example (p_i) and
    n-1 negative examples (p_j). It then minimizes the negative log-likelihood for softmax-normalized scores.
This loss function works great to train embeddings for retrieval setups where you have positive pairs (e.g. (query, relevant_doc))
as it will sample in each batch n-1 negative docs randomly.
The performance usually increases with increasing batch sizes.
For more information, see: https://arxiv.org/pdf/1705.00652.pdf
(Efficient Natural Language Response Suggestion for Smart Reply, Section 4.4)
    You can also provide one or multiple hard negatives per anchor-positive pair by structuring the data like this:
(a_1, p_1, n_1), (a_2, p_2, n_2)
Here, n_1 is a hard negative for (a_1, p_1). The loss will use for the pair (a_i, p_i) all p_j (j!=i) and all n_j as negatives.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Anchor 1', 'Positive 1']),
InputExample(texts=['Anchor 2', 'Positive 2'])]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct = util.pytorch_cos_sim):
"""
:param model: SentenceTransformer model
:param scale: Output of similarity function is multiplied by scale value
        :param similarity_fct: similarity function between sentence embeddings. By default, cos_sim. Can also be set to dot product (and then set scale to 1)
"""
super(MultipleNegativesRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
reps_a = reps[0]
reps_b = torch.cat(reps[1:])
return self.multiple_negatives_ranking_loss(reps_a, reps_b)
def multiple_negatives_ranking_loss(self, embeddings_a: Tensor, embeddings_b: Tensor):
"""
:param embeddings_a:
Tensor of shape (batch_size, embedding_dim)
:param embeddings_b:
Tensor of shape (batch_size, embedding_dim)
:return:
The scalar loss
"""
scores = self.similarity_fct(embeddings_a, embeddings_b) * self.scale
labels = torch.tensor(range(len(scores)), dtype=torch.long, device=scores.device) # Example a[i] should match with b[i]
return self.cross_entropy_loss(scores, labels)
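# Hedged numeric sketch: the score matrix's diagonal holds the positive pairs,
# so the targets are simply 0..batch_size-1.
def _demo_mnrl_scores():
    a = torch.nn.functional.normalize(torch.randn(3, 8), dim=-1)
    b = torch.nn.functional.normalize(torch.randn(3, 8), dim=-1)
    scores = util.pytorch_cos_sim(a, b) * 20.0  # scale = 20.0 as in the default
    labels = torch.arange(len(scores))  # a[i] should match b[i]
    return torch.nn.functional.cross_entropy(scores, labels)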
| 3,613 | 47.837838 | 157 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/SimCLRLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
class MLP1(nn.Module):
def __init__(self, hidden_dim=2048, norm=None, activation="relu"): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if activation == "relu":
activation_layer = nn.ReLU()
elif activation == "leakyrelu":
activation_layer = nn.LeakyReLU()
elif activation == "tanh":
activation_layer = nn.Tanh()
elif activation == "sigmoid":
activation_layer = nn.Sigmoid()
else:
            raise ValueError(f"Unknown activation function {activation}")
if norm:
if norm=='bn':
norm_layer = nn.BatchNorm1d
else:
norm_layer = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
norm_layer(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class SimCLRLoss(nn.Module):
"""
    A SimCLR-style contrastive loss (NT-Xent) over two augmented views of each sentence representation.
    It retains the softmax classifier head from our SBERT publication (https://arxiv.org/abs/1908.10084) on top of the output of two transformer networks.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
            InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.SimCLRLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
                 concatenation_sent_max_square: bool = False,   # concatenate the element-wise max-square of the two sentence representations (a trick suggested by 如寐)
                 data_augmentation_strategy: str = "normal",    # data augmentation strategy; options: plain forward "normal", mean- vs. max-pooling contrast "meanmax", TODO: more
                 projection_norm_type: str = "ln",
                 do_hidden_normalization: bool = True,          # whether to normalize sentence representations before the contrastive loss
                 temperature: float = 1.0,                      # temperature in the contrastive loss; only effective for the cross-entropy (NT-Xent) variant
                 mapping_to_small_space: int = None,            # whether to map sentence representations into a smaller space for the contrastive loss (as in SimCLR), and the final dimension of that mapping
                 add_contrastive_predictor: bool = True,        # whether to map sentence representations non-linearly to the same dimension (as in SimSiam)
                 projection_hidden_dim: int = None,             # hidden dimension of the MLP; used by both options above (mapping & predictor)
                 projection_use_batch_norm: bool = None,        # whether to add BatchNorm to the MLP hidden layer; used by both options above (mapping & predictor)
):
super(SimCLRLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.data_augmentation_strategy = data_augmentation_strategy
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.add_contrastive_predictor = add_contrastive_predictor
if add_contrastive_predictor:
self.predictor = MLP1(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
if hidden_norm:
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
hidden1_large = hidden1
hidden2_large = hidden2
labels = torch.arange(0, batch_size).to(device=hidden1.device)
masks = torch.nn.functional.one_hot(torch.arange(0, batch_size), num_classes=batch_size).to(device=hidden1.device, dtype=torch.float)
logits_aa = torch.matmul(hidden1, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(hidden2, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(hidden1, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_ba = torch.matmul(hidden2, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
loss = loss_a + loss_b
return loss
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
if self.data_augmentation_strategy == "meanmax": # contrast the mean-pooled against the max-pooled representations
rep_dicts = [self.model(sentence_feature) for sentence_feature in sentence_features]
reps_mean = [rep_dict['pad_mean_tokens'] for rep_dict in rep_dicts]
reps_max = [rep_dict['pad_max_tokens'] for rep_dict in rep_dicts]
rep_a_view1, rep_a_view2 = reps_mean[0], reps_max[0]
elif self.data_augmentation_strategy == "normal": # the most basic version; only rep_a and rep_b are needed # TODO: add more data augmentation strategies here
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a = reps[0]
rep_a_view1, rep_a_view2 = rep_a, rep_a
else:
raise ValueError("Invalid data augmentation strategy")
# add predictor
if self.add_contrastive_predictor:
rep_a_view1 = self.predictor(rep_a_view1)
rep_a_view2 = self.predictor(rep_a_view2)
final_loss = 0
contrastive_loss = self._contrastive_loss_forward(rep_a_view1, rep_a_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss", contrastive_loss.item(), global_step=self.model.global_step)
final_loss += contrastive_loss
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", contrastive_loss.item(), global_step=self.model.global_step)
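# --- Hedged usage sketch (illustrative only) ---
# Sanity check of the NT-Xent term in _contrastive_loss_forward: identical views of a batch
# should give a much lower loss than unrelated views. The batch size of 8 and dimension of
# 768 are assumed values; self is unused by the method, so None can stand in for it.
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    view1 = torch.randn(8, 768)
    aligned = SimCLRLoss._contrastive_loss_forward(None, view1, view1.clone(), hidden_norm=True, temperature=0.1)
    unrelated = SimCLRLoss._contrastive_loss_forward(None, view1, torch.randn(8, 768), hidden_norm=True, temperature=0.1)
    assert aligned.item() < unrelated.item()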
return final_loss
| 10,167 | 48.120773 | 155 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/BatchAllTripletLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchAllTripletLoss(nn.Module):
"""
BatchAllTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. The labels
must be integers, with the same label indicating sentences from the same class. Your training dataset
must contain at least 2 examples per label class.
| Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
| Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
| Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples should be at least margin further apart from the anchor than the positive.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchAllTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
super(BatchAllTripletLoss, self).__init__()
self.sentence_embedder = model
self.triplet_margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.sentence_embedder(sentence_features[0])['sentence_embedding']
return self.batch_all_triplet_loss(labels, rep)
def batch_all_triplet_loss(self, labels, embeddings):
"""Build the triplet loss over a batch of embeddings.
We generate all the valid triplets and average the loss over the positive ones.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
(the margin and the distance metric are taken from self.triplet_margin and self.distance_metric)
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = self.distance_metric(embeddings)
anchor_positive_dist = pairwise_dist.unsqueeze(2)
anchor_negative_dist = pairwise_dist.unsqueeze(1)
# Compute a 3D tensor of size (batch_size, batch_size, batch_size)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
# Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
# and the 2nd (batch_size, 1, batch_size)
triplet_loss = anchor_positive_dist - anchor_negative_dist + self.triplet_margin
# Put to zero the invalid triplets
# (where label(a) != label(p) or label(n) == label(a) or a == p)
mask = BatchHardTripletLoss.get_triplet_mask(labels)
triplet_loss = mask.float() * triplet_loss
# Remove negative losses (i.e. the easy triplets)
triplet_loss[triplet_loss < 0] = 0
# Count number of positive triplets (where triplet_loss > 0)
valid_triplets = triplet_loss[triplet_loss > 1e-16]
num_positive_triplets = valid_triplets.size(0)
num_valid_triplets = mask.sum()
fraction_positive_triplets = num_positive_triplets / (num_valid_triplets.float() + 1e-16)
# Get final mean triplet loss over the positive valid triplets
triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16)
return triplet_loss
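# --- Hedged usage sketch (illustrative only) ---
# A toy run of the batch-all triplet mask on 2-d embeddings; the label layout (two examples
# of class 0, one of class 1) is an assumed minimal case, not from the original tests.
if __name__ == "__main__":
    import torch
    labels = torch.tensor([0, 0, 1])
    mask = BatchHardTripletLoss.get_triplet_mask(labels)  # valid (anchor, positive, negative) triples
    print(mask.nonzero())                                 # expected: (0, 1, 2) and (1, 0, 2)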
| 4,700 | 50.659341 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/BatchSemiHardTripletLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchSemiHardTripletLoss(nn.Module):
"""
BatchSemiHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then looks
for the semi hard positives and negatives.
The labels must be integers, with the same label indicating sentences from the same class. Your training dataset
must contain at least 2 examples per label class. The margin is controlled by the margin parameter.
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchSemiHardTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
super(BatchSemiHardTripletLoss, self).__init__()
self.sentence_embedder = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
rep = self.sentence_embedder(sentence_features[0])['sentence_embedding']
return self.batch_semi_hard_triplet_loss(labels, rep)
# Semi-Hard Triplet Loss
# Based on: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/losses/triplet.py#L71
# Paper: FaceNet: A Unified Embedding for Face Recognition and Clustering: https://arxiv.org/pdf/1503.03832.pdf
def batch_semi_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
We generate all the valid triplets and average the loss over the positive ones.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
(the margin and the distance metric are taken from self.margin and self.distance_metric)
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
labels = labels.unsqueeze(1)
pdist_matrix = self.distance_metric(embeddings)
adjacency = labels == labels.t()
adjacency_not = ~adjacency
batch_size = torch.numel(labels)
pdist_matrix_tile = pdist_matrix.repeat([batch_size, 1])
mask = adjacency_not.repeat([batch_size, 1]) & (pdist_matrix_tile > torch.reshape(pdist_matrix.t(), [-1, 1]))
mask_final = torch.reshape(torch.sum(mask, 1, keepdim=True) > 0.0, [batch_size, batch_size])
mask_final = mask_final.t()
negatives_outside = torch.reshape(BatchSemiHardTripletLoss._masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = negatives_outside.t()
negatives_inside = BatchSemiHardTripletLoss._masked_maximum(pdist_matrix, adjacency_not)
negatives_inside = negatives_inside.repeat([1, batch_size])
semi_hard_negatives = torch.where(mask_final, negatives_outside, negatives_inside)
loss_mat = (pdist_matrix - semi_hard_negatives) + self.margin
mask_positives = adjacency.float().to(labels.device) - torch.eye(batch_size, device=labels.device)
mask_positives = mask_positives.to(labels.device)
num_positives = torch.sum(mask_positives)
triplet_loss = torch.sum(torch.max(loss_mat * mask_positives, torch.tensor([0.0], device=labels.device))) / num_positives
return triplet_loss
@staticmethod
def _masked_minimum(data, mask, dim=1):
axis_maximums, _ = data.max(dim, keepdim=True)
masked_minimums = (data - axis_maximums) * mask
masked_minimums, _ = masked_minimums.min(dim, keepdim=True)
masked_minimums += axis_maximums
return masked_minimums
@staticmethod
def _masked_maximum(data, mask, dim=1):
axis_minimums, _ = data.min(dim, keepdim=True)
masked_maximums = (data - axis_minimums) * mask
masked_maximums, _ = masked_maximums.max(dim, keepdim=True)
masked_maximums += axis_minimums
return masked_maximums
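# --- Hedged usage sketch (illustrative only) ---
# _masked_maximum returns, per row, the largest entry among positions where the mask is 1;
# the toy matrix below is an assumed example (the entry 5.0 is masked out, so 3.0 wins).
if __name__ == "__main__":
    import torch
    data = torch.tensor([[1.0, 5.0, 3.0]])
    mask = torch.tensor([[1.0, 0.0, 1.0]])
    out = BatchSemiHardTripletLoss._masked_maximum(data, mask)
    assert out.item() == 3.0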
| 5,586 | 48.442478 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/AdvCLSoftmaxLoss_refactoring.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
class MLP(torch.nn.Module):
def __init__(self,
input_dim: int,
hidden_dim: int,
output_dim: int,
hidden_activation: str = "relu",
use_bn: bool = False,
use_bias: bool = True):
super(MLP, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.linear1 = torch.nn.Linear(input_dim, hidden_dim, bias=use_bias and not use_bn)
self.linear2 = torch.nn.Linear(hidden_dim, output_dim, bias=use_bias)
if hidden_activation == "relu":
self.activation = torch.nn.ReLU()
elif hidden_activation == "leakyrelu":
self.activation = torch.nn.LeakyReLU()
elif hidden_activation == "tanh":
self.activation = torch.nn.Tanh()
elif hidden_activation == "sigmoid":
self.activation = torch.nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
self.use_bn = use_bn
if use_bn:
self.bn = torch.nn.BatchNorm1d(hidden_dim)
def forward(self, x: torch.Tensor):
hidden = self.linear1(x)
if self.use_bn:
hidden = self.bn(hidden)
activated_hidden = self.activation(hidden)
return self.linear2(activated_hidden)
class prediction_MLP(nn.Module):
def __init__(self, hidden_dim=2048, norm=None): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if norm:
if norm=='bn':
MLPNorm = nn.BatchNorm1d
else:
MLPNorm = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class AdvCLSoftmaxLoss(nn.Module):
"""
An extension of the SoftmaxLoss from our SBERT publication (https://arxiv.org/abs/1908.10084), which
adds a softmax classifier on top of the output of two transformer networks, plus optional adversarial
training and contrastive-learning objectives.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
concatenation_sent_max_square: bool = False, # concatenate the element-wise max-square of the two sentence representations (a trick suggested by Rumei)
normal_loss_stop_grad: bool = False, # whether to apply stop-gradient to the standard (sentence-pair classification) loss
use_adversarial_training: bool = False, # whether to add the adversarial loss
adversarial_loss_rate: float = 1.0, # coefficient of the adversarial loss
do_noise_normalization: bool = True, # whether to normalize the adversarial perturbation (noise)
noise_norm: float = 0.01, # magnitude of the adversarial perturbation
normal_normal_weight: float = 0.25, # weight of the normal-to-normal sentence-pair classification loss
normal_adv_weight: float = 0.25, # weight of the normal-to-adv sentence-pair classification loss
adv_normal_weight: float = 0.25, # weight of the adv-to-normal sentence-pair classification loss
adv_adv_weight: float = 0.25, # weight of the adv-to-adv sentence-pair classification loss
adv_loss_stop_grad: bool = False, # whether to apply stop-gradient to the adversarial loss (a set of sentence-pair classification losses)
use_contrastive_loss: bool = False, # whether to add the contrastive loss
data_augmentation_strategy: str = "adv", # data augmentation strategy; options: no augmentation "none", adversarial "adv", mean vs. max pooling contrast "meanmax", TODO
contrastive_loss_only: bool = False, # train (unsupervised) with the contrastive loss only
no_pair: bool = False, # do not use paired corpora, to avoid prior information
contrastive_loss_type: str = "nt_xent", # form of the contrastive loss ("nt_xent" or "cosine")
contrastive_loss_rate: float = 1.0, # coefficient of the contrastive loss
do_hidden_normalization: bool = True, # whether to L2-normalize the sentence representations before the contrastive loss
temperature: float = 1.0, # temperature of the contrastive loss; only effective for the cross-entropy (nt_xent) form
mapping_to_small_space: int = None, # whether to map the sentence representations into a smaller space for the contrastive loss (as in SimCLR), and the output dimension of that mapping
add_contrastive_predictor: str = None, # whether to apply a non-linear predictor of equal dimension in contrastive learning (as in SimSiam), and to which branch it is added (normal or adv)
add_projection: bool = False, # add a projection network before the predictor
projection_norm_type: str = None, # norm type of the projection network before the predictor; one of (None, 'bn', 'ln')
projection_hidden_dim: int = None, # hidden dimension of the MLP; used by both options above (mapping & predictor)
projection_use_batch_norm: bool = None, # whether to add BatchNorm to the MLP hidden layer; used by both options above (mapping & predictor)
contrastive_loss_stop_grad: str = None # whether to apply stop-gradient to the contrastive loss, and to which branch ("normal" or "adv")
):
super(AdvCLSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.normal_loss_stop_grad = normal_loss_stop_grad
self.use_adversarial_training = use_adversarial_training
self.adversarial_loss_rate = adversarial_loss_rate
self.do_noise_normalization = do_noise_normalization
self.noise_norm = noise_norm
self.normal_normal_weight = normal_normal_weight
self.normal_adv_weight = normal_adv_weight
self.adv_normal_weight = adv_normal_weight
self.adv_adv_weight = adv_adv_weight
self.adv_loss_stop_grad = adv_loss_stop_grad
self.use_contrastive_loss = use_contrastive_loss
assert data_augmentation_strategy in ("none", "adv", "meanmax")
self.data_augmentation_strategy = data_augmentation_strategy
self.contrastive_loss_only = contrastive_loss_only
self.no_pair = no_pair
if no_pair:
assert use_contrastive_loss and contrastive_loss_only
assert contrastive_loss_type in ("nt_xent", "cosine")
self.contrastive_loss_type = contrastive_loss_type
self.contrastive_loss_rate = contrastive_loss_rate
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.add_projection = add_projection
if add_projection:
assert projection_norm_type in (None, "ln", "bn")
self.projection_head = prediction_MLP(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
if mapping_to_small_space is not None:
assert add_contrastive_predictor is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = "both"
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, mapping_to_small_space, use_bn=projection_use_batch_norm)
else:
self.projection_mode = "none"
if add_contrastive_predictor is not None:
assert add_contrastive_predictor in ("normal", "adv")
assert mapping_to_small_space is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = add_contrastive_predictor
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, sentence_embedding_dimension, use_bn=projection_use_batch_norm)
# (no else here: projection_mode was already set above; an unconditional else would clobber the "both" mode set by mapping_to_small_space)
assert contrastive_loss_stop_grad in (None, "normal", "adv")
self.contrastive_loss_stop_grad = contrastive_loss_stop_grad
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
if self.add_projection:
hidden1 = self.projection_head(hidden1)
hidden2 = self.projection_head(hidden2)
if self.projection_mode in ("both", "normal"):
hidden1 = self.projection(hidden1)
if self.projection_mode in ("both", "adv"):
hidden2 = self.projection(hidden2)
if self.contrastive_loss_type == "cosine":
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
scores = torch.einsum("bd,bd->b", hidden1, hidden2)
neg_cosine_loss = -1.0 * scores.mean()
return neg_cosine_loss
elif self.contrastive_loss_type == "nt_xent":
if hidden_norm:
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
hidden1_large = hidden1
hidden2_large = hidden2
labels = torch.arange(0, batch_size).to(device=hidden1.device)
masks = torch.nn.functional.one_hot(torch.arange(0, batch_size), num_classes=batch_size).to(device=hidden1.device, dtype=torch.float)
logits_aa = torch.matmul(hidden1, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(hidden2, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(hidden1, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_ba = torch.matmul(hidden2, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
loss = loss_a + loss_b
return loss
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
if not self.training: # evaluation or prediction phase
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
elif not self.use_adversarial_training and not self.use_contrastive_loss: # only the standard supervised training method (the baseline setting)
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
if self.normal_loss_stop_grad:
rep_b = rep_b.detach()
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
else: # train with adversarial training or the contrastive loss
# 1. normal forward
sentence_feature_a, sentence_feature_b = sentence_features
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
embedding_output_a = self.model[0].auto_model.embedding_output
rep_b = self.model(sentence_feature_b)['sentence_embedding']
embedding_output_b = self.model[0].auto_model.embedding_output
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
normal_loss = loss_fct(output, labels.view(-1))
# 2. adversarial backward
embedding_output_a.retain_grad()
embedding_output_b.retain_grad()
normal_loss.backward(retain_graph=True)
unnormalized_noise_a = embedding_output_a.grad.detach_()
unnormalized_noise_b = embedding_output_b.grad.detach_()
for p in self.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_() # clear the gradient on parameters
if self.do_noise_normalization: # do normalization
norm_a = unnormalized_noise_a.norm(p=2, dim=-1)
normalized_noise_a = unnormalized_noise_a / (norm_a.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
norm_b = unnormalized_noise_b.norm(p=2, dim=-1)
normalized_noise_b = unnormalized_noise_b / (norm_b.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
else: # no normalization
normalized_noise_a = unnormalized_noise_a
normalized_noise_b = unnormalized_noise_b
noise_a = self.noise_norm * normalized_noise_a
noise_b = self.noise_norm * normalized_noise_b
# 3. adversarial forward
noise_embedding_a = embedding_output_a + noise_a
noise_embedding_b = embedding_output_b + noise_b
self.model[0].auto_model.noise_embedding = noise_embedding_a
adv_rep_a = self.model(sentence_feature_a)['sentence_embedding']
self.model[0].auto_model.noise_embedding = noise_embedding_b
adv_rep_b = self.model(sentence_feature_b)['sentence_embedding']
self.model[0].auto_model.noise_embedding = None # unset the noise_embedding (see `transformers/modeling_bert.py` for more details)
del self.model[0].auto_model.__dict__['noise_embedding'] # unset the noise_embedding
# 4. loss calculation
final_loss = 0
if self.use_adversarial_training:
if self.adv_loss_stop_grad:
rep_b = rep_b.detach()
adv_rep_b = adv_rep_b.detach()
match_output_n_n = self._reps_to_output(rep_a, rep_b)
match_output_n_a = self._reps_to_output(rep_a, adv_rep_b)
match_output_a_n = self._reps_to_output(adv_rep_a, rep_b)
match_output_a_a = self._reps_to_output(adv_rep_a, adv_rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
loss_n_a = loss_fct(match_output_n_a, labels.view(-1))
loss_a_n = loss_fct(match_output_a_n, labels.view(-1))
loss_a_a = loss_fct(match_output_a_a, labels.view(-1))
adv_training_loss = self.normal_normal_weight * loss_n_n + self.normal_adv_weight * loss_n_a + \
self.adv_normal_weight * loss_a_n + self.adv_adv_weight * loss_a_a
final_loss += self.adversarial_loss_rate * adv_training_loss
self.model.tensorboard_writer.add_scalar(f"train_adv_loss", self.adversarial_loss_rate * adv_training_loss.item(), global_step=self.model.global_step)
elif not self.contrastive_loss_only:
match_output_n_n = self._reps_to_output(rep_a, rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
final_loss += loss_n_n
self.model.tensorboard_writer.add_scalar(f"train_normal_loss", loss_n_n.item(), global_step=self.model.global_step)
if self.use_contrastive_loss:
if self.contrastive_loss_stop_grad == "normal":
rep_a = rep_a.detach()
rep_b = rep_b.detach()
elif self.contrastive_loss_stop_grad == "adv":
adv_rep_a = adv_rep_a.detach()
adv_rep_b = adv_rep_b.detach()
else:
assert self.contrastive_loss_stop_grad is None
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = adv_rep_a, adv_rep_b
contrastive_loss_a = self._contrastive_loss_forward(rep_a_view1, rep_a_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_a", contrastive_loss_a.item(), global_step=self.model.global_step)
contrastive_loss_b = self._contrastive_loss_forward(rep_b_view1, rep_b_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_b", contrastive_loss_b.item(), global_step=self.model.global_step)
contrastive_loss = contrastive_loss_a + contrastive_loss_b
final_loss += self.contrastive_loss_rate * contrastive_loss
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", self.contrastive_loss_rate * contrastive_loss.item(), global_step=self.model.global_step)
return final_loss
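# --- Hedged usage sketch (illustrative only) ---
# The adversarial perturbation above is the (FGSM-style) gradient of the normal loss w.r.t.
# the embedding output, L2-normalized per position and scaled by noise_norm. A standalone
# numeric check of that normalization step, with assumed shapes (bsz=2, seq=3, dim=4):
if __name__ == "__main__":
    import torch
    grad = torch.randn(2, 3, 4)
    norm = grad.norm(p=2, dim=-1)
    normalized = grad / (norm.unsqueeze(dim=-1) + 1e-10)  # unit L2 norm along the hidden dim
    noise = 0.01 * normalized                             # noise_norm = 0.01 (the default above)
    assert torch.allclose(noise.norm(p=2, dim=-1), torch.full((2, 3), 0.01), atol=1e-4)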
| 21,694 | 51.026379 | 179 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/__init__.py
|
from .CosineSimilarityLoss import *
from .SoftmaxLoss import *
from .AdvCLSoftmaxLoss import *
from .MultipleNegativesRankingLoss import *
from .TripletLoss import *
from .MSELoss import *
from .ContrastiveLoss import *
from .OnlineContrastiveLoss import *
from .MegaBatchMarginLoss import *
# Triplet losses
from .BatchHardTripletLoss import *
from .BatchHardSoftMarginTripletLoss import *
from .BatchSemiHardTripletLoss import *
from .BatchAllTripletLoss import *
from .SimSiamLoss import *
from .SimCLRLoss import *
| 519 | 29.588235 | 45 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/OnlineContrastiveLoss.py
|
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
"""
Online Contrastive loss. Similar to ContrastiveLoss, but it selects hard positive pairs (positives that are far apart)
and hard negative pairs (negatives that are close) and computes the loss only for these pairs. Often yields
better performance than ContrastiveLoss.
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.OnlineContrastiveLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5):
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
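# --- Hedged usage sketch (illustrative only) ---
# How the hard-pair selection behaves on assumed toy distances: negatives closer than the
# hardest positive, and positives farther than the closest negative, survive the filtering.
if __name__ == "__main__":
    import torch
    distances = torch.tensor([0.9, 0.2, 0.3, 0.8])  # assumed pairwise distances
    labels = torch.tensor([0, 0, 1, 1])             # 0 = negative pair, 1 = positive pair
    negs = distances[labels == 0]                   # [0.9, 0.2]
    poss = distances[labels == 1]                   # [0.3, 0.8]
    hard_negatives = negs[negs < poss.max()]        # [0.2]: closer than the hardest positive
    hard_positives = poss[poss > negs.min()]        # [0.3, 0.8]: farther than the closest negative
    print(hard_negatives, hard_positives)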
| 2,732 | 51.557692 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/ContrastiveLoss.py
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1-F.cosine_similarity(x, y)
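# --- Hedged usage sketch (illustrative only) ---
# Quick check of the three metrics on assumed toy vectors; each returns one distance per row.
if __name__ == "__main__":
    import torch
    x = torch.tensor([[1.0, 0.0]])
    y = torch.tensor([[0.0, 1.0]])
    print(SiameseDistanceMetric.EUCLIDEAN(x, y))        # sqrt(2) ~ 1.414
    print(SiameseDistanceMetric.MANHATTAN(x, y))        # 2.0
    print(SiameseDistanceMetric.COSINE_DISTANCE(x, y))  # 1.0 (orthogonal vectors)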
class ContrastiveLoss(nn.Module):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.ContrastiveLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5, size_average:bool = True):
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2))
return losses.mean() if self.size_average else losses.sum()
| 2,794 | 44.080645 | 162 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/SoftmaxLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class SoftmaxLoss(nn.Module):
"""
This loss was used in our SBERT publication (https://arxiv.org/abs/1908.10084) to train the SentenceTransformer
model on NLI data. It adds a softmax classifier on top of the output of two transformer networks.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(SoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
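# --- Hedged usage sketch (illustrative only) ---
# With the default flags (u, v and |u-v| concatenated), the classifier input is
# 3 * sentence_embedding_dimension; a standalone shape check with an assumed dim of 768:
if __name__ == "__main__":
    import torch
    rep_a, rep_b = torch.randn(4, 768), torch.randn(4, 768)
    features = torch.cat([rep_a, rep_b, torch.abs(rep_a - rep_b)], 1)
    assert features.shape == (4, 3 * 768)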
| 3,637 | 45.050633 | 152 |
py
|
SLT-FAI
|
SLT-FAI-main/sentence_transformers/losses/AdvSimSiamLoss.py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
LARGE_NUM = 1e9
class MLP(torch.nn.Module):
def __init__(self,
input_dim: int,
hidden_dim: int,
output_dim: int,
hidden_activation: str = "relu",
use_bn: bool = False,
use_bias: bool = True):
super(MLP, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.linear1 = torch.nn.Linear(input_dim, hidden_dim, bias=use_bias and not use_bn)
self.linear2 = torch.nn.Linear(hidden_dim, output_dim, bias=use_bias)
if hidden_activation == "relu":
self.activation = torch.nn.ReLU()
elif hidden_activation == "leakyrelu":
self.activation = torch.nn.LeakyReLU()
elif hidden_activation == "tanh":
self.activation = torch.nn.Tanh()
elif hidden_activation == "sigmoid":
self.activation = torch.nn.Sigmoid()
else:
raise ValueError(f"Unknown activation function {hidden_activation}")
self.use_bn = use_bn
if use_bn:
self.bn = torch.nn.BatchNorm1d(hidden_dim)
def forward(self, x: torch.Tensor):
hidden = self.linear1(x)
if self.use_bn:
hidden = self.bn(hidden)
activated_hidden = self.activation(hidden)
return self.linear2(activated_hidden)
class projection_MLP(nn.Module):
def __init__(self, hidden_dim=2048, norm=None):
super().__init__()
''' page 3 baseline setting
Projection MLP. The projection MLP (in f) has BN applied
to each fully-connected (fc) layer, including its output fc.
Its output fc has no ReLU. The hidden fc is 2048-d.
This MLP has 3 layers.
'''
if norm:
if norm=='bn':
MLPNorm = nn.BatchNorm1d
else:
MLPNorm = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
)
self.num_layers = 3
def set_layers(self, num_layers):
self.num_layers = num_layers
def forward(self, x):
if self.num_layers == 3:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
elif self.num_layers == 2:
x = self.layer1(x)
x = self.layer3(x)
else:
raise ValueError(f"Unsupported number of layers: {self.num_layers}")
return x
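# --- Hedged usage sketch (illustrative only) ---
# set_layers(2) skips the middle block (layer1 -> layer3), as in some SimSiam ablations;
# the hidden_dim of 768 is an assumed value.
if __name__ == "__main__":
    import torch
    f = projection_MLP(hidden_dim=768, norm='bn')
    f.set_layers(2)
    f.eval()                          # BatchNorm in eval mode, so a small batch is fine
    z = f(torch.randn(2, 768))
    assert z.shape == (2, 768)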
class prediction_MLP(nn.Module):
def __init__(self, hidden_dim=2048, norm=None): # bottleneck structure
super().__init__()
''' page 3 baseline setting
Prediction MLP. The prediction MLP (h) has BN applied
to its hidden fc layers. Its output fc does not have BN
(ablation in Sec. 4.4) or ReLU. This MLP has 2 layers.
The dimension of h’s input and output (z and p) is d = 2048,
and h’s hidden layer’s dimension is 512, making h a
bottleneck structure (ablation in supplement).
'''
if norm:
if norm=='bn':
MLPNorm = nn.BatchNorm1d
else:
MLPNorm = nn.LayerNorm
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
MLPNorm(hidden_dim),
nn.ReLU(inplace=True)
)
else:
self.layer1 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
"""
Adding BN to the output of the prediction MLP h does not work
well (Table 3d). We find that this is not about collapsing.
The training is unstable and the loss oscillates.
"""
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
return x
class AdvCLSoftmaxLoss(nn.Module):
"""
An extension of the SoftmaxLoss from our SBERT publication (https://arxiv.org/abs/1908.10084), which
adds a softmax classifier on top of the output of two transformer networks, plus optional adversarial
training and SimSiam-style contrastive objectives (projection and prediction MLPs).
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.AdvCLSoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False,
concatenation_sent_max_square: bool = False, # concatenate the element-wise max-square of the two sentence representations (a trick suggested by Rumei)
normal_loss_stop_grad: bool = False, # whether to apply stop-gradient to the standard (sentence-pair classification) loss
use_adversarial_training: bool = False, # whether to add the adversarial loss
adversarial_loss_rate: float = 1.0, # coefficient of the adversarial loss
do_noise_normalization: bool = True, # whether to normalize the adversarial perturbation (noise)
noise_norm: float = 0.01, # magnitude of the adversarial perturbation
normal_normal_weight: float = 0.25, # weight of the normal-to-normal sentence-pair classification loss
normal_adv_weight: float = 0.25, # weight of the normal-to-adv sentence-pair classification loss
adv_normal_weight: float = 0.25, # weight of the adv-to-normal sentence-pair classification loss
adv_adv_weight: float = 0.25, # weight of the adv-to-adv sentence-pair classification loss
adv_loss_stop_grad: bool = False, # whether to apply stop-gradient to the adversarial loss (a set of sentence-pair classification losses)
use_contrastive_loss: bool = False, # whether to add the contrastive loss
data_augmentation_strategy: str = "adv", # data augmentation strategy; options: no augmentation "none", adversarial "adv", mean vs. max pooling contrast "meanmax", TODO
contrastive_loss_only: bool = False, # train (unsupervised) with the contrastive loss only
no_pair: bool = False, # do not use paired corpora, to avoid prior information
contrastive_loss_type: str = "nt_xent", # form of the contrastive loss ("nt_xent" or "cosine")
contrastive_loss_rate: float = 1.0, # coefficient of the contrastive loss
do_hidden_normalization: bool = True, # whether to L2-normalize the sentence representations before the contrastive loss
temperature: float = 1.0, # temperature of the contrastive loss; only effective for the cross-entropy (nt_xent) form
mapping_to_small_space: int = None, # whether to map the sentence representations into a smaller space for the contrastive loss (as in SimCLR), and the output dimension of that mapping
add_contrastive_predictor: str = None, # whether to apply a non-linear predictor of equal dimension in contrastive learning (as in SimSiam), and to which branch it is added (normal or adv)
add_projection: bool = False, # add a projection network before the predictor
projection_norm_type: str = None, # norm type of the projection network before the predictor; one of (None, 'bn', 'ln')
projection_hidden_dim: int = None, # hidden dimension of the MLP; used by both options above (mapping & predictor)
projection_use_batch_norm: bool = None, # whether to add BatchNorm to the MLP hidden layer; used by both options above (mapping & predictor)
contrastive_loss_stop_grad: str = None # whether to apply stop-gradient to the contrastive loss, and to which branch ("normal" or "adv")
):
super(AdvCLSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.concatenation_sent_max_square = concatenation_sent_max_square
self.normal_loss_stop_grad = normal_loss_stop_grad
self.use_adversarial_training = use_adversarial_training
self.adversarial_loss_rate = adversarial_loss_rate
self.do_noise_normalization = do_noise_normalization
self.noise_norm = noise_norm
self.normal_normal_weight = normal_normal_weight
self.normal_adv_weight = normal_adv_weight
self.adv_normal_weight = adv_normal_weight
self.adv_adv_weight = adv_adv_weight
self.adv_loss_stop_grad = adv_loss_stop_grad
self.use_contrastive_loss = use_contrastive_loss
assert data_augmentation_strategy in ("none", "adv", "meanmax")
self.data_augmentation_strategy = data_augmentation_strategy
self.contrastive_loss_only = contrastive_loss_only
self.no_pair = no_pair
if no_pair:
assert use_contrastive_loss and contrastive_loss_only
assert contrastive_loss_type in ("nt_xent", "cosine")
self.contrastive_loss_type = contrastive_loss_type
self.contrastive_loss_rate = contrastive_loss_rate
self.do_hidden_normalization = do_hidden_normalization
self.temperature = temperature
self.add_projection = add_projection
if add_projection:
assert projection_norm_type in (None, "ln", "bn")
self.projection_head = prediction_MLP(hidden_dim=sentence_embedding_dimension, norm=projection_norm_type)
if mapping_to_small_space is not None:
assert add_contrastive_predictor is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = "both"
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, mapping_to_small_space, use_bn=projection_use_batch_norm)
else:
self.projection_mode = "none"
if add_contrastive_predictor is not None:
assert add_contrastive_predictor in ("normal", "adv")
assert mapping_to_small_space is None
assert projection_hidden_dim is not None
assert projection_use_batch_norm is not None
self.projection_mode = add_contrastive_predictor
self.projection = MLP(sentence_embedding_dimension, projection_hidden_dim, sentence_embedding_dimension, use_bn=projection_use_batch_norm)
# (no else here: projection_mode was already set above; an unconditional else would clobber the "both" mode set by mapping_to_small_space)
assert contrastive_loss_stop_grad in (None, "normal", "adv")
self.contrastive_loss_stop_grad = contrastive_loss_stop_grad
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
if concatenation_sent_max_square:
num_vectors_concatenated += 1
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def _reps_to_output(self, rep_a: torch.Tensor, rep_b: torch.Tensor):
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
if self.concatenation_sent_max_square:
vectors_concat.append(torch.max(rep_a, rep_b).pow(2))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
return output
def _contrastive_loss_forward(self,
hidden1: torch.Tensor,
hidden2: torch.Tensor,
hidden_norm: bool = True,
temperature: float = 1.0):
"""
hidden1/hidden2: (bsz, dim)
"""
batch_size, hidden_dim = hidden1.shape
if self.add_projection:
hidden1 = self.projection_head(hidden1)
hidden2 = self.projection_head(hidden2)
if self.projection_mode in ("both", "normal"):
hidden1 = self.projection(hidden1)
if self.projection_mode in ("both", "adv"):
hidden2 = self.projection(hidden2)
if self.contrastive_loss_type == "cosine":
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
scores = torch.einsum("bd,bd->b", hidden1, hidden2)
neg_cosine_loss = -1.0 * scores.mean()
return neg_cosine_loss
elif self.contrastive_loss_type == "nt_xent":
if hidden_norm:
hidden1 = torch.nn.functional.normalize(hidden1, p=2, dim=-1)
hidden2 = torch.nn.functional.normalize(hidden2, p=2, dim=-1)
hidden1_large = hidden1
hidden2_large = hidden2
labels = torch.arange(0, batch_size).to(device=hidden1.device)
masks = torch.nn.functional.one_hot(torch.arange(0, batch_size), num_classes=batch_size).to(device=hidden1.device, dtype=torch.float)
logits_aa = torch.matmul(hidden1, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = torch.matmul(hidden2, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = torch.matmul(hidden1, hidden2_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
logits_ba = torch.matmul(hidden2, hidden1_large.transpose(0, 1)) / temperature # shape (bsz, bsz)
loss_a = torch.nn.functional.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), labels)
loss_b = torch.nn.functional.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), labels)
loss = loss_a + loss_b
return loss
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
if not self.training: # evaluation or prediction phase
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
elif not self.use_adversarial_training and not self.use_contrastive_loss: # only the standard supervised training method (the baseline setting)
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
if self.normal_loss_stop_grad:
rep_b = rep_b.detach()
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fct(output, labels.view(-1))
return loss
else:
return reps, output
else: # train with adversarial training or the contrastive loss
if self.use_adversarial_training or (self.use_contrastive_loss and self.data_augmentation_strategy == "adv"): # generate adversarial examples if adversarial training is used, or if contrastive learning needs adversarial examples for data augmentation
# 1. normal forward
sentence_feature_a, sentence_feature_b = sentence_features
ori_feature_keys = set(sentence_feature_a.keys()) # record the keys since the features will be updated
rep_a = self.model(sentence_feature_a)['sentence_embedding']
embedding_output_a = self.model[0].auto_model.embedding_output
rep_b = self.model(sentence_feature_b)['sentence_embedding']
embedding_output_b = self.model[0].auto_model.embedding_output
sentence_feature_a = {k: v for k, v in sentence_feature_a.items() if k in ori_feature_keys}
sentence_feature_b = {k: v for k, v in sentence_feature_b.items() if k in ori_feature_keys}
output = self._reps_to_output(rep_a, rep_b)
loss_fct = nn.CrossEntropyLoss()
normal_loss = loss_fct(output, labels.view(-1))
# 2. adversarial backward
embedding_output_a.retain_grad()
embedding_output_b.retain_grad()
normal_loss.backward(retain_graph=True)
unnormalized_noise_a = embedding_output_a.grad.detach_()
unnormalized_noise_b = embedding_output_b.grad.detach_()
for p in self.parameters():
if p.grad is not None:
p.grad.detach_()
p.grad.zero_() # clear the gradient on parameters
if self.do_noise_normalization: # do normalization
norm_a = unnormalized_noise_a.norm(p=2, dim=-1)
normalized_noise_a = unnormalized_noise_a / (norm_a.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
norm_b = unnormalized_noise_b.norm(p=2, dim=-1)
normalized_noise_b = unnormalized_noise_b / (norm_b.unsqueeze(dim=-1) + 1e-10) # add 1e-10 to avoid NaN
else: # no normalization
normalized_noise_a = unnormalized_noise_a
normalized_noise_b = unnormalized_noise_b
noise_a = self.noise_norm * normalized_noise_a
noise_b = self.noise_norm * normalized_noise_b
# 3. adversarial forward
noise_embedding_a = embedding_output_a + noise_a
noise_embedding_b = embedding_output_b + noise_b
self.model[0].auto_model.noise_embedding = noise_embedding_a
adv_rep_a = self.model(sentence_feature_a)['sentence_embedding']
self.model[0].auto_model.noise_embedding = noise_embedding_b
adv_rep_b = self.model(sentence_feature_b)['sentence_embedding']
self.model[0].auto_model.noise_embedding = None # unset the noise_embedding (see `transformers/modeling_bert.py` for more details)
del self.model[0].auto_model.__dict__['noise_embedding'] # unset the noise_embedding
elif self.use_contrastive_loss and self.data_augmentation_strategy == "meanmax": # contrast the mean-pooled against the max-pooled representations
rep_dicts = [self.model(sentence_feature) for sentence_feature in sentence_features]
reps_mean = [rep_dict['pad_mean_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_mean, rep_b_mean = reps_mean
else:
rep_a_mean, rep_b_mean = reps_mean[0], None
reps_max = [rep_dict['pad_max_tokens'] for rep_dict in rep_dicts]
if not self.no_pair:
rep_a_max, rep_b_max = reps_max
else:
rep_a_max, rep_b_max = reps_max[0], None
else: # the most basic version; only rep_a and rep_b are needed # TODO: add more data augmentation strategies here
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
if not self.no_pair:
rep_a, rep_b = reps
else:
rep_a, rep_b = reps[0], None
# 4. loss calculation
final_loss = 0
if self.use_adversarial_training:
if self.adv_loss_stop_grad:
rep_b = rep_b.detach()
adv_rep_b = adv_rep_b.detach()
match_output_n_n = self._reps_to_output(rep_a, rep_b)
match_output_n_a = self._reps_to_output(rep_a, adv_rep_b)
match_output_a_n = self._reps_to_output(adv_rep_a, rep_b)
match_output_a_a = self._reps_to_output(adv_rep_a, adv_rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
loss_n_a = loss_fct(match_output_n_a, labels.view(-1))
loss_a_n = loss_fct(match_output_a_n, labels.view(-1))
loss_a_a = loss_fct(match_output_a_a, labels.view(-1))
adv_training_loss = self.normal_normal_weight * loss_n_n + self.normal_adv_weight * loss_n_a + \
self.adv_normal_weight * loss_a_n + self.adv_adv_weight * loss_a_a
final_loss += self.adversarial_loss_rate * adv_training_loss
self.model.tensorboard_writer.add_scalar(f"train_adv_loss", self.adversarial_loss_rate * adv_training_loss.item(), global_step=self.model.global_step)
elif not self.contrastive_loss_only:
match_output_n_n = self._reps_to_output(rep_a, rep_b)
loss_n_n = loss_fct(match_output_n_n, labels.view(-1))
final_loss += loss_n_n
self.model.tensorboard_writer.add_scalar(f"train_normal_loss", loss_n_n.item(), global_step=self.model.global_step)
if self.use_contrastive_loss:
if self.data_augmentation_strategy == "adv":
if self.contrastive_loss_stop_grad == "normal":
rep_a = rep_a.detach()
if not self.no_pair:
rep_b = rep_b.detach()
elif self.contrastive_loss_stop_grad == "adv":
adv_rep_a = adv_rep_a.detach()
if not self.no_pair:
adv_rep_b = adv_rep_b.detach()
else:
assert self.contrastive_loss_stop_grad is None
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = adv_rep_a, adv_rep_b
elif self.data_augmentation_strategy == "none":
rep_a_view1, rep_b_view1 = rep_a, rep_b
rep_a_view2, rep_b_view2 = rep_a, rep_b
elif self.data_augmentation_strategy == "meanmax":
rep_a_view1, rep_b_view1 = rep_a_mean, rep_b_mean
rep_a_view2, rep_b_view2 = rep_a_max, rep_b_max
else:
raise ValueError("Invalid data augmentation strategy")
contrastive_loss_a = self._contrastive_loss_forward(rep_a_view1, rep_a_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_a", contrastive_loss_a.item(), global_step=self.model.global_step)
if not self.no_pair:
contrastive_loss_b = self._contrastive_loss_forward(rep_b_view1, rep_b_view2, hidden_norm=self.do_hidden_normalization, temperature=self.temperature)
else:
contrastive_loss_b = torch.tensor(0.0)
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_b", contrastive_loss_b.item(), global_step=self.model.global_step)
contrastive_loss = contrastive_loss_a + contrastive_loss_b
final_loss += self.contrastive_loss_rate * contrastive_loss
self.model.tensorboard_writer.add_scalar(f"train_contrastive_loss_total", self.contrastive_loss_rate * contrastive_loss.item(), global_step=self.model.global_step)
return final_loss
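# --- Hedged sketch (added, not part of the original file): a minimal NT-Xent
# (SimCLR-style) loss illustrating what `_contrastive_loss_forward` above is
# assumed to compute for two views of shape [batch, dim]; the name, defaults,
# and exact masking here are assumptions, not this repo's API.
def _nt_xent_sketch(hidden1, hidden2, hidden_norm=True, temperature=1.0):
    import torch
    import torch.nn.functional as F
    if hidden_norm:
        hidden1 = F.normalize(hidden1, dim=-1)
        hidden2 = F.normalize(hidden2, dim=-1)
    batch_size = hidden1.size(0)
    hidden = torch.cat([hidden1, hidden2], dim=0)             # [2B, D]
    logits = hidden @ hidden.t() / temperature                # [2B, 2B] similarities
    mask = torch.eye(2 * batch_size, dtype=torch.bool, device=hidden.device)
    logits = logits.masked_fill(mask, float('-inf'))          # drop self-similarity
    # row i (< B) is positive with row i + B, and vice versa
    targets = torch.cat([torch.arange(batch_size) + batch_size,
                         torch.arange(batch_size)]).to(hidden.device)
    return F.cross_entropy(logits, targets)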
| 25,701 | 49.794466 | 179 |
py
|
robust_trust_region
|
robust_trust_region-main/wrapper/bilateralfilter/setup.py
|
#File: setup.py
#!/usr/bin/python
from distutils.core import setup, Extension
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
pht_module = Extension('_bilateralfilter',
sources=['bilateralfilter_wrap.cxx',
'bilateralfilter.cpp',
'permutohedral.cpp'
],
extra_compile_args = ["-fopenmp"],
include_dirs = [numpy_include],
extra_link_args=['-lgomp']
)
setup(name = 'bilateralfilter',
version = '0.1',
author = 'SWIG Docs',
      description = 'SWIG wrapper for the permutohedral bilateral filter',
ext_modules = [pht_module],
py_modules = ['bilateralfilter'],
)
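# A hedged build note (added, not from the original file): with the
# SWIG-generated sources present, the extension is typically compiled in
# place via
#   python setup.py build_ext --inplace
# after which `import bilateralfilter` resolves against _bilateralfilter.so.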
| 1,000 | 31.290323 | 78 |
py
|
robust_trust_region
|
robust_trust_region-main/wrapper/bilateralfilter/bilateralfilter.py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_bilateralfilter', [dirname(__file__)])
except ImportError:
import _bilateralfilter
return _bilateralfilter
if fp is not None:
try:
_mod = imp.load_module('_bilateralfilter', fp, pathname, description)
finally:
fp.close()
return _mod
_bilateralfilter = swig_import_helper()
del swig_import_helper
else:
import _bilateralfilter
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _bilateralfilter.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _bilateralfilter.SwigPyIterator_value(self)
def incr(self, n=1):
return _bilateralfilter.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _bilateralfilter.SwigPyIterator_decr(self, n)
def distance(self, x):
return _bilateralfilter.SwigPyIterator_distance(self, x)
def equal(self, x):
return _bilateralfilter.SwigPyIterator_equal(self, x)
def copy(self):
return _bilateralfilter.SwigPyIterator_copy(self)
def next(self):
return _bilateralfilter.SwigPyIterator_next(self)
def __next__(self):
return _bilateralfilter.SwigPyIterator___next__(self)
def previous(self):
return _bilateralfilter.SwigPyIterator_previous(self)
def advance(self, n):
return _bilateralfilter.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _bilateralfilter.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _bilateralfilter.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _bilateralfilter.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _bilateralfilter.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _bilateralfilter.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _bilateralfilter.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _bilateralfilter.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class FloatVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FloatVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FloatVector, name)
__repr__ = _swig_repr
def iterator(self):
return _bilateralfilter.FloatVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _bilateralfilter.FloatVector___nonzero__(self)
def __bool__(self):
return _bilateralfilter.FloatVector___bool__(self)
def __len__(self):
return _bilateralfilter.FloatVector___len__(self)
def __getslice__(self, i, j):
return _bilateralfilter.FloatVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _bilateralfilter.FloatVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _bilateralfilter.FloatVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _bilateralfilter.FloatVector___delitem__(self, *args)
def __getitem__(self, *args):
return _bilateralfilter.FloatVector___getitem__(self, *args)
def __setitem__(self, *args):
return _bilateralfilter.FloatVector___setitem__(self, *args)
def pop(self):
return _bilateralfilter.FloatVector_pop(self)
def append(self, x):
return _bilateralfilter.FloatVector_append(self, x)
def empty(self):
return _bilateralfilter.FloatVector_empty(self)
def size(self):
return _bilateralfilter.FloatVector_size(self)
def swap(self, v):
return _bilateralfilter.FloatVector_swap(self, v)
def begin(self):
return _bilateralfilter.FloatVector_begin(self)
def end(self):
return _bilateralfilter.FloatVector_end(self)
def rbegin(self):
return _bilateralfilter.FloatVector_rbegin(self)
def rend(self):
return _bilateralfilter.FloatVector_rend(self)
def clear(self):
return _bilateralfilter.FloatVector_clear(self)
def get_allocator(self):
return _bilateralfilter.FloatVector_get_allocator(self)
def pop_back(self):
return _bilateralfilter.FloatVector_pop_back(self)
def erase(self, *args):
return _bilateralfilter.FloatVector_erase(self, *args)
def __init__(self, *args):
this = _bilateralfilter.new_FloatVector(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _bilateralfilter.FloatVector_push_back(self, x)
def front(self):
return _bilateralfilter.FloatVector_front(self)
def back(self):
return _bilateralfilter.FloatVector_back(self)
def assign(self, n, x):
return _bilateralfilter.FloatVector_assign(self, n, x)
def resize(self, *args):
return _bilateralfilter.FloatVector_resize(self, *args)
def insert(self, *args):
return _bilateralfilter.FloatVector_insert(self, *args)
def reserve(self, n):
return _bilateralfilter.FloatVector_reserve(self, n)
def capacity(self):
return _bilateralfilter.FloatVector_capacity(self)
__swig_destroy__ = _bilateralfilter.delete_FloatVector
__del__ = lambda self: None
FloatVector_swigregister = _bilateralfilter.FloatVector_swigregister
FloatVector_swigregister(FloatVector)
def initializePermutohedral(image, H, W, sigmargb, sigmaxy, lattice_):
return _bilateralfilter.initializePermutohedral(image, H, W, sigmargb, sigmaxy, lattice_)
initializePermutohedral = _bilateralfilter.initializePermutohedral
def bilateralfilter(image, arg2, out, H, W, sigmargb, sigmaxy):
return _bilateralfilter.bilateralfilter(image, arg2, out, H, W, sigmargb, sigmaxy)
bilateralfilter = _bilateralfilter.bilateralfilter
def bilateralfilter_batch(images, ins, outs, N, K, H, W, sigmargb, sigmaxy):
return _bilateralfilter.bilateralfilter_batch(images, ins, outs, N, K, H, W, sigmargb, sigmaxy)
bilateralfilter_batch = _bilateralfilter.bilateralfilter_batch
class Permutohedral(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Permutohedral, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Permutohedral, name)
__repr__ = _swig_repr
def __init__(self):
this = _bilateralfilter.new_Permutohedral()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _bilateralfilter.delete_Permutohedral
__del__ = lambda self: None
def init(self, feature, feature_size, N):
return _bilateralfilter.Permutohedral_init(self, feature, feature_size, N)
def compute(self, *args):
return _bilateralfilter.Permutohedral_compute(self, *args)
Permutohedral_swigregister = _bilateralfilter.Permutohedral_swigregister
Permutohedral_swigregister(Permutohedral)
# This file is compatible with both classic and new-style classes.
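# --- Hedged usage sketch (added; the SWIG-generated module itself should not
# be edited). Shapes mirror the caller in DenseCRFLoss.py: all buffers are
# flattened float32 numpy arrays; the sigma values below are illustrative
# assumptions.
#
#   import numpy as np
#   N, K, H, W = 1, 2, 8, 8                            # batch, classes, height, width
#   images = np.random.rand(N, 3, H, W).astype(np.float32).flatten()
#   segs = np.random.rand(N, K, H, W).astype(np.float32).flatten()
#   out = np.zeros_like(segs)                          # receives the filtered output
#   bilateralfilter_batch(images, segs, out, N, K, H, W, 15.0, 80.0)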
| 9,684 | 30.343042 | 99 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/inference.py
|
import argparse
import os
import numpy as np
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.autograd import Variable
from mypath import Path
from dataloaders import make_data_loader
from dataloaders.custom_transforms import denormalizeimage
from dataloaders.utils import decode_segmap
from dataloaders import custom_transforms as tr
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils.saver import Saver
import time
import multiprocessing
from DenseCRFLoss import DenseCRFLoss
global grad_seg
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Inference")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--n_class', type=int, default=21)
parser.add_argument('--crop_size', type=int, default=513,
help='crop image size')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='disables CUDA training')
# checking point
parser.add_argument('--checkpoint', type=str, default=None,
help='put the path to checkpoint if needed')
# rloss options
parser.add_argument('--rloss_weight', type=float, default=0,
metavar='M', help='densecrf loss (default: 0)')
parser.add_argument('--rloss_scale',type=float,default=1.0,
help='scale factor for rloss input, choose small number for efficiency, domain: (0,1]')
parser.add_argument('--sigma_rgb',type=float,default=15.0,
help='DenseCRF sigma_rgb')
parser.add_argument('--sigma_xy',type=float,default=80.0,
help='DenseCRF sigma_xy')
# input image
parser.add_argument('--image_path',type=str,default='./misc/test.png',
help='input image path')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
print(args)
# Define network
model = DeepLab(num_classes=args.n_class,
backbone=args.backbone,
output_stride=16,
sync_bn=False,
freeze_bn=False)
# Using cuda
if not args.no_cuda:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
patch_replication_callback(model)
model = model.cuda()
# load checkpoint
if not os.path.isfile(args.checkpoint):
raise RuntimeError("=> no checkpoint found at '{}'" .format(args.checkpoint))
checkpoint = torch.load(args.checkpoint)
if args.cuda:
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {}) best_pred {}"
.format(args.checkpoint, checkpoint['epoch'], best_pred))
model.eval()
    densecrflosslayer = DenseCRFLoss(weight=1e-8, sigma_rgb=args.sigma_rgb, sigma_xy=args.sigma_xy, scale_factor=args.rloss_scale)
if not args.no_cuda:
densecrflosslayer.cuda()
print(densecrflosslayer)
composed_transforms = transforms.Compose([
tr.FixScaleCropImage(crop_size=args.crop_size),
tr.NormalizeImage(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensorImage()])
image = composed_transforms(Image.open(args.image_path).convert('RGB')).unsqueeze(0)
image_cpu = image
if not args.no_cuda:
image = image.cuda()
output = model(image)
pred = output.data.cpu().numpy()
pred = np.argmax(pred, axis=1)
    # softmax over classes; the probabilities feed the rloss layer below
softmax = nn.Softmax(dim=1)
probs = softmax(output)
probs = Variable(probs, requires_grad=True)
croppings = torch.ones(pred.shape).float()
if not args.no_cuda:
croppings = croppings.cuda()
# resize output & image & croppings for densecrf
start = time.time()
    densecrfloss = densecrflosslayer(image_cpu, probs, croppings)
    print('inference time:', time.time() - start)
print("densecrf loss {}".format(densecrfloss.item()))
# visualize densecrfloss
densecrfloss.backward()
#print (probs.grad.sum())
#print (reduced_probs.grad.sum())
#grad_seg = reduced_probs.grad.cpu().numpy()
#"""
grad_seg = probs.grad.cpu().numpy()
#print (grad_seg.shape)
for i in range(args.n_class):
fig=plt.figure()
        plt.imshow(grad_seg[0,i,:,:], cmap="hot")  # vmin=0, vmax=1
plt.colorbar()
plt.axis('off')
plt.savefig('./misc/'+args.image_path.split('/')[-1].split('.')[0]+'_grad_seg_class_' + str(i) +'.png')
plt.show(block=False)
plt.close(fig)
# visualize prediction
segmap = decode_segmap(pred[0],'pascal')*255
    np.set_printoptions(threshold=np.inf)
segmap = segmap.astype(np.uint8)
segimg = Image.fromarray(segmap, 'RGB')
segimg.save('./misc/'+args.image_path.split('/')[-1].split('.')[0]+'_prediction.png')
#"""
if __name__ == "__main__":
main()
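# Example invocation (added; paths and values are placeholders, not from the
# original file):
#   python inference.py --backbone resnet --checkpoint ./run/model_best.pth.tar \
#       --image_path ./misc/test.png --rloss_scale 0.5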
| 5,786 | 37.58 | 111 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/DenseCRFLoss.py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import sys
sys.path.append("../wrapper/bilateralfilter/build/lib.linux-x86_64-3.6")
from bilateralfilter import bilateralfilter, bilateralfilter_batch
from dataloaders.custom_transforms import denormalizeimage
import time
from multiprocessing import Pool
import multiprocessing
from itertools import repeat
import pickle
class DenseCRFLossFunction(Function):
@staticmethod
def forward(ctx, images, segmentations, sigma_rgb, sigma_xy, ROIs):
ctx.save_for_backward(segmentations)
ctx.N, ctx.K, ctx.H, ctx.W = segmentations.shape
ROIs = ROIs.unsqueeze_(1).repeat(1,ctx.K,1,1)
segmentations = torch.mul(segmentations.cuda(), ROIs.cuda())
ctx.ROIs = ROIs
densecrf_loss = 0.0
images = images.numpy().flatten()
segmentations = segmentations.cpu().numpy().flatten()
AS = np.zeros(segmentations.shape, dtype=np.float32)
bilateralfilter_batch(images, segmentations, AS, ctx.N, ctx.K, ctx.H, ctx.W, sigma_rgb, sigma_xy)
densecrf_loss -= np.dot(segmentations, AS)
# averaged by the number of images
densecrf_loss /= ctx.N
ctx.AS = np.reshape(AS, (ctx.N, ctx.K, ctx.H, ctx.W))
return Variable(torch.tensor([densecrf_loss]), requires_grad=True)
@staticmethod
def backward(ctx, grad_output):
grad_segmentation = -2*grad_output*torch.from_numpy(ctx.AS)/ctx.N
grad_segmentation=grad_segmentation.cuda()
grad_segmentation = torch.mul(grad_segmentation, ctx.ROIs.cuda())
return None, grad_segmentation, None, None, None
class DenseCRFLoss(nn.Module):
def __init__(self, weight, sigma_rgb, sigma_xy, scale_factor):
super(DenseCRFLoss, self).__init__()
self.weight = weight
self.sigma_rgb = sigma_rgb
self.sigma_xy = sigma_xy
self.scale_factor = scale_factor
def forward(self, images, segmentations, ROIs):
""" scale imag by scale_factor """
scaled_images = F.interpolate(images,scale_factor=self.scale_factor)
scaled_segs = F.interpolate(segmentations,scale_factor=self.scale_factor,mode='bilinear',align_corners=False)
scaled_ROIs = F.interpolate(ROIs.unsqueeze(1),scale_factor=self.scale_factor).squeeze(1)
return self.weight*DenseCRFLossFunction.apply(
scaled_images, scaled_segs, self.sigma_rgb, self.sigma_xy*self.scale_factor, scaled_ROIs)
def extra_repr(self):
return 'sigma_rgb={}, sigma_xy={}, weight={}, scale_factor={}'.format(
self.sigma_rgb, self.sigma_xy, self.weight, self.scale_factor
)
| 2,810 | 39.157143 | 117 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/mypath.py
|
import os
class Path(object):
@staticmethod
def db_root_dir(dataset):
data_root = os.environ['DATA_ROOT']
if dataset == 'pascal':
# folder that contains pascal/. It should have three subdirectories
# called "JPEGImages", "SegmentationClassAug", and "pascal_2012_scribble"
# containing RGB images, groundtruth, and scribbles respectively.
return data_root + '/VOCdevkit/VOC2012/'
elif dataset == 'sbd':
return data_root + '/path/to/datasets/benchmark_RELEASE/' # folder that contains dataset/.
elif dataset == 'cityscapes':
            return data_root + '/path/to/datasets/cityscapes/' # folder that contains leftImg8bit/
elif dataset == 'coco':
return data_root + '/coco/'
else:
print('Dataset {} not available.'.format(dataset))
raise NotImplementedError
| 914 | 42.571429 | 103 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/GridCRFLoss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import math
from dataloaders.custom_transforms import denormalizeimage
from itertools import repeat
class BilinearPottsRelaxation(object):
@staticmethod
    def compute(a, b):
return a * (1 - b)
class TVPottsRelaxation(object):
@staticmethod
    def compute(a, b):
return torch.abs(a - b)
class SquaredPottsRelaxation(object):
@staticmethod
    def compute(a, b):
return (a - b) ** 2
class GridPottsLoss(nn.Module):
def __init__(self, weight, scale_factor, relaxation=BilinearPottsRelaxation, neighbourhood=8):
super(GridPottsLoss, self).__init__()
self.weight = weight
self.scale_factor = scale_factor
self.rel = relaxation
SQRT2 = math.sqrt(2)
if neighbourhood == 4:
self.neighbourhood = [(0, 1, 1), (1, 0, 1)]
elif neighbourhood == 8:
self.neighbourhood = [(0, 1, 1), (1, 0, 1), (1, 1, SQRT2), (-1, 1, SQRT2)]
else:
raise Exception("Unknown neighbourhood: %d" % neighbourhood)
def forward(self, images, segmentations, ROIs):
if self.weight == 0:
self.max_weight = torch.tensor(1, device=segmentations.device)
result = torch.tensor(0, dtype=segmentations.dtype, device=segmentations.device)
return result
def get_diff(val, dx, dy, op=torch.sub):
shape = val.shape
h, w = shape[-2:]
return op(val[..., max(0,-dx):min(h,h-dx), max(0,-dy):min(w,w-dy)],
val[..., max(0,dx):min(h,h+dx), max(0,dy):min(w,w+dy)])
# return op(val[..., :h-dx, :w-dy], val[..., dx:, dy:])
# scale imag by scale_factor
scaled_images = F.interpolate(images,scale_factor=self.scale_factor)
scaled_segs = F.interpolate(segmentations,scale_factor=self.scale_factor,mode='bilinear',align_corners=False)
scaled_ROIs = F.interpolate(ROIs.unsqueeze(1),scale_factor=self.scale_factor)
use_cuda = segmentations.is_cuda
sigma2 = 0
count = 0
for dx, dy, _ in self.neighbourhood:
new_rois = get_diff(scaled_ROIs, dx, dy, torch.min)
rgb_diff = get_diff(scaled_images, dx, dy) ** 2 * new_rois
sigma2 += torch.sum(rgb_diff, (1,2,3), keepdim=True)
count += torch.sum(new_rois, (1,2,3), keepdim=True)
sigma2 = sigma2 / count
sigma2[count == 0] = 1
sigma2[sigma2 == 0] = 1
sigma2 *= 2
count = 0
loss = 0
max_weight = None
for dx, dy, f in self.neighbourhood:
new_rois = get_diff(scaled_ROIs, dx, dy, torch.min)
rgb_diff = torch.sum(get_diff(scaled_images, dx, dy) ** 2, 1, keepdim=True)
rgb_weight = new_rois * torch.exp(-rgb_diff / sigma2) / f
if use_cuda:
rgb_weight = rgb_weight.cuda()
loc_max_weight, _ = torch.max(rgb_weight[:,0,:,:], 1)
loc_max_weight, _ = torch.max(loc_max_weight, 1)
max_weight = loc_max_weight if max_weight is None else torch.max(loc_max_weight, max_weight)
            pixel_loss = get_diff(scaled_segs, dx, dy, self.rel.compute) * rgb_weight
count += torch.sum(new_rois, (1,2,3), keepdim=True)
loss += torch.sum(pixel_loss, (1,2,3), keepdim=True)
self.max_weight = max_weight * self.weight
count[count == 0] = 1
if use_cuda:
count = count.cuda()
loss /= count
assert not torch.isnan(loss).any()
return torch.mean(loss) * self.weight
def extra_repr(self):
return 'weight={}, scale_factor={}, neighborhood={}, relaxation={}'.format(
self.weight, self.scale_factor, len(self.neighbourhood) * 2, self.rel
)
| 3,859 | 34.740741 | 117 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/train_with_dcr.py
|
import os, sys
import argparse
import math
import time
from tqdm import tqdm
import numpy as np
import torchvision
import torch
import torch.nn.functional as F
from mypath import Path
from dataloaders import make_data_loader
from dataloaders.utils import decode_seg_map_sequence, normalize_image_to_range
from dataloaders.custom_transforms import denormalizeimage
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.metrics import Evaluator
from utils.proposal_generator import ProposalGeneratorFileCache
from utils.log_lin_softmax import log_lin_softmax
from train import TrainerBase
from DenseCRFLoss import DenseCRFLoss
import GridCRFLoss
def nll_error_loss(logits, seeds, error_labels, eps):
N, C = logits.shape[:2]
prob_log_mix = log_lin_softmax(eps / (C - 1), 1 - C * eps / (C - 1), logits, 1)
if seeds is not None:
prob_log_mix = prob_log_mix.permute(0,2,3,1)
prob_log_mix[seeds != 255, :] = F.log_softmax(logits.permute(0,2,3,1)[seeds != 255, :], -1)
prob_log_mix = prob_log_mix.permute(0,3,1,2)
celoss = F.nll_loss(prob_log_mix, error_labels[:,0].long(), ignore_index=255)
if seeds is not None:
celoss *= (error_labels != 255).float().sum() / (seeds != 255).sum()
celoss /= N
return celoss
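# Hedged illustration (added, not part of the original file): under the
# uniform-error model above, a hidden label y is scored with probability
#   p(y | x) = (1 - C*eps/(C-1)) * softmax(logits)_y + eps/(C-1),
# i.e. (1 - eps) mass on the predicted class and eps spread over the C-1
# wrong classes; seed pixels (seeds != 255) keep the plain log-softmax.
# Assumed shapes: logits [N, C, H, W], seeds [N, H, W], and
# error_labels [N, 1, H, W], with 255 marking ignored pixels.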
class Trainer(TrainerBase):
def __init__(self, args):
self.evaluator_full = None
def ProposalGenerator(*args, **kwargs):
return ProposalGeneratorFileCache(*args, **kwargs, eps=0)
self.proposal_generator = None
if args.use_dcr:
if args.proposals is not None:
self.proposal_generator = ProposalGenerator(None, path=args.proposals)
print("No explicit proposal generator")
else:
if args.use_dcr == "AlphaExpansion":
import AlphaExpansion
generator = AlphaExpansion.AlphaExpansion(
max_iter=args.gc_max_iters,
potts_weight=args.potts_weight,
ce_weight=args.tr_weight,
restrict=args.tr_restricted,
scale=args.gc_scale
)
if args.alpha_use_edge_predictor:
from PIL import Image
old_generator = generator
path = args.alpha_use_edge_predictor
def _decorator(unary, image, *args, **kwargs):
img = Image.open(path + "/%05d.png" % kwargs['index'])
img = np.array(img, np.float32)
edges = torch.tensor(img) / 255.
return old_generator(unary, edges[None, None], *args, **kwargs)
generator = _decorator
self.proposal_generator = ProposalGenerator(generator)
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
self.train_loader, self.val_loader, self.test_loader, nclass \
= make_data_loader(args, self.proposal_generator, **kwargs)
super().__init__(args, nclass)
# Define network
model = DeepLab(num_classes=self.nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn,
v=args.v)
self.freeze_bn = args.freeze_bn
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * args.last_layer_mult_lr}]
        # error model
if self.args.tr_error_model == 'Const':
self.error_prob = lambda: torch.tensor(self.args.tr_error_prob)
elif self.args.tr_error_model == 'Uniform':
x = -math.log(1/self.args.tr_error_prob - 1)
log_error_prob = torch.tensor(x, requires_grad=True)
train_params.append({'params': [log_error_prob], 'lr': args.lr})
self.error_prob = lambda: torch.sigmoid(log_error_prob)
elif self.args.tr_error_model == 'Poly0':
start = 1 - 1.0 / self.nclass
target = -math.log(1/self.args.tr_error_prob - 1)
power = 1
            self.error_prob = lambda: \
                start + (target - start) * (self.scheduler.T / self.scheduler.N) ** power
# Define Optimizer
optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=args.nesterov)
# Define Criterion
# whether to use class balanced weights
if args.use_balanced_weights:
classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')
if os.path.isfile(classes_weights_path):
weight = np.load(classes_weights_path)
else:
weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)
weight = torch.from_numpy(weight.astype(np.float32))
else:
weight = None
self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
self.tr_extra_criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='ce')
self.model, self.optimizer = model, optimizer
relaxation = {
'bilinear': GridCRFLoss.BilinearPottsRelaxation,
'squared': GridCRFLoss.SquaredPottsRelaxation,
'tv': GridCRFLoss.TVPottsRelaxation,
}[args.relaxation]
self.gridcrf = GridCRFLoss.GridPottsLoss(weight=args.potts_weight,
scale_factor=args.rloss_scale, relaxation=relaxation)
self.pce = nn.CrossEntropyLoss(ignore_index=255)
# Using cuda
if args.cuda:
self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = 0.0
if args.resume is not None:
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# Clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
if args.precompute_last_layer:
self.train_last_layer()
def training(self, epoch):
train_loss = 0.0
train_celoss = 0.0
train_crfloss = 0.0
train_smooth = 0.0
train_unary = 0.0
train_relaxed_un = 0.0
train_relaxed_sm = 0.0
self.evaluator.reset()
self.evaluator_full = None
if self.args.use_dcr:
train_smooth_p1 = 0.0
train_unary_p1 = 0.0
train_smooth_p1_upsample = 0.0
train_unary_p1_upsample = 0.0
if self.args.proposals:
self.proposal_generator.update_model(
self.model.module, True)
else:
if self.args.use_dcr == "AlphaExpansion":
self.proposal_generator.update_model(
self.model.module, False if epoch > 0 else None)
self.proposal_generator.alpha_expansion.max_iter = \
5 if epoch % self.args.hidden_update == 0 else 0
else:
self.proposal_generator.update_model(
self.model.module,
True if epoch % self.args.hidden_update != 0 else None
)
self.model.train()
if self.freeze_bn:
freeze_batchnorm(self.model)
print('\n=>Epoches %i, learning rate = %.4f, previous best = %.4f'
% (epoch, self.scheduler.actual_lr, self.best_pred))
num_img_tr = len(self.train_loader)
softmax = nn.Softmax(dim=1)
self.evaluator.reset()
tbar = tqdm(self.train_loader)
for i, sample in enumerate(tbar):
iter = i + num_img_tr * epoch
image, target_cpu = sample['image'], sample['label']
inside = target_cpu != 254
croppings = inside.float()
outside = target_cpu == 254
target_cpu[outside] = 255
image.transpose(0, 1)[:, outside] = 0
target = target_cpu
if self.args.cuda:
image, target = image.cuda(), target_cpu.cuda()
target_long = target.long()
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
output = self.model(image)
probs = softmax(output)
if self.args.use_dcr:
hidden = sample['label_proposal']
un, sm = sample['un'], sample['sm']
if self.args.cuda:
hidden = hidden.cuda()
hidden[hidden == 254] = 255
if self.args.tr_soften:
if hidden.ndim != 4:
hidden.unsqueeze_(1)
if hidden.shape[1] == 1:
bad = hidden == 255
hidden[bad] = 0
hidden = torch.zeros_like(output).scatter_(1, hidden.long(), 1)
hidden_perm = hidden.permute([0,2,3,1])
hidden_perm_shape = hidden_perm.shape
hidden_perm = hidden_perm.reshape(-1, hidden.shape[1])
hidden_perm[bad.reshape(-1)] = 255
hidden = hidden_perm.reshape(hidden_perm_shape).permute([0,3,1,2])
del hidden_perm
hidden[(hidden != 255) & (target[:,None] == 255)] *= 1 - self.args.tr_soften
hidden[(hidden != 255) & (target[:,None] == 255)] += self.args.tr_soften / output.shape[1]
if self.args.tr_error_model in ['Const', 'Uniform']:
eps = self.error_prob()
self.writer.add_scalar('train_iter/error_prob', eps.item(), iter)
celoss = nll_error_loss(output, target, hidden, eps)
else:
celoss = self.criterion(output, hidden)
if self.args.use_pce_at_tr > 0:
celoss += self.tr_extra_criterion(output, target) * self.args.use_pce_at_tr
loss = celoss + 0
else:
if self.args.relaxation_target == "Prob":
gridcrf_target = probs
elif self.args.relaxation_target == "LogProb":
gridcrf_target = F.log_softmax(output, dim=1)
elif self.args.relaxation_target == "Logits":
gridcrf_target = output
else:
raise KeyError
init_rel_sm = self.gridcrf(image, gridcrf_target, croppings.cuda())
if self.args.tr_error_model in ['Const', 'Uniform']:
eps = self.error_prob()
self.writer.add_scalar('train_iter/error_prob', eps.item(), iter)
init_rel_un = nll_error_loss(output, None, target[:,None], eps)
else:
init_rel_un = self.criterion(output, target_long)
                loss = init_rel_sm + init_rel_un
                train_relaxed_sm += init_rel_sm.item()
                train_relaxed_un += init_rel_un.item()
train_loss += loss.item()
self.writer.add_scalar('train_iter/total_gap_loss', loss.item(), iter)
loss.backward()
self.optimizer.step()
if 'label_full' in sample:
self.evaluator_full = self.evaluator_full or Evaluator(self.nclass)
self.evaluator_full.add_batch(sample['label_full'].numpy(), torch.argmax(output, 1).cpu().numpy())
self.evaluator.add_batch(target_cpu.numpy(), torch.argmax(output, 1).cpu().numpy())
tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
self.writer.add_scalar('train_iter/learning_rate', self.scheduler.actual_lr, iter)
self.writer.add_scalar('train_iter/loss', loss.item(), iter)
if self.args.v == '3.2' and i % max(1, num_img_tr // 5) == 0:
self.writer.add_histogram("train_iter/LastConvFeatNorm", list(self.model.module.decoder.last_conv.parameters())[0].norm(dim=0) , i)
# Show 5 * 9 inference results each epoch
if self.args.viz_images_per_epoch and i % max(1, num_img_tr // self.args.viz_images_per_epoch) == 0:
global_step = i + num_img_tr * epoch
prefix = "e%02d/" % epoch
if self.args.use_dcr:
self.summary.visualize_image(self.writer, self.args.dataset, image, hidden, output, i, prefix=prefix)
else:
self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, i, prefix=prefix)
grid = torchvision.utils.make_grid(
decode_seg_map_sequence(target[:9].detach().cpu().numpy(), dataset=self.args.dataset),
3, normalize=False, range=(0, 255)
)
self.writer.add_image(prefix + "Seeds", grid, i)
self.writer.add_histogram(prefix + "PredHist", F.log_softmax(output, dim=1), i)
best_class = torch.argmax(probs[:9].detach(), dim=1)
best_prob = torch.max(probs[:9].detach(), dim=1, keepdim=True)[0]
grid = torchvision.utils.make_grid(
decode_seg_map_sequence(best_class.cpu().numpy(), dataset=self.args.dataset) * best_prob.cpu(),
3, normalize=False, range=(0, 255)
)
self.writer.add_image(prefix + "PredictionCertanty", grid, i)
self.writer.add_scalar('train/mIoU', self.evaluator.Mean_Intersection_over_Union(), epoch)
if self.evaluator_full:
self.writer.add_scalar('train/mIoU_full', self.evaluator_full.Mean_Intersection_over_Union(), epoch)
self.writer.add_scalar('train/loss', train_loss, epoch)
self.writer.add_scalar('train_gd/unary_loss', train_relaxed_un, epoch)
self.writer.add_scalar('train_gd/smooth_loss', train_relaxed_sm, epoch)
self.writer.add_scalar('train_gd/total_loss', train_relaxed_un + train_relaxed_sm, epoch)
if self.args.use_dcr:
self.writer.add_scalar('train_p1/unary_loss', train_unary_p1, epoch)
self.writer.add_scalar('train_p1/smooth_loss', train_smooth_p1, epoch)
self.writer.add_scalar('train_p1/total_loss', train_unary_p1 + train_smooth_p1, epoch)
self.writer.add_scalar('train_p1_up/unary_loss', train_unary_p1_upsample, epoch)
self.writer.add_scalar('train_p1_up/smooth_loss', train_smooth_p1_upsample, epoch)
self.writer.add_scalar('train_p1_up/total_loss', train_unary_p1_upsample + train_smooth_p1_upsample, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch + 1, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
sys.stdout.flush()
#if self.args.no_val:
if self.args.save_interval:
# save checkpoint every interval epoch
is_best = False
if (epoch + 1) % self.args.save_interval == 0:
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, is_best, filename='checkpoint_epoch_{}.pth.tar'.format(str(epoch+1)))
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--train_dataset_suffix', type=str, default='',
help='train mask directory suffix')
parser.add_argument('--use-sbd', action='store_true', default=False,
                        help='whether to use SBD dataset (default: False)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='l2',
choices=['ce', 'focal', 'l2', 'l1', 'margin0'],
help='loss func type (default: l2)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--last-layer-mult-lr', type=float, default=10,
help='last layer learning rate multiplier')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
                        help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')
# model saving option
parser.add_argument('--save-interval', type=int, default=None,
help='save model interval in epochs')
parser.add_argument('--viz-images-per-epoch', type=int, default=5,
help='Number of viz images to save per epoch')
# rloss options
parser.add_argument('--densecrfloss', type=float, default=0,
metavar='M', help='densecrf loss (default: 0)')
parser.add_argument('--rloss-scale',type=float,default=1.0,
help='scale factor for rloss input, choose small number for efficiency, domain: (0,1]')
parser.add_argument('--sigma-rgb',type=float,default=15.0,
help='DenseCRF sigma_rgb')
parser.add_argument('--sigma-xy',type=float,default=80.0,
help='DenseCRF sigma_xy')
parser.add_argument('--relaxation', type=str, default='bilinear',
choices=['bilinear', 'squared', 'tv'],
help='Potts relaxation type (default: bilinear)')
parser.add_argument('--relaxation-target', type=str, default='Prob',
choices=['Prob', 'Logits', 'LogProb'])
parser.add_argument('--full-supervision', action='store_true', default=False)
# dcr settings
parser.add_argument('--use-dcr', type=str, default=None,
choices=[None, 'AlphaExpansion'],
help='Type of DCR/Trust-Region to use')
parser.add_argument('--alpha-use-edge-predictor',type=str,default=None)
parser.add_argument('--proposals',type=str,default=None)
parser.add_argument('--tr-soften',type=float,default=0.0)
parser.add_argument('--tr-error-model',type=str,default=None,
choices=['Const', 'Uniform', 'Poly0', 'ADM'])
parser.add_argument('--tr-error-prob',type=float,default=0.5)
parser.add_argument('--gc-max-iters',type=int,default=5,
help='Maximum number of graph cut iterations')
parser.add_argument('--gc-scale',type=float,default=1,
help='Scale input to graph cut')
parser.add_argument('--potts-weight',type=float,default=1.0,
help='Weight of potts term')
parser.add_argument('--tr-weight',type=float,default=1.0,
help='Weight of TR term')
parser.add_argument('--tr-restricted', action='store_true', default=False)
parser.add_argument('--hidden-update',type=int,default=None,
help='Epoch frequency of phase1 solution updates')
parser.add_argument('--use-pce-at-tr', type=float, default=0,
                        help='weight of an extra partial cross-entropy term on seeds during TR training (default: 0)')
parser.add_argument('--single-image-training', type=int, default=None)
parser.add_argument('--train-shuffle', type=int, default=1)
parser.add_argument('--no-aug', action='store_true', default=False)
parser.add_argument('--use-linear-relaxation', action='store_true', default=False)
parser.add_argument('--entropy-loss', type=float, default=0.0)
parser.add_argument('--precompute-last-layer', action='store_true', default=False)
parser.add_argument('--v', type=str, default=None)
args = parser.parse_args()
args.train_shuffle = bool(args.train_shuffle)
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
# default settings for epochs, batch_size and lr
if args.epochs is None:
epoches = {
'coco': 30,
'cityscapes': 200,
'pascal': 50,
}
args.epochs = epoches[args.dataset.lower()]
if args.batch_size is None:
args.batch_size = 4 * len(args.gpu_ids)
if args.test_batch_size is None:
args.test_batch_size = args.batch_size
if args.lr is None:
lrs = {
'coco': 0.1,
'cityscapes': 0.01,
'pascal': 0.007,
}
args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size
if args.checkname is None:
args.checkname = 'deeplab-'+str(args.backbone)
print(args)
torch.manual_seed(args.seed)
trainer = Trainer(args)
print('Starting Epoch:', trainer.args.start_epoch)
    print('Total Epochs:', trainer.args.epochs)
if not trainer.args.no_val:
trainer.validation(0)
for epoch in range(trainer.args.start_epoch, trainer.args.epochs):
start_time = time.time()
trainer.training(epoch)
trainer.writer.add_scalar('train/time_per_epoch', time.time() - start_time, epoch)
if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):
trainer.validation(epoch + 1)
trainer.writer.close()
if __name__ == "__main__":
main()
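# Example invocation (added; values are placeholders, not from the original
# file):
#   python train_with_dcr.py --dataset pascal --use-dcr AlphaExpansion \
#       --potts-weight 1.0 --tr-weight 1.0 --tr-error-model Uniform \
#       --tr-error-prob 0.5 --hidden-update 1 --checkname dcr-resnet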
| 26,384 | 46.455036 | 147 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/train_withdensecrfloss.py
|
import argparse
import os, time
import numbers
import json
import numpy as np
from tqdm import tqdm
from mypath import Path
from dataloaders import make_data_loader
from dataloaders.custom_transforms import denormalizeimage
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from train import TrainerBase
from DenseCRFLoss import DenseCRFLoss
import GridCRFLoss
class Trainer(TrainerBase):
def __init__(self, args):
self.args = args
# Define Saver
self.saver = Saver(args)
self.saver.save_experiment_config()
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.saver.experiment_dir)
self.writer = self.summary.create_summary()
self.writer.add_text("Args/experiment_dir", self.saver.experiment_dir)
for key, value in vars(args).items():
if isinstance(value, numbers.Number):
self.writer.add_scalar("Args/" + key, value)
else:
self.writer.add_text("Args/" + key, str(value))
self.writer.add_text("Args/All", json.dumps(vars(args), indent=4, sort_keys=True))
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)
super().__init__(args, self.nclass)
# Define network
model = DeepLab(num_classes=self.nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn,
v=args.v)
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * args.last_layer_mult_lr}]
# Define Optimizer
optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=args.nesterov)
# Define Criterion
# whether to use class balanced weights
if args.use_balanced_weights:
classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')
if os.path.isfile(classes_weights_path):
weight = np.load(classes_weights_path)
else:
weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)
weight = torch.from_numpy(weight.astype(np.float32))
else:
weight = None
self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
self.model, self.optimizer = model, optimizer
self.extra_losses = []
if args.densecrfloss > 0:
self.extra_losses.append(
DenseCRFLoss(weight=args.densecrfloss, sigma_rgb=args.sigma_rgb, sigma_xy=args.sigma_xy, scale_factor=args.rloss_scale)
)
if args.bilineargridpottsloss > 0:
self.extra_losses.append(
GridCRFLoss.GridPottsLoss(weight=args.bilineargridpottsloss, scale_factor=args.rloss_scale, relaxation=GridCRFLoss.BilinearPottsRelaxation)
)
if args.squaregridpottsloss > 0:
self.extra_losses.append(
GridCRFLoss.GridPottsLoss(weight=args.squaregridpottsloss, scale_factor=args.rloss_scale, relaxation=GridCRFLoss.SquaredPottsRelaxation)
)
if args.tvgridpottsloss > 0:
self.extra_losses.append(
GridCRFLoss.GridPottsLoss(weight=args.tvgridpottsloss, scale_factor=args.rloss_scale, relaxation=GridCRFLoss.TVPottsRelaxation)
)
for loss in self.extra_losses:
print(loss)
# Define Evaluator
self.evaluator = Evaluator(self.nclass)
# Define lr scheduler
self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
args.epochs, len(self.train_loader))
# Using cuda
if args.cuda:
self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = 0.0
if args.resume is not None:
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# Clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
if args.precompute_last_layer:
self.train_last_layer()
def training(self, epoch):
train_loss = 0.0
train_celoss = 0.0
train_crfloss = 0.0
self.model.train()
tbar = tqdm(self.train_loader)
num_img_tr = len(self.train_loader)
softmax = nn.Softmax(dim=1)
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
croppings = (target!=254).float()
target[target==254]=255
if self.args.cuda:
image, target = image.cuda(), target.cuda()
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
output = self.model(image)
celoss = self.criterion(output, target)
            loss = celoss + 0  # "+ 0" copies the tensor so "+=" below does not mutate celoss in place
if len(self.extra_losses) > 0:
probs = softmax(output)
denormalized_image = denormalizeimage(sample['image'], mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
for loss_fun in self.extra_losses:
crfloss = loss_fun(denormalized_image,probs,croppings)
if self.args.cuda:
crfloss = crfloss.cuda()
                loss += crfloss.sum()  # handles both the 1-element DenseCRF tensor and the 0-dim grid losses
train_crfloss += crfloss.item()
loss.backward()
self.optimizer.step()
train_loss += loss.item()
train_celoss += celoss.item()
tbar.set_description('Train loss: %.3f = CE loss %.3f + CRF loss %.3f'
% (train_loss / (i + 1),train_celoss / (i + 1),train_crfloss / (i + 1)))
self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)
            # Show viz_images_per_epoch inference results each epoch
            if self.args.viz_images_per_epoch and i % max(1, num_img_tr // self.args.viz_images_per_epoch) == 0:
global_step = i + num_img_tr * epoch
self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)
self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
#if self.args.no_val:
if self.args.save_interval:
# save checkpoint every interval epoch
is_best = False
if (epoch + 1) % self.args.save_interval == 0:
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, is_best, filename='checkpoint_epoch_{}.pth.tar'.format(str(epoch+1)))
def main():
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Training")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal',
choices=['pascal', 'coco', 'cityscapes'],
help='dataset name (default: pascal)')
parser.add_argument('--train_dataset_suffix', type=str, default='',
help='train mask directory suffix')
parser.add_argument('--use-sbd', action='store_true', default=False,
                        help='whether to use SBD dataset (default: False)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513,
help='base image size')
parser.add_argument('--crop-size', type=int, default=513,
help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce',
choices=['ce', 'focal'],
help='loss func type (default: ce)')
# training hyper params
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False,
help='whether to use balanced weights (default: False)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
choices=['poly', 'step', 'cos'],
help='lr scheduler mode: (default: poly)')
parser.add_argument('--last-layer-mult-lr', type=float, default=10,
help='last layer learning rate multiplier')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=5e-4,
metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False,
help='whether use nesterov (default: False)')
# cuda, seed and logging
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default=False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--eval-interval', type=int, default=1,
                        help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False,
help='skip validation during training')
# model saving option
parser.add_argument('--save-interval', type=int, default=None,
help='save model interval in epochs')
parser.add_argument('--viz-images-per-epoch', type=int, default=5,
help='Number of viz images to save per epoch')
# rloss options
parser.add_argument('--densecrfloss', type=float, default=0,
metavar='M', help='densecrf loss (default: 0)')
parser.add_argument('--rloss-scale',type=float,default=1.0,
help='scale factor for rloss input, choose small number for efficiency, domain: (0,1]')
parser.add_argument('--sigma-rgb',type=float,default=15.0,
help='DenseCRF sigma_rgb')
parser.add_argument('--sigma-xy',type=float,default=80.0,
help='DenseCRF sigma_xy')
parser.add_argument('--bilineargridpottsloss', type=float, default=0,
help='bilinear relaxation of sparse/grid Potts loss (default: 0)')
parser.add_argument('--squaregridpottsloss', type=float, default=0,
help='square relaxation of sparse/grid Potts loss (default: 0)')
parser.add_argument('--tvgridpottsloss', type=float, default=0,
help='TV (abs) relaxation of sparse/grid Potts loss (default: 0)')
parser.add_argument('--precompute-last-layer', action='store_true', default=False)
parser.add_argument('--v', type=str, default=None)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
# default settings for epochs, batch_size and lr
if args.epochs is None:
epoches = {
'coco': 30,
'cityscapes': 200,
'pascal': 50,
}
args.epochs = epoches[args.dataset.lower()]
if args.batch_size is None:
args.batch_size = 4 * len(args.gpu_ids)
if args.test_batch_size is None:
args.test_batch_size = args.batch_size
if args.lr is None:
lrs = {
'coco': 0.1,
'cityscapes': 0.01,
'pascal': 0.007,
}
args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size
if args.checkname is None:
args.checkname = 'deeplab-'+str(args.backbone)
print(args)
torch.manual_seed(args.seed)
trainer = Trainer(args)
print('Starting Epoch:', trainer.args.start_epoch)
    print('Total Epochs:', trainer.args.epochs)
if not trainer.args.no_val:
trainer.validation(0)
for epoch in range(trainer.args.start_epoch, trainer.args.epochs):
start_time = time.time()
trainer.training(epoch)
trainer.writer.add_scalar('train/time_per_epoch', time.time() - start_time, epoch)
if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):
trainer.validation(epoch + 1)
trainer.writer.close()
if __name__ == "__main__":
main()
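# Example invocation (added; values are placeholders, not from the original
# file):
#   python train_withdensecrfloss.py --dataset pascal --densecrfloss 2e-9 \
#       --rloss-scale 0.5 --sigma-rgb 15 --sigma-xy 100 --checkname rloss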
| 16,448 | 44.31405 | 155 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/train.py
|
import numbers
import json
from tqdm import tqdm
import torch, torchvision
import torch.nn.functional as F
from modeling.deeplab import *
from dataloaders.utils import decode_seg_map_sequence, normalize_image_to_range
from dataloaders import make_data_loader
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
class TrainerBase(object):
def __init__(self, args, nclass):
self.args = args
self.nclass = nclass
# Define Saver
self.saver = Saver(args)
self.saver.save_experiment_config()
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.saver.experiment_dir)
self.writer = self.summary.create_summary()
# Log program arguments
self.writer.add_text("Args/experiment_dir", self.saver.experiment_dir)
for key, value in vars(args).items():
if isinstance(value, numbers.Number):
self.writer.add_scalar("Args/" + key, value)
else:
self.writer.add_text("Args/" + key, str(value))
self.writer.add_text("Args/All", json.dumps(vars(args), indent=4, sort_keys=True))
# Define Evaluator
self.evaluator = Evaluator(self.nclass)
# Define lr scheduler
self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
args.epochs,
# args.hidden_update or args.epochs,
len(self.train_loader))
def train_last_layer(self):
print('\n=>Computing the last layer')
self.model.eval()
kwargs = {'num_workers': self.args.workers, 'pin_memory': True}
train_loader = make_data_loader(self.args, None, **kwargs)[0]
num_img_tr = len(train_loader)
tbar = tqdm(train_loader)
features = None
count = None
with torch.no_grad():
for i, sample in enumerate(tbar):
image, target_cpu = sample['image'], sample['label']
inside = target_cpu != 254
croppings = inside.float()
outside = target_cpu == 254
target_cpu[outside] = 255
image.transpose(0, 1)[:, outside] = 0
target = target_cpu
if self.args.cuda:
image, target = image.cuda(), target_cpu.cuda()
target_long = target.long()
output = self.model(image)
last_layer = self.model.module.decoder.last_layer
if features is None:
features = torch.zeros(
[last_layer.shape[1], self.nclass],
device=output.device
)
features2 = torch.zeros_like(features)
count = torch.zeros(
[1, self.nclass],
device=output.device
)
for f, t in zip(last_layer, target_long):
f = F.interpolate(f.unsqueeze(0), size=image.size()[2:], mode='bilinear', align_corners=True).squeeze(0)
f2 = f.reshape((f.shape[0], -1))
t = t.reshape((-1,))
good = t < 255
f2 = f2[:, good]
t = t[good]
features.scatter_add_(1, t[None,:].repeat(f2.shape[0], 1), f2)
features2.scatter_add_(1, t[None,:].repeat(f2.shape[0], 1), f2 ** 2)
count += torch.bincount(t, minlength=self.nclass)[None,:]
tbar.set_description('Computing last layer features, norm of sum: %f' % features.norm())
features /= count
# features2 -= (features2 - features ** 2 * count).sum(dim=1, keepdim=True) / count.sum()
# features2 = features2 / count - features ** 2
features2 = (features2 - features ** 2 * count).sum(dim=1, keepdim=True) / count.sum()
print("Sigma shape:", features2.shape)
print("Sigma range:", features2.min(), features2.max())
print("Weight norm per class:", features.norm(dim=0) ** 2 / 2)
print("Weight norm per feature:", features.norm(dim=1) ** 2 / 2)
features2 = 0.5 * features2 ** -1
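        # What follows sets the final 1x1 conv to a closed-form Gaussian
        # (nearest-mean) classifier: with per-class feature means mu_c and a
        # shared isotropic variance (features2 = 1/(2*sigma^2) at this point),
        # w_c = features2 * mu_c and b_c = -||sqrt(features2) * mu_c||^2 / 2,
        # i.e. the linear form of -||x - mu_c||^2 up to an overall temperature.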
for name, param in self.model.module.decoder.last_conv[-1].named_parameters():
if name == 'weight':
param.data[...] = (features2 * features).transpose(0,1)[..., None, None]
elif name == 'bias':
param.data[...] = -(features2 ** 0.5 * features).norm(dim=0) ** 2 / 2
print(name, type(param), param.size())
def validation(self, epoch):
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc='\r')
test_loss = 0.0
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
target[target==254]=255
if self.args.cuda:
image, target = image.cuda(), target.cuda()
with torch.no_grad():
output = self.model(image)
loss = self.criterion(output, target.byte())
test_loss += loss.item()
tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
pred = output.data.cpu()
target = target.cpu()
pred = torch.argmax(pred, axis=1)
if i < self.args.viz_images_per_epoch:
vis_image = normalize_image_to_range(image.cpu())[0]
vis_gt = decode_seg_map_sequence(target, dataset=self.args.dataset)[0]
vis_pred = decode_seg_map_sequence(pred, dataset=self.args.dataset)[0]
grid = torchvision.utils.make_grid([vis_image, vis_gt, vis_pred], 1)
self.writer.add_image('val/Sample_%01d' % i, grid, epoch)
# Add batch sample into evaluator
self.evaluator.add_batch(target.numpy(), pred.numpy())
# Fast test during the training
Acc = self.evaluator.Pixel_Accuracy()
Acc_class = self.evaluator.Pixel_Accuracy_Class()
mIoU = self.evaluator.Mean_Intersection_over_Union()
FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
self.writer.add_scalar('val/mIoU', mIoU, epoch)
self.writer.add_scalar('val/Acc', Acc, epoch)
self.writer.add_scalar('val/Acc_class', Acc_class, epoch)
self.writer.add_scalar('val/fwIoU', FWIoU, epoch)
print('Validation:')
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.val_loader.batch_size + image.data.shape[0]))
print("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, FWIoU))
print('Loss: %.3f' % test_loss)
new_pred = mIoU
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
# self.saver.save_checkpoint({
# 'epoch': epoch,
# 'state_dict': self.model.module.state_dict(),
# 'optimizer': self.optimizer.state_dict(),
# 'best_pred': self.best_pred,
# }, is_best)
| 7,395 | 41.751445 | 124 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/AlphaExpansion.py
|
import torch
import alphaexpansion
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import warnings
class AlphaExpansion(nn.Module):
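    """Move-making optimization of (ce_weight * unary + potts_weight * grid
    Potts) energies via the compiled `alphaexpansion` extension (runs on CPU);
    returns the resulting labeling together with its unary and smoothness
    energy terms."""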
def __init__(self, max_iter, potts_weight, ce_weight=1, restrict=False, scale=1):
super(AlphaExpansion, self).__init__()
self.max_iter, self.potts_weight, self.ce_weight = max_iter, potts_weight, ce_weight
self.restrict = restrict
self.scale_factor = scale
print("AlphaExpansion module is set up")
def forward(self, unary, images, ROI, seeds, x0=None, **_):
is_cuda = unary.is_cuda
if ROI.ndim < 4:
ROI = ROI[:, None, :, :]
if seeds.ndim < 4:
seeds = seeds[:, None, :, :]
assert self.scale_factor <= 1
if self.scale_factor < 1:
warnings.warn("AlphaExpansion: scale_factor is not 1, the interpolated result may suffer from rounding errors")
orig_size = images.shape[-2:]
kwargs = {"scale_factor": self.scale_factor, "recompute_scale_factor": False}
images = F.interpolate(images, **kwargs)
unary = F.interpolate(unary, mode='bilinear', align_corners=False, **kwargs)
ROI = F.interpolate(ROI, **kwargs)
seeds = F.interpolate(seeds.float(), **kwargs).byte()
if x0 is not None:
x0 = F.interpolate(x0.float(), **kwargs).byte()
x0[seeds != 255] = seeds[seeds != 255]
unary = unary * self.ce_weight
if self.restrict:
N, C = unary.shape[:2]
if N > 1:
present = seeds.clone().long().reshape(N, -1)
present[present == 255] = 0
restricted = torch.zeros([N, C]).scatter(1, present, 1) == 0
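                # forbid labels that never appear in this image's seeds by a
                # large unary (potts_weight * 9, presumably chosen to exceed
                # anything the pairwise term could recoup)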
unary[restricted] = self.potts_weight * 9
else:
present = seeds.long().unique()
if (present != 255).byte().sum() <= 1:
present = torch.arange(C+1, dtype=torch.int64)
present[-1] = 255
to_new_label = torch.cumsum(
torch.zeros(256).scatter(0, present, 1),
dim=0
) - 1
unlabeled = to_new_label[255].item()
to_new_label[255] = 255
seeds = torch.index_select(to_new_label, 0, seeds.reshape(-1).long()).reshape(seeds.shape)
if x0 is not None:
x0 = torch.index_select(to_new_label, 0, x0.reshape(-1).long()).reshape(x0.shape)
if present[-1] == 255:
unary = unary[:, present[:-1], ...]
else:
unary = unary[:, present, ...]
out = np.zeros(seeds.shape, np.float32)
unary_energy = np.zeros(seeds.shape[:1], np.float32)
smooth_energy = np.zeros(seeds.shape[:1], np.float32)
images, ROI, seeds, unary = [x.detach().cpu().numpy() for x in [images, ROI, seeds, unary]]
if x0 is None:
x0 = np.zeros(seeds.shape, np.float32)
# x0 = np.argmin(unary, 1)[:,None,:,:].astype(np.float32)
else:
            x0 = x0.float().cpu().numpy()  # match the float32 buffers used elsewhere
alphaexpansion.run_expansion(
images, x0, ROI, seeds, unary,
self.max_iter, self.potts_weight, out, unary_energy, smooth_energy)
out[ROI == 0] = 255
result = torch.tensor(out)
if self.restrict:
if N > 1:
present2 = result.reshape(N, -1)
                present2[present2 == 255] = 0
restricted2 = torch.zeros([N, C]).scatter(1, present2.long(), 1) == 0
if (restricted & ~restricted2).any():
print ("Failed to respect the label restriction")
else:
result[result == 255] = unlabeled
result = present[result.reshape(-1).long()].reshape(result.shape)
if is_cuda:
result = result.cuda()
if self.scale_factor < 1:
result = F.interpolate(result.float(), size=orig_size)
return result.byte(), torch.tensor(unary_energy), torch.tensor(smooth_energy)
| 4,248 | 41.49 | 123 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/custom_transforms.py
|
import torch
import torch.nn.functional as F
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = torch.tensor(mean)[:,None,None]
self.std = torch.tensor(std)[:,None,None]
def __call__(self, sample):
result = {}
for im_key in sample:
if 'image' in im_key:
img = sample[im_key]
img /= 255.0
img -= self.mean
img /= self.std
result[im_key] = img
else:
result[im_key] = sample[im_key]
return result
class Denormalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = torch.tensor(mean)[:,None,None] * 255
self.std = torch.tensor(std)[:,None,None] * 255
def __call__(self, sample):
result = {}
for im_key in sample:
if 'image' in im_key:
img = sample[im_key] * self.std + self.mean
torch.clamp(img, 0, 255, out=img)
result[im_key] = img
else:
result[im_key] = sample[im_key]
return result
class NormalizeImage(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
def __call__(self, img):
img = np.array(img).astype(np.float32)
img /= 255.0
img -= self.mean
img /= self.std
return img
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
result = {}
for key in sample:
val = sample[key]
if not torch.is_tensor(val):
val = np.array(val).astype(np.float32)
if 'image' in key:
val = val.transpose((2, 0, 1))
val = torch.from_numpy(val).float()
result[key] = val
return result
class ToPIL(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
result = {}
for key in sample:
val = sample[key]
val = val.numpy().astype(np.float32)
if 'image' in key:
val = val.transpose((2, 0, 1))
val = torch.from_numpy(val).float()
result[key] = val
return result
class ToTensorImage(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, img):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
img = np.array(img).astype(np.float32).transpose((2, 0, 1))
img = torch.from_numpy(img).float()
return img
class RandomHorizontalFlip(object):
def __call__(self, sample):
if random.random() < 0.5:
return sample
return { key: self.flip(sample[key]) if 'image' in key or 'label' in key else sample[key]
for key in sample }
def flip(self, datum):
if torch.is_tensor(datum):
return datum.flip(-1)
return datum.transpose(Image.FLIP_LEFT_RIGHT)
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, sample):
rotate_degree = random.uniform(-1*self.degree, self.degree)
return { key: sample[key].rotate(rotate_degree, Image.BILINEAR) if 'image' in key or 'label' in key else sample[key]
for key in sample }
class RandomGaussianBlur(object):
def __call__(self, sample):
if random.random() < 0.5:
return sample
result = {}
for key in sample:
val = sample[key]
if key == 'image':
val = val.filter(ImageFilter.GaussianBlur(
radius=random.random()))
result[key] = val
return result
class RandomScaleCrop(object):
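    """Randomly scale the short edge (between 0.5x and 2x base_size when
    random=True), pad if smaller than crop_size, then crop to crop_size.
    Handles both PIL images and label tensors; padded label pixels receive
    label_fill (254 by default, used downstream as the 'outside crop' marker).
    """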
def __init__(self, base_size, crop_size, label_fill=254, image_fill=0, random=True):
self.base_size = base_size
self.crop_size = crop_size
self.label_fill = label_fill
self.image_fill = image_fill
self.random = random
def __call__(self, sample):
# random scale (short edge)
w, h = sample['image'].size
short_size = min(w, h)
if self.random:
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
for key in sample:
if 'image' in key:
sample[key] = sample[key].resize((ow, oh), Image.BILINEAR)
elif 'label' in key:
if torch.is_tensor(sample[key]):
sample[key] = F.interpolate(sample[key][None,...], size=(oh, ow))[0]
else:
sample[key] = sample[key].resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < self.crop_size:
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
for key in sample:
fill = None
if 'image' in key:
fill = self.image_fill
elif 'label' in key:
fill = self.label_fill
if fill is not None:
padw2, padh2 = padw // 2, padh // 2
if torch.is_tensor(sample[key]):
sample[key] = F.pad(
sample[key],
(padw2, padw - padw2, padh2, padh - padh2),
value=fill,
)
else:
sample[key] = ImageOps.expand(
sample[key],
border=(padw2, padh2, padw - padw2, padh - padh2),
fill=fill,
)
# random crop crop_size
w, h = sample['image'].size
x1 = random.randint(0, w - self.crop_size) if self.random else (w - self.crop_size) // 2
y1 = random.randint(0, h - self.crop_size) if self.random else (h - self.crop_size) // 2
for key in sample:
fill = None
if 'image' in key or 'label' in key:
if torch.is_tensor(sample[key]):
sample[key] = sample[key][..., y1:y1 + self.crop_size, x1:x1 + self.crop_size]
else:
sample[key] = sample[key].crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return sample
class Pad(object):
def __init__(self, crop_size, im_fill=0, lb_fill=254):
self.crop_size = crop_size
self.lb_fill = lb_fill
self.im_fill = im_fill
def __call__(self, sample):
result = {}
for key in sample:
if 'label' not in key and 'image' not in key:
result[key] = sample[key]
continue
arr = sample[key]
oh, ow = arr.shape[-2:]
fill = self.lb_fill if 'label' in key else self.im_fill
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
arr = F.pad(
arr,
(padw//2, padw - padw//2, padh//2, padh - padh//2),
value=fill,
)
result[key] = arr
return result
class FixScaleCrop(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, sample):
img = sample['image']
mask = sample['label']
w, h = img.size
if w > h:
oh = self.crop_size
ow = int(1.0 * w * oh / h)
else:
ow = self.crop_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = img.size
x1 = int(round((w - self.crop_size) / 2.))
y1 = int(round((h - self.crop_size) / 2.))
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'image': img,
'label': mask}
class FixScaleCropImage(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img):
w, h = img.size
if w > h:
oh = self.crop_size
ow = int(1.0 * w * oh / h)
else:
ow = self.crop_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
# center crop
w, h = img.size
x1 = int(round((w - self.crop_size) / 2.))
y1 = int(round((h - self.crop_size) / 2.))
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return img
class FixedResize(object):
def __init__(self, size):
self.size = (size, size) # size: (h, w)
def __call__(self, sample):
result = {}
for key in sample:
if 'label' not in key and 'image' not in key:
result[key] = sample[key]
continue
arr = sample[key]
method = Image.NEAREST if 'label' in key else Image.BILINEAR
arr = arr.resize(self.size, method)
result[key] = arr
return result
def denormalizeimage(images, mean=(0., 0., 0.), std=(1., 1., 1.)):
"""Denormalize tensor images with mean and standard deviation.
Args:
images (tensor): N*C*H*W
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
images = images.cpu().numpy()
# N*C*H*W to N*H*W*C
images = images.transpose((0,2,3,1))
images *= std
images += mean
images *= 255.0
# N*H*W*C to N*C*H*W
images = images.transpose((0,3,1,2))
return torch.tensor(images)
| 10,938 | 31.363905 | 124 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/utils.py
|
import numpy as np
import torch
def decode_seg_map_sequence(label_masks, dataset='pascal'):
rgb_masks = []
for label_mask in label_masks:
rgb_mask = decode_segmap(label_mask, dataset)
rgb_masks.append(rgb_mask)
rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
return rgb_masks
def decode_segmap(label_mask, dataset, plot=False):
"""Decode segmentation class labels into a color image
Args:
label_mask (np.ndarray): an (M,N) array of integer values denoting
the class label at each spatial location.
plot (bool, optional): whether to show the resulting color image
in a figure.
Returns:
(np.ndarray, optional): the resulting decoded color image.
"""
if dataset == 'pascal' or dataset == 'coco':
n_classes = 21
label_colours = get_pascal_labels()
elif dataset == 'cityscapes':
n_classes = 19
label_colours = get_cityscapes_labels()
else:
raise NotImplementedError
M, N = label_mask.shape[-2:]
r = np.ones([M, N], dtype=np.float32)
g = np.ones([M, N], dtype=np.float32)
b = np.ones([M, N], dtype=np.float32)
for ll in range(0, n_classes):
if label_mask.ndim == 3:
r += label_mask[ll] * label_colours[ll, 0]
g += label_mask[ll] * label_colours[ll, 1]
b += label_mask[ll] * label_colours[ll, 2]
else:
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros([M, N, 3], dtype=np.float32)
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
if plot:
import matplotlib.pyplot as plt
plt.imshow(rgb)
plt.show()
else:
return rgb
def encode_segmap(mask):
"""Encode segmentation label images as pascal classes
Args:
mask (np.ndarray): raw segmentation label image of dimension
(M, N, 3), in which the Pascal classes are encoded as colours.
Returns:
(np.ndarray): class map with dimensions (M,N), where the value at
a given location is the integer denoting the class index.
"""
mask = mask.astype(int)
label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
for ii, label in enumerate(get_pascal_labels()):
label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
label_mask = label_mask.astype(int)
return label_mask
def get_cityscapes_labels():
return np.array([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32]])
def get_pascal_labels():
"""Load the mapping that associates pascal classes with label colors
Returns:
np.ndarray with dimensions (21, 3)
"""
return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]])
def normalize_image_to_range(image, range=(0, 1)):
    # per-image min/max over the spatial dims
    mn = image.min(dim=-1)[0].min(dim=-1)[0][..., None, None]
    mx = image.max(dim=-1)[0].max(dim=-1)[0][..., None, None]
    result = range[0] + (range[1] - range[0]) * (image - mn) / (mx - mn)
    # constant images would divide by zero above; map them to range[0]
    result[mx[..., 0, 0] == mn[..., 0, 0], :, :] = range[0]
    return result
| 3,959 | 33.137931 | 84 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/__init__.py
|
from torch.utils.data import DataLoader, dataset
from dataloaders.datasets import combine_dbs, indexed_dataset
import numpy as np
def make_data_loader(args, proposal_generator=None, **kwargs):
def wrap_dataset(set):
if 'single_image_training' in args and args.single_image_training is not None:
if args.single_image_training >= 0:
indices = [args.single_image_training]
else:
state = np.random.RandomState(575393350)
indices = state.choice(len(set), -args.single_image_training, replace=False)
print("Training on subset of images %s" % (indices,))
set = dataset.Subset(set, indices)
return indexed_dataset.IndexedDataset(set)
if 'train_shuffle' in args:
shuffle = args.train_shuffle
else:
shuffle = False
if args.dataset == 'pascal':
from dataloaders.datasets import pascal
if proposal_generator is None:
train_set = pascal.VOCSegmentation(args, split='train')
else:
train_set = pascal.VOCProposalSegmentation(proposal_generator, args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            from dataloaders.datasets import sbd
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_set = wrap_dataset(train_set)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=shuffle, **kwargs)
val_loader = DataLoader(val_set, batch_size=1, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'cityscapes':
from dataloaders.datasets import cityscapes
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(wrap_dataset(train_set), batch_size=args.batch_size, shuffle=shuffle, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
elif args.dataset == 'coco':
from dataloaders.datasets import coco
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(wrap_dataset(train_set), batch_size=args.batch_size, shuffle=shuffle, **kwargs)
val_loader = DataLoader(val_set, batch_size=1, shuffle=False, **kwargs)
test_loader = None
else:
raise NotImplementedError
return train_loader, val_loader, test_loader, num_class
| 2,887 | 44.84127 | 113 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/cityscapes.py
|
import os
import numpy as np
from PIL import Image
from torch.utils import data
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
class CityscapesSegmentation(data.Dataset):
NUM_CLASSES = 19
def __init__(self, args, root=Path.db_root_dir('cityscapes'), split="train"):
self.root = root
self.split = split
self.args = args
self.files = {}
self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest', 'gtFine', self.split)
self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png')
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence', \
'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain', \
'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
'motorcycle', 'bicycle']
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(self.NUM_CLASSES)))
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(self.annotations_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')
_img = Image.open(img_path).convert('RGB')
_tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
_tmp = self.encode_segmap(_tmp)
_target = Image.fromarray(_tmp)
sample = {'image': _img, 'label': _target}
if self.split == 'train':
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample)
elif self.split == 'test':
return self.transform_ts(sample)
def encode_segmap(self, mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, label_fill=255),
            tr.RandomGaussianBlur(),
            tr.ToTensor(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        return composed_transforms(sample)
    def transform_val(self, sample):
        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.ToTensor(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        return composed_transforms(sample)
    def transform_ts(self, sample):
        composed_transforms = transforms.Compose([
            tr.FixedResize(size=self.args.crop_size),
            tr.ToTensor(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        return composed_transforms(sample)
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
cityscapes_train = CityscapesSegmentation(args, split='train')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 5,370 | 35.537415 | 103 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/pascal.py
|
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
class VOCSegmentation(Dataset):
"""
PascalVoc dataset
"""
NUM_CLASSES = 21
def __init__(self,
args,
base_dir=Path.db_root_dir('pascal'),
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
if split == 'train':
self._cat_dir_full = os.path.join(self._base_dir, 'SegmentationClassAug')
if 'full_supervision' in args and args.full_supervision:
self._cat_dir = self._cat_dir_full
else:
# weak supervision with scribbles
suffix = args.train_dataset_suffix
if len(suffix) > 0:
print("Loading train masks with suffix '%s'" % suffix)
self._cat_dir = os.path.join(self._base_dir, 'pascal_2012_scribble' + suffix)
elif split == 'val':
self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
#self._cat_dir = os.path.join(self._base_dir, 'pascal_2012_scribble_val_full')
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.args = args
#_splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation')
_splits_dir = os.path.join(self._base_dir, 'ImageSets', 'SegmentationAug')
self.im_ids = []
self.images = []
self.categories = []
self.categories_full = []
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
_image = os.path.join(self._image_dir, line + ".jpg")
_cat = os.path.join(self._cat_dir, line + ".png")
assert os.path.isfile(_image), _image
assert os.path.isfile(_cat), _cat
self.im_ids.append(line)
self.images.append(_image)
self.categories.append(_cat)
if split == 'train':
_cat = os.path.join(self._cat_dir_full, line + ".png")
assert os.path.isfile(_cat), _cat
self.categories_full.append(_cat)
assert (len(self.images) == len(self.categories))
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
mean_rgb = tuple((np.array(mean) * 255).astype(np.uint8))
self.normalize = transforms.Compose([
tr.ToTensor(),
tr.Normalize(mean=mean, std=std),
])
self.denormalize = tr.Denormalize(mean=mean, std=std)
if 'no_aug' in self.args and self.args.no_aug:
self.training_transform = transforms.Compose([
tr.RandomScaleCrop(
base_size=self.args.base_size,
crop_size=self.args.crop_size,
image_fill=mean_rgb,
random=False,
),
self.normalize,
])
else:
self.training_transform = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(
base_size=self.args.base_size,
crop_size=self.args.crop_size,
image_fill=mean_rgb,
),
tr.RandomGaussianBlur(),
self.normalize,
])
self.val_transform = transforms.Compose([
self.normalize,
])
# Display stats
print('Number of images in {}: {:d}'.format(split, len(self.images)))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
_img, _target, _target_full = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
if _target_full is not None:
sample['label_full'] = _target_full
for split in self.split:
if split == "train":
return self.transform_tr(sample)
elif split == 'val':
return self.transform_val(sample)
def _make_img_gt_point_pair(self, index):
_img = Image.open(self.images[index]).convert('RGB')
_target = Image.open(self.categories[index])
_target_full = Image.open(self.categories_full[index]) if len(self.categories_full) > 0 else None
return _img, _target, _target_full
def transform_tr(self, sample):
return self.training_transform(sample)
def transform_val(self, sample):
return self.val_transform(sample)
def __str__(self):
return 'VOC2012(split=' + str(self.split) + ')'
class VOCProposalSegmentation(VOCSegmentation):
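    """VOCSegmentation variant that, on each fetch, calls the proposal
    generator (e.g. alpha-expansion over the current network's unaries) to
    produce a dense pseudo-label 'label_proposal' alongside the scribble
    'label', plus the associated unary/smoothness energies."""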
def __init__(self, proposal_generator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.proposal_generator = proposal_generator
def __getitem__(self, index):
_img, _target, _target_full = self._make_img_gt_point_pair(index)
_proposal, un, sm = self.proposal_generator(
self.normalize({'image': _img})['image'],
torch.tensor(np.array(_target)).byte(),
index
)
# _proposal = Image.fromarray(_proposal[0,0].numpy(), 'L')
sample = {'image': _img, 'label': _target,
'label_proposal': _proposal, 'un': un, 'sm': sm}
if _target_full is not None:
sample['label_full'] = _target_full
for split in self.split:
if split == "train":
return self.transform_tr(sample)
elif split == 'val':
return self.transform_val(sample)
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
voc_train = VOCSegmentation(args, split='train')
dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 7,403 | 33.598131 | 105 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/sbd.py
|
from __future__ import print_function, division
import os
import numpy as np
import scipy.io
import torch.utils.data as data
from PIL import Image
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
class SBDSegmentation(data.Dataset):
NUM_CLASSES = 21
def __init__(self,
args,
base_dir=Path.db_root_dir('sbd'),
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
self._base_dir = base_dir
self._dataset_dir = os.path.join(self._base_dir, 'dataset')
self._image_dir = os.path.join(self._dataset_dir, 'img')
self._cat_dir = os.path.join(self._dataset_dir, 'cls')
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.args = args
# Get list of all images from the split and check that the files exist
self.im_ids = []
self.images = []
self.categories = []
for splt in self.split:
with open(os.path.join(self._dataset_dir, splt + '.txt'), "r") as f:
lines = f.read().splitlines()
for line in lines:
_image = os.path.join(self._image_dir, line + ".jpg")
_categ= os.path.join(self._cat_dir, line + ".mat")
assert os.path.isfile(_image)
assert os.path.isfile(_categ)
self.im_ids.append(line)
self.images.append(_image)
self.categories.append(_categ)
assert (len(self.images) == len(self.categories))
# Display stats
print('Number of images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
return self.transform(sample)
def __len__(self):
return len(self.images)
def _make_img_gt_point_pair(self, index):
_img = Image.open(self.images[index]).convert('RGB')
_target = Image.fromarray(scipy.io.loadmat(self.categories[index])["GTcls"][0]['Segmentation'][0])
return _img, _target
def transform(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.ToTensor(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
return composed_transforms(sample)
def __str__(self):
return 'SBDSegmentation(split=' + str(self.split) + ')'
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
sbd_train = SBDSegmentation(args, split='train')
dataloader = DataLoader(sbd_train, batch_size=2, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 4,081 | 30.643411 | 106 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/__init__.py
| 0 | 0 | 0 |
py
|
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/indexed_dataset.py
|
import torch.utils.data.dataset
class IndexedDataset(torch.utils.data.dataset.Dataset):
def __init__(self, base):
self.base = base
def __getitem__(self, index):
sample = self.base[index]
sample["index"] = index
return sample
def __len__(self):
return len(self.base)
| 321 | 22 | 55 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/combine_dbs.py
|
import torch.utils.data as data
class CombineDBs(data.Dataset):
NUM_CLASSES = 21
def __init__(self, dataloaders, excluded=None):
self.dataloaders = dataloaders
self.excluded = excluded
self.im_ids = []
# Combine object lists
for dl in dataloaders:
for elem in dl.im_ids:
if elem not in self.im_ids:
self.im_ids.append(elem)
# Exclude
if excluded:
for dl in excluded:
for elem in dl.im_ids:
if elem in self.im_ids:
self.im_ids.remove(elem)
# Get object pointers
self.cat_list = []
self.im_list = []
new_im_ids = []
num_images = 0
for ii, dl in enumerate(dataloaders):
for jj, curr_im_id in enumerate(dl.im_ids):
if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
num_images += 1
new_im_ids.append(curr_im_id)
self.cat_list.append({'db_ii': ii, 'cat_ii': jj})
self.im_ids = new_im_ids
print('Combined number of images: {:d}'.format(num_images))
def __getitem__(self, index):
_db_ii = self.cat_list[index]["db_ii"]
_cat_ii = self.cat_list[index]['cat_ii']
sample = self.dataloaders[_db_ii].__getitem__(_cat_ii)
if 'meta' in sample.keys():
sample['meta']['db'] = str(self.dataloaders[_db_ii])
return sample
def __len__(self):
return len(self.cat_list)
def __str__(self):
include_db = [str(db) for db in self.dataloaders]
exclude_db = [str(db) for db in self.excluded]
return 'Included datasets:'+str(include_db)+'\n'+'Excluded datasets:'+str(exclude_db)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from dataloaders.datasets import pascal, sbd
import torch
import numpy as np
from dataloaders.utils import decode_segmap
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
pascal_voc_val = pascal.VOCSegmentation(args, split='val')
sbd = sbd.SBDSegmentation(args, split=['train', 'val'])
pascal_voc_train = pascal.VOCSegmentation(args, split='train')
dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 3,310 | 32.11 | 96 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/dataloaders/datasets/coco.py
|
import numpy as np
import torch
from torch.utils.data import Dataset
from mypath import Path
from tqdm import trange
import os
from pycocotools.coco import COCO
from pycocotools import mask
from torchvision import transforms
from dataloaders import custom_transforms as tr
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class COCOSegmentation(Dataset):
NUM_CLASSES = 21
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
def __init__(self,
args,
base_dir=Path.db_root_dir('coco'),
split='train',
year='2017'):
super().__init__()
ann_file = os.path.join(base_dir, 'annotations/instances_{}{}.json'.format(split, year))
ids_file = os.path.join(base_dir, 'annotations/{}_ids_{}.pth'.format(split, year))
self.img_dir = os.path.join(base_dir, 'images/{}{}'.format(split, year))
self.split = split
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
self.ids = torch.load(ids_file)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.args = args
self.normalize = transforms.Compose([
tr.ToTensor(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target}
if self.split == "train":
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample)
def _make_img_gt_point_pair(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
_img = Image.open(os.path.join(self.img_dir, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
_target = Image.fromarray(self._gen_seg_mask(
cocotarget, img_metadata['height'], img_metadata['width']))
return _img, _target
def _preprocess(self, ids, ids_file):
print("Preprocessing mask, this will take a while. " + \
"But don't worry, it only run once for each split.")
tbar = trange(len(ids))
new_ids = []
for i in tbar:
img_id = ids[i]
cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
img_metadata['width'])
# more than 1k pixels
if (mask > 0).sum() > 1000:
new_ids.append(img_id)
tbar.set_description('Doing: {}/{}, got {} qualified images'. \
format(i, len(ids), len(new_ids)))
print('Found number of qualified images: ', len(new_ids))
torch.save(new_ids, ids_file)
return new_ids
def _gen_seg_mask(self, target, h, w):
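        # Rasterize COCO polygon/RLE annotations into a single class map;
        # the (mask == 0) guard means earlier instances take precedence
        # where annotations overlap.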
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
m = coco_mask.decode(rle)
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr.RandomGaussianBlur(),
self.normalize])
return composed_transforms(sample)
def transform_val(self, sample):
composed_transforms = self.normalize
return composed_transforms(sample)
def __len__(self):
return len(self.ids)
if __name__ == "__main__":
from dataloaders import custom_transforms as tr
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
coco_val = COCOSegmentation(args, split='val', year='2017')
dataloader = DataLoader(coco_val, batch_size=4, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='coco')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
| 5,636 | 34.012422 | 96 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/lr_scheduler.py
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: [email protected]
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import math
class LR_Scheduler(object):
"""Learning Rate Scheduler
    Step mode: ``lr = baselr * 0.1 ^ floor(epoch / lr_step)``
    Cosine mode: ``lr = baselr * 0.5 * (1 + cos(pi * iter / maxiter))``
    Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``
Args:
args:
:attr:`args.lr_scheduler` lr scheduler mode (`cos`, `poly`),
:attr:`args.lr` base learning rate, :attr:`args.epochs` number of epochs,
:attr:`args.lr_step`
iters_per_epoch: number of iterations per epoch
"""
def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
lr_step=0, warmup_epochs=0):
self.mode = mode
print('Using {} LR Scheduler!'.format(self.mode))
self.lr = base_lr
if mode == 'step':
assert lr_step
self.lr_step = lr_step
self.iters_per_epoch = iters_per_epoch
self.N = num_epochs * iters_per_epoch
self.epoch = -1
self.warmup_iters = warmup_epochs * iters_per_epoch
self.actual_lr = base_lr
self.T = 0
def __call__(self, optimizer, i, epoch, best_pred):
T = epoch * self.iters_per_epoch + i
self.T = T
if self.mode == 'cos':
lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
elif self.mode == 'poly':
lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)
elif self.mode == 'step':
lr = self.lr * (0.1 ** (epoch // self.lr_step))
else:
            raise NotImplementedError
# warm up lr schedule
if self.warmup_iters > 0 and T < self.warmup_iters:
lr = lr * 1.0 * T / self.warmup_iters
if epoch > self.epoch:
self.epoch = epoch
assert lr >= 0
self.actual_lr = lr
self._adjust_learning_rate(optimizer, lr)
def _adjust_learning_rate(self, optimizer, lr):
if len(optimizer.param_groups) == 1:
optimizer.param_groups[0]['lr'] = lr
else:
# enlarge the lr at the head
optimizer.param_groups[0]['lr'] = lr
for i in range(1, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr * 10
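# Minimal usage sketch (illustrative addition, not part of the training code):
# the scheduler is called once per iteration with the optimizer it mutates;
# 'poly' matches the DeepLab default used in train.py.
if __name__ == '__main__':
    import torch
    model = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.007)
    sched = LR_Scheduler('poly', base_lr=0.007, num_epochs=2, iters_per_epoch=3)
    for epoch in range(2):
        for it in range(3):
            sched(opt, it, epoch, best_pred=0.0)
            print(epoch, it, opt.param_groups[0]['lr'])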
| 2,619 | 34.890411 | 83 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/log_lin_softmax.py
|
import torch
from torch.autograd import Function
from torch.autograd import Variable
import torch.nn.functional as F
class LogLinSoftmax(Function):
# computes log(a + b * s_ijkl) where s_ijkl is softmax of the input
@staticmethod
def forward(ctx, a, b, logits, dim):
ctx.dim, ctx.a, ctx.b = dim, a, b
with torch.no_grad():
s = F.softmax(logits, dim=dim)
ctx.save_for_backward(s)
result = torch.log(a + b * s)
        return result
@staticmethod
def backward(ctx, grad_output):
s, = ctx.saved_tensors
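        # Gradient derivation (sketch): with m_i = b*s_i / (a + b*s_i) and
        # ds_i/dx_j = s_i * (delta_ij - s_j), we get
        # d log(a + b*s_i) / dx_j = m_i * (delta_ij - s_j), so the chain rule
        # gives dL/dx_j = g_j*m_j - s_j * sum_i g_i*m_i, computed below.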
m = ctx.b * s
m = m / (ctx.a + m)
gimi = torch.sum(grad_output * m, dim=ctx.dim, keepdim=True)
grad_logits = m * grad_output - s * gimi
return None, None, grad_logits, None
def log_lin_softmax(a, b, input, dim):
# return input.log_softmax(dim)
return LogLinSoftmax.apply(a, b, input, dim)
#return torch.log(a + b * input.softmax(dim))
if __name__ == '__main__':
logits = (torch.rand(2,3,4,5) - 0.5) * 100
input1 = logits.clone().requires_grad_(True)
out1 = log_lin_softmax(0, 1, input1, 1)
out1[:,0].sum().backward()
input2 = logits.clone().requires_grad_(True)
out2 = F.log_softmax(input2, dim=1)
out2[:,0].sum().backward()
input3 = logits.clone().requires_grad_(True)
out3 = torch.log(F.softmax(input3, dim=1))
out3[:,0].sum().backward()
input4 = logits.clone().double().requires_grad_(True)
out4 = log_lin_softmax(0, 1, input4, 1)
out4[:,0].sum().backward()
input5 = logits.clone().double().requires_grad_(True)
out5 = F.log_softmax(input5, dim=1)
out5[:,0].sum().backward()
print("log_lin_softmax", input1.dtype, torch.norm(input1.grad - input5.grad))
print(" log_softmax", input2.dtype, torch.norm(input2.grad - input5.grad))
print("log * softmax", input3.dtype, torch.norm(input3.grad - input5.grad))
print("log_lin_softmax", input4.dtype, torch.norm(input4.grad - input5.grad))
print('---------------------------')
for a in [0, 1e-9, 1e-5, 1e-4, 1e-2, 0.1, 0.2, 0.3, 0.4, 0.5]:
b = 1 - a * 2
input6 = logits.clone().double().requires_grad_(True)
out6 = torch.log(a + b * input6.softmax(1))
out6[:,0].sum().backward()
input7 = logits.clone().double().requires_grad_(True)
out7 = log_lin_softmax(a, b, input7, dim=1)
out7[:,0].sum().backward()
print("log_lin_softmax_ab", input7.dtype, torch.norm(input7.grad - input6.grad))
| 2,569 | 31.948718 | 88 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/proposal_generator.py
|
import multiprocessing as mp
import tempfile, shutil, os
import io, pickle
import torch
import torch.nn.functional as F
import gzip
class AlphaBasedProposalGenerator(object):
    def __init__(self, alpha_expansion, eps=0):
        self.alpha_expansion = alpha_expansion
        self.model = None
        # defaults so get_model()/__call__ are safe before update_model runs
        self.cached_model = None
        self.read_cache = None
        self.eps = eps
def get_unary(self, logits):
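        # eps > 0 mixes the softmax with a uniform floor eps/(C-1), keeping
        # the unary -log(.) bounded, i.e. robust to confidently wrong pixels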
if self.eps == 0:
return -F.log_softmax(logits, dim=1)
probs = F.softmax(logits, dim=1)
C = logits.shape[1]
return -torch.log((1 - C * self.eps / (C - 1)) * probs + self.eps / (C - 1))
def get_model(self):
# return self.model
if self.cached_model is None:
self.cached_model = torch.load(io.BytesIO(self.model), "cpu")
self.cached_model.cpu()
self.cached_model.eval()
return self.cached_model
def update_model(self, model, read_cache):
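        # serialize the model to bytes so it can be shipped to dataloader
        # worker processes and lazily deserialized on CPU in get_model()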
self.read_cache = read_cache
self.cached_model = None
f = io.BytesIO()
torch.save(model, f)
self.model = f.getvalue()
def __call__(self, image, target, index):
if self.read_cache:
return self.load(index)
x0 = None
if self.read_cache is not None:
x0 = self.load(index)[0]
x0.unsqueeze_(0)
image.unsqueeze_(0)
target.unsqueeze_(0)
croppings = (target != 254).float()
sz = image.shape[-2:]
with torch.no_grad():
logits = self.get_model()(image)
result = self.alpha_expansion(
self.get_unary(logits), image, croppings, target, x0=x0, index=index)
result[0].squeeze_(0)
self.save(result, index)
return result
class ProposalGeneratorSharedMem(AlphaBasedProposalGenerator):
def __init__(self, alpha_expansion, eps=0):
super().__init__(alpha_expansion, eps)
self.model = None
self.read_cache = False
self.manager = mp.Manager()
self.hidden_label_cache = self.manager.dict()
def load(self, index):
return self.hidden_label_cache[index]
def save(self, obj, index):
self.hidden_label_cache[index] = obj
class ProposalGeneratorFileCache(AlphaBasedProposalGenerator):
def __init__(self, alpha_expansion, eps=0, path=None, del_path=None):
super().__init__(alpha_expansion, eps)
self.cached_model = None
self.read_cache = None
self.del_path = del_path or path is None
self.path = path or tempfile.mkdtemp(dir=os.environ.get("SLURM_TMPDIR"))
print("Saving proposals in %s" % self.path)
def __del__(self):
if self.del_path:
print("Deleting temp directory %s" % self.path)
shutil.rmtree(self.path)
def file_name(self, index):
return '%s/%05d.pt' % (self.path, index)
def load(self, index):
return torch.load(gzip.open(self.file_name(index)))
def save(self, obj, index):
return torch.save(obj, gzip.open(self.file_name(index), 'w'))
| 3,035 | 27.373832 | 84 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/saver.py
|
import os
import shutil
import torch
from collections import OrderedDict
import glob
class Saver(object):
def __init__(self, args):
self.args = args
self.directory = os.path.join('run', args.dataset + args.train_dataset_suffix, args.checkname)
self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_?????')))
run_id = int(self.runs[-1].split('_')[-1]) + 1 if self.runs else 0
self.experiment_dir = os.path.join(self.directory, 'experiment_{:05d}'.format(run_id))
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
print("Saver directory:", self.experiment_dir)
def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
filename = os.path.join(self.experiment_dir, filename)
torch.save(state, filename)
if is_best:
best_pred = state['best_pred']
with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:
f.write(str(best_pred))
if self.runs:
previous_miou = [0.0]
for run in self.runs:
run_id = run.split('_')[-1]
path = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)), 'best_pred.txt')
if os.path.exists(path):
with open(path, 'r') as f:
miou = float(f.readline())
previous_miou.append(miou)
else:
continue
max_miou = max(previous_miou)
if best_pred > max_miou:
shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))
else:
shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))
def save_experiment_config(self):
logfile = os.path.join(self.experiment_dir, 'parameters.txt')
log_file = open(logfile, 'w')
p = OrderedDict()
        p['dataset'] = self.args.dataset
p['backbone'] = self.args.backbone
p['out_stride'] = self.args.out_stride
p['lr'] = self.args.lr
p['lr_scheduler'] = self.args.lr_scheduler
p['loss_type'] = self.args.loss_type
p['epoch'] = self.args.epochs
p['base_size'] = self.args.base_size
p['crop_size'] = self.args.crop_size
for key, val in p.items():
log_file.write(key + ':' + str(val) + '\n')
log_file.close()
| 2,581 | 39.34375 | 109 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/vis.py
|
import torch
def get_edges(seg_map):
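    # boolean map of label-boundary pixels: a pixel is an edge if it differs
    # from its right/left/top/bottom neighbour (both sides of a boundary are
    # marked)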
    edges = torch.zeros_like(seg_map, dtype=torch.bool)
edges[..., :-1, :] |= seg_map[..., :-1, :] != seg_map[..., 1:, :]
edges[..., :, :-1] |= seg_map[..., :, :-1] != seg_map[..., :, 1:]
edges[..., 1:, :] |= seg_map[..., :-1, :] != seg_map[..., 1:, :]
edges[..., :, 1:] |= seg_map[..., :, :-1] != seg_map[..., :, 1:]
return edges
| 378 | 36.9 | 69 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/calculate_weights.py
|
import os
from tqdm import tqdm
import numpy as np
from mypath import Path
def calculate_weigths_labels(dataset, dataloader, num_classes):
# Create an instance from the data loader
z = np.zeros((num_classes,))
# Initialize tqdm
tqdm_batch = tqdm(dataloader)
print('Calculating classes weights')
for sample in tqdm_batch:
y = sample['label']
y = y.detach().cpu().numpy()
mask = (y >= 0) & (y < num_classes)
labels = y[mask].astype(np.uint8)
count_l = np.bincount(labels, minlength=num_classes)
z += count_l
tqdm_batch.close()
total_frequency = np.sum(z)
class_weights = []
for frequency in z:
class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
class_weights.append(class_weight)
ret = np.array(class_weights)
classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset+'_classes_weights.npy')
np.save(classes_weights_path, ret)
return ret
| 985 | 33 | 98 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SegmentationLosses(object):
def __init__(self, weight=None, reduction_mode='mean', batch_average=True, ignore_index=255, cuda=False):
self.ignore_index = ignore_index
self.weight = weight
self.reduction_mode = reduction_mode
self.batch_average = batch_average
self.cuda = cuda
def build_loss(self, mode='ce'):
"""Choices: ['ce' or 'focal']"""
if mode == 'ce':
return self.CrossEntropyLoss
elif mode == 'focal':
return self.FocalLoss
elif mode == 'l2':
self.itoa = None
self.softmax = nn.Softmax(dim=1)
return self.L2Loss
elif mode == 'l1':
self.itoa = None
self.softmax = nn.Softmax(dim=1)
return self.L1Loss
elif mode == 'margin0':
return self.Margin0Loss
else:
raise NotImplementedError
def CrossEntropyLoss(self, logit, target):
n, c, h, w = logit.size()
if target.ndim == 4 and target.shape[1] == 1:
target = target[:,0]
if target.ndim == 3:
criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,
reduction=self.reduction_mode)
if self.cuda:
criterion = criterion.cuda()
loss = criterion(logit, target.long())
else:
log_prob = F.log_softmax(logit, dim=1)
good = target[:,0,...] != 255
loss = torch.mean(
torch.sum(-target * log_prob, dim=1)[good],
# dim=(1,2)
)
if self.batch_average:
loss /= n
return loss
def FocalLoss(self, logit, target, gamma=2, alpha=0.5):
n, c, h, w = logit.size()
criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,
reduction=self.reduction_mode)
if self.cuda:
criterion = criterion.cuda()
logpt = -criterion(logit, target.long())
pt = torch.exp(logpt)
if alpha is not None:
logpt *= alpha
loss = -((1 - pt) ** gamma) * logpt
if self.batch_average:
loss /= n
return loss
def LxLoss(self, logit, target, criterion=nn.MSELoss()):
n, c, h, w = logit.size()
if target.dim() < 4:
target = target[:, None, :, :]
if self.itoa is None or c != self.itoa.shape[1]:
self.itoa = torch.tensor(range(c)).reshape([1, c, 1, 1]).byte()
if self.cuda:
self.itoa = self.itoa.cuda()
good = (target < c).float()
one_hot = (self.itoa == target).float()
prob = self.softmax(logit)
loss = criterion(one_hot * good, prob * good) / good.sum()
return loss
def L2Loss(self, logit, target):
return self.LxLoss(logit, target, nn.MSELoss(reduction='sum'))
def L1Loss(self, logit, target):
return self.LxLoss(logit, target, nn.L1Loss(reduction='sum'))
def Margin0Loss(self, logit, target):
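        # hinge at zero over the ROI: push wrong-class logits below 0 and the
        # correct-class logit above 0 (the relu terms cancel for the correct
        # class, leaving relu(-logit_correct))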
if target.ndim == 3:
target = target[:,None,...]
roi = target != 255
target[target == 255] = 0
C = logit.shape[1]
logit = logit.permute([0,2,3,1]).reshape([-1, C])[roi.reshape(-1), :]
logit_correct = logit.gather(1, target[roi].reshape([-1, 1]).long())
loss = torch.relu(logit).sum() - torch.relu(logit_correct).sum() + torch.relu(-logit_correct).sum()
return loss / logit.shape[0]
if __name__ == "__main__":
loss = SegmentationLosses(cuda=True)
a = torch.rand(1, 3, 7, 7).cuda()
b = torch.rand(1, 7, 7).cuda()
print(loss.CrossEntropyLoss(a, b).item())
print(loss.FocalLoss(a, b, gamma=0, alpha=None).item())
print(loss.FocalLoss(a, b, gamma=2, alpha=0.5).item())
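    # Illustrative extension of the demo above: build_loss returns the bound
    # method for the requested mode, so the regression and margin variants can
    # be exercised the same way (pseudo integer labels in {0, 1, 2}).
    b_hard = (b * 3).long().float()
    print(loss.build_loss(mode='l2')(a, b_hard).item())
    print(loss.build_loss(mode='margin0')(a, b_hard).item())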
| 3,977 | 33.894737 | 109 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/metrics.py
|
import numpy as np
class Evaluator(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,)*2)
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Intersection_over_Union(self):
return np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
def Mean_Intersection_over_Union(self):
MIoU = self.Intersection_over_Union()
MIoU = np.nanmean(MIoU)
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image):
assert gt_image.shape == pre_image.shape
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
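if __name__ == "__main__":
    # Quick self-check sketch (illustrative only): random predictions against
    # random ground truth; all metrics should come out low for random inputs.
    evaluator = Evaluator(num_class=5)
    gt = np.random.randint(0, 5, size=(2, 8, 8))
    pred = np.random.randint(0, 5, size=(2, 8, 8))
    evaluator.add_batch(gt, pred)
    print('PA  :', evaluator.Pixel_Accuracy())
    print('mIoU:', evaluator.Mean_Intersection_over_Union())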
| 1,903 | 37.08 | 99 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/utils/summaries.py
|
import os
import torch
import numpy as np
import scipy.ndimage
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence
from utils import vis
class TensorboardSummary(object):
def __init__(self, directory):
self.directory = directory
def create_summary(self):
writer = SummaryWriter(log_dir=os.path.join(self.directory))
return writer
def visualize_image(self, writer, dataset, image, target, output, global_step, prefix=''):
image = image[:9].clone().cpu()
grid_image = make_grid(image.data, 3, normalize=True)
writer.add_image(prefix + 'Image', grid_image, global_step)
seg_map = torch.max(output[:9], 1)[1].detach().cpu()
grid_image = make_grid(decode_seg_map_sequence(seg_map.numpy(),
dataset=dataset), 3, normalize=False, range=(0, 255))
writer.add_image(prefix + 'Predicted label', grid_image, global_step)
edges = vis.get_edges(seg_map)
mx = image.max()
for i in range(image.shape[0]):
image[i, :, edges[i]] = mx
# image[i, :, scipy.ndimage.binary_dilation(edges[i], iterations=5)] = mx
grid_image = make_grid(image, 3, normalize=True)
writer.add_image(prefix + 'Image_Edges', grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:9], 1).detach().cpu().numpy(),
dataset=dataset), 3, normalize=False, range=(0, 255))
writer.add_image(prefix + 'Groundtruth label', grid_image, global_step)
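# A usage sketch (illustrative; the variable names are assumptions): the summary
# object is created once per experiment and fed batches during validation.
#
#     summary = TensorboardSummary('run/pascal/deeplab-resnet')
#     writer = summary.create_summary()
#     summary.visualize_image(writer, 'pascal', image, target, output, global_step)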
| 1,691 | 42.384615 | 108 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/aspp.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):
super(_ASPPModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class ASPP(nn.Module):
def __init__(self, backbone, output_stride, BatchNorm):
super(ASPP, self).__init__()
if backbone == 'drn':
inplanes = 512
elif backbone == 'mobilenet':
inplanes = 320
else:
inplanes = 2048
if output_stride == 16:
dilations = [1, 6, 12, 18]
elif output_stride == 8:
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)
self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)
self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)
self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
BatchNorm(256),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm(256)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.5)
self._init_weight()
def forward(self, x):
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
return self.dropout(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_aspp(backbone, output_stride, BatchNorm):
return ASPP(backbone, output_stride, BatchNorm)
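if __name__ == "__main__":
    # Smoke-test sketch (illustrative): the ASPP head fuses four atrous
    # branches plus image-level pooling into a 256-channel map, preserving
    # the spatial size of the 2048-channel ResNet input.
    aspp = build_aspp(backbone='resnet', output_stride=16, BatchNorm=nn.BatchNorm2d)
    aspp.eval()  # the BN after global pooling needs eval mode for batch size 1
    feat = torch.rand(1, 2048, 32, 32)
    print(aspp(feat).shape)  # torch.Size([1, 256, 32, 32])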
| 3,602 | 36.926316 | 116 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/decoder.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Decoder(nn.Module):
def __init__(self, num_classes, backbone, BatchNorm, skip=False):
super(Decoder, self).__init__()
if backbone == 'resnet' or backbone == 'drn':
low_level_inplanes = 256
elif backbone == 'xception':
low_level_inplanes = 128
elif backbone == 'mobilenet':
low_level_inplanes = 24
else:
raise NotImplementedError
self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
self.bn1 = BatchNorm(48)
self.relu = nn.ReLU()
self.last_conv = nn.Sequential(
nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.1),
            # the skip variant concatenates the raw encoder output, assumed to
            # be 320 channels (MobileNetV2); see forward() below
            nn.Conv2d(256+320 if skip else 256, num_classes, kernel_size=1, stride=1)
)
self.skip = skip
self._init_weight()
def forward(self, x, low_level_feat, x_encoder):
low_level_feat = self.conv1(low_level_feat)
low_level_feat = self.bn1(low_level_feat)
low_level_feat = self.relu(low_level_feat)
x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x, low_level_feat), dim=1)
for module in self.last_conv[:-1]:
x = module(x)
if self.skip:
x_encoder = F.interpolate(x_encoder, size=x.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x, x_encoder), dim=1)
self.last_layer = x
x = self.last_conv[-1](x)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# self.class_projection.weight.data *= self.last_proj_factor
class Decoder0(nn.Module):
def __init__(self, num_classes, backbone, BatchNorm, last_proj_factor=1):
super(Decoder0, self).__init__()
print("Setting up decoder 0")
self.class_projection = nn.Conv2d(320, num_classes, kernel_size=1, stride=1)
self._init_weight()
def forward(self, x, low_level_feat, x_encoder):
self.last_layer = x
x = self.class_projection(self.last_layer)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_decoder(num_classes, backbone, BatchNorm, v='3.1', *args, **kwargs):
if v == '3.1':
return Decoder(num_classes, backbone, BatchNorm, *args, **kwargs)
if v == '3.2':
return Decoder(num_classes, backbone, BatchNorm, *args, skip=True, **kwargs)
return Decoder0(num_classes, backbone, BatchNorm, *args, **kwargs)
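if __name__ == "__main__":
    # Smoke-test sketch (illustrative): the v='3.1' decoder fuses 256-channel
    # ASPP features with 256-channel low-level ResNet features and predicts at
    # the low-level resolution.
    decoder = build_decoder(21, 'resnet', nn.BatchNorm2d, v='3.1')
    x = torch.rand(2, 256, 32, 32)
    low = torch.rand(2, 256, 128, 128)
    print(decoder(x, low, x_encoder=None).shape)  # torch.Size([2, 21, 128, 128])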
| 3,606 | 34.019417 | 104 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/deeplab.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.aspp import build_aspp
from modeling.decoder import build_decoder
from modeling.backbone import build_backbone
def freeze_batchnorm(self):
    # Standalone helper mirroring DeepLab.freeze_bn below; works on any nn.Module.
for m in self.modules():
if isinstance(m, SynchronizedBatchNorm2d):
m.eval()
elif isinstance(m, nn.BatchNorm2d):
m.eval()
class Identity(nn.Module):
def forward(self, *args):
return args[0]
class DeepLab(nn.Module):
def __init__(self, backbone='resnet', output_stride=16, num_classes=21,
sync_bn=True, freeze_bn=False, v='3.1'):
super(DeepLab, self).__init__()
v = v or '3.1'
if backbone == 'drn':
output_stride = 8
if sync_bn == True:
BatchNorm = SynchronizedBatchNorm2d
else:
BatchNorm = nn.BatchNorm2d
self.backbone = build_backbone(backbone, output_stride, BatchNorm)
if v in ['2', '3', '3.1', '3.2']:
self.aspp = build_aspp(backbone, output_stride, BatchNorm)
else:
self.aspp = Identity()
self.decoder = build_decoder(num_classes, backbone, BatchNorm, v)
if freeze_bn:
self.freeze_bn()
def forward(self, input):
x_encoder, low_level_feat = self.backbone(input)
x = self.aspp(x_encoder)
x = self.decoder(x, low_level_feat, x_encoder)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, SynchronizedBatchNorm2d):
m.eval()
elif isinstance(m, nn.BatchNorm2d):
m.eval()
def get_1x_lr_params(self):
modules = [self.backbone]
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \
or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_10x_lr_params(self):
modules = [self.aspp, self.decoder]
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \
or isinstance(m[1], nn.BatchNorm2d):
for p in m[1].parameters():
if p.requires_grad:
yield p
if __name__ == "__main__":
model = DeepLab(backbone='mobilenet', output_stride=16)
model.eval()
input = torch.rand(1, 3, 513, 513)
output = model(input)
print(output.size())
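    # Illustrative sketch: the parameter generators above exist so the
    # pretrained backbone and the randomly initialized head can be trained
    # with different learning rates (the values below are assumptions).
    train_params = [{'params': model.get_1x_lr_params(), 'lr': 0.007},
                    {'params': model.get_10x_lr_params(), 'lr': 0.07}]
    optimizer = torch.optim.SGD(train_params, momentum=0.9, weight_decay=5e-4)
    print(type(optimizer).__name__, len(optimizer.param_groups))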
| 2,898 | 30.857143 | 93 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/__init__.py
| 0 | 0 | 0 |
py
|
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/backbone/resnet.py
|
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):
self.inplanes = 64
super(ResNet, self).__init__()
blocks = [1, 2, 4]
if output_stride == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
elif output_stride == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 4]
else:
raise NotImplementedError
# Modules
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
self._init_weight()
if pretrained:
self._load_pretrained_model()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,
downsample=downsample, BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1,
dilation=blocks[i]*dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def ResNet101(output_stride, BatchNorm, pretrained=True):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=pretrained)
return model
if __name__ == "__main__":
import torch
model = ResNet101(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=8)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 6,222 | 37.41358 | 130 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/backbone/drn.py
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
webroot = 'https://tigress-web.princeton.edu/~fy/drn/models/'
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
'drn-d-105': webroot + 'drn_d_105-12b40979.pth'
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True, BatchNorm=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride,
padding=dilation[0], dilation=dilation[0])
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes,
padding=dilation[1], dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.downsample = downsample
self.stride = stride
self.residual = residual
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.residual:
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=(1, 1), residual=True, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=dilation[1], bias=False,
dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class DRN(nn.Module):
def __init__(self, block, layers, arch='D',
channels=(16, 32, 64, 128, 256, 512, 512, 512),
BatchNorm=None):
super(DRN, self).__init__()
self.inplanes = channels[0]
self.out_dim = channels[-1]
self.arch = arch
if arch == 'C':
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False)
self.bn1 = BatchNorm(channels[0])
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(
BasicBlock, channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
self.layer2 = self._make_layer(
BasicBlock, channels[1], layers[1], stride=2, BatchNorm=BatchNorm)
elif arch == 'D':
self.layer0 = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True)
)
self.layer1 = self._make_conv_layers(
channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
self.layer2 = self._make_conv_layers(
channels[1], layers[1], stride=2, BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2, BatchNorm=BatchNorm)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2, BatchNorm=BatchNorm)
self.layer5 = self._make_layer(block, channels[4], layers[4],
dilation=2, new_level=False, BatchNorm=BatchNorm)
self.layer6 = None if layers[5] == 0 else \
self._make_layer(block, channels[5], layers[5], dilation=4,
new_level=False, BatchNorm=BatchNorm)
if arch == 'C':
self.layer7 = None if layers[6] == 0 else \
self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
new_level=False, residual=False, BatchNorm=BatchNorm)
self.layer8 = None if layers[7] == 0 else \
self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
new_level=False, residual=False, BatchNorm=BatchNorm)
elif arch == 'D':
self.layer7 = None if layers[6] == 0 else \
self._make_conv_layers(channels[6], layers[6], dilation=2, BatchNorm=BatchNorm)
self.layer8 = None if layers[7] == 0 else \
self._make_conv_layers(channels[7], layers[7], dilation=1, BatchNorm=BatchNorm)
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
new_level=True, residual=True, BatchNorm=None):
assert dilation == 1 or dilation % 2 == 0
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = list()
layers.append(block(
self.inplanes, planes, stride, downsample,
dilation=(1, 1) if dilation == 1 else (
dilation // 2 if new_level else dilation, dilation),
residual=residual, BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, residual=residual,
dilation=(dilation, dilation), BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def _make_conv_layers(self, channels, convs, stride=1, dilation=1, BatchNorm=None):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(self.inplanes, channels, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(channels),
nn.ReLU(inplace=True)])
self.inplanes = channels
return nn.Sequential(*modules)
def forward(self, x):
if self.arch == 'C':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
elif self.arch == 'D':
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
low_level_feat = x
x = self.layer4(x)
x = self.layer5(x)
if self.layer6 is not None:
x = self.layer6(x)
if self.layer7 is not None:
x = self.layer7(x)
if self.layer8 is not None:
x = self.layer8(x)
return x, low_level_feat
class DRN_A(nn.Module):
def __init__(self, block, layers, BatchNorm=None):
self.inplanes = 64
super(DRN_A, self).__init__()
self.out_dim = 512 * block.expansion
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2, BatchNorm=BatchNorm)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4, BatchNorm=BatchNorm)
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
dilation=(dilation, dilation, ), BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def drn_a_50(BatchNorm, pretrained=True):
model = DRN_A(Bottleneck, [3, 4, 6, 3], BatchNorm=BatchNorm)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def drn_c_26(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-26'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_c_42(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-42'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_c_58(BatchNorm, pretrained=True):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-58'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_22(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-22'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_24(BatchNorm, pretrained=True):
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        # NOTE: 'drn-d-24' has no entry in model_urls above, so this lookup
        # raises a KeyError; pass pretrained=False until the URL is added.
        pretrained = model_zoo.load_url(model_urls['drn-d-24'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_38(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-38'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_40(BatchNorm, pretrained=True):
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        # NOTE: 'drn-d-40' has no entry in model_urls above, so this lookup
        # raises a KeyError; pass pretrained=False until the URL is added.
        pretrained = model_zoo.load_url(model_urls['drn-d-40'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_54(BatchNorm, pretrained=True):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-54'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
def drn_d_105(BatchNorm, pretrained=True):
model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-105'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
if __name__ == "__main__":
import torch
model = drn_a_50(BatchNorm=nn.BatchNorm2d, pretrained=True)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 14,649 | 35.352357 | 100 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/backbone/__init__.py
|
from modeling.backbone import resnet, xception, drn, mobilenet
def build_backbone(backbone, output_stride, BatchNorm):
if backbone == 'resnet':
return resnet.ResNet101(output_stride, BatchNorm)
elif backbone == 'xception':
return xception.AlignedXception(output_stride, BatchNorm)
elif backbone == 'drn':
return drn.drn_d_54(BatchNorm)
elif backbone == 'mobilenet':
return mobilenet.MobileNetV2(output_stride, BatchNorm)
else:
raise NotImplementedError
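if __name__ == "__main__":
    # Illustrative sketch: every backbone built here follows the same
    # contract, forward(image) -> (high_level_features, low_level_features).
    import torch
    import torch.nn as nn
    net = build_backbone('mobilenet', output_stride=16, BatchNorm=nn.BatchNorm2d)
    x, low = net(torch.rand(2, 3, 224, 224))
    print(x.shape, low.shape)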
| 514 | 35.785714 | 65 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/backbone/xception.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
groups=inplanes, bias=bias)
self.bn = BatchNorm(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.bn(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = BatchNorm(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride != 1:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride == 1 and is_last:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if not start_with_relu:
rep = rep[1:]
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x = x + skip
return x
class AlignedXception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, output_stride, BatchNorm,
pretrained=True):
super(AlignedXception, self).__init__()
if output_stride == 16:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif output_stride == 8:
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm(64)
self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False,
grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm,
start_with_relu=True, grow_first=True, is_last=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn3 = BatchNorm(1536)
self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn4 = BatchNorm(1536)
self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn5 = BatchNorm(2048)
# Init weights
self._init_weight()
# Load pretrained model
if pretrained:
self._load_pretrained_model()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
# add relu here
x = self.relu(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
        # copy matching pretrained weights, remapping the repeated middle-flow
        # and exit-flow blocks of the shallower pretrained model onto this variant
        for k, v in pretrain_dict.items():
            if k in state_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block11'):
model_dict[k] = v
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
if __name__ == "__main__":
import torch
model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 11,553 | 39.118056 | 116 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/backbone/mobilenet.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import math
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
import torch.utils.model_zoo as model_zoo
def conv_bn(inp, oup, stride, BatchNorm):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
BatchNorm(oup),
nn.ReLU6(inplace=True)
)
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
self.kernel_size = 3
self.dilation = dilation
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, 1, bias=False),
BatchNorm(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, 1, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, bias=False),
BatchNorm(oup),
)
def forward(self, x):
x_pad = fixed_padding(x, self.kernel_size, dilation=self.dilation)
if self.use_res_connect:
x = x + self.conv(x_pad)
else:
x = self.conv(x_pad)
return x
class MobileNetV2(nn.Module):
def __init__(self, output_stride=8, BatchNorm=None, width_mult=1., pretrained=True):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
current_stride = 1
rate = 1
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
input_channel = int(input_channel * width_mult)
self.features = [conv_bn(3, input_channel, 2, BatchNorm)]
current_stride *= 2
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
if current_stride == output_stride:
stride = 1
dilation = rate
rate *= s
else:
stride = s
dilation = 1
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm))
else:
self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm))
input_channel = output_channel
self.features = nn.Sequential(*self.features)
self._initialize_weights()
if pretrained:
self._load_pretrained_model()
self.low_level_features = self.features[0:4]
self.high_level_features = self.features[4:]
def forward(self, x):
low_level_feat = self.low_level_features(x)
x = self.high_level_features(low_level_feat)
return x, low_level_feat
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('http://jeff95.me/models/mobilenet_v2-6a65762b.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if __name__ == "__main__":
input = torch.rand(1, 3, 512, 512)
model = MobileNetV2(output_stride=16, BatchNorm=nn.BatchNorm2d)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 5,390 | 34.467105 | 110 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/sync_batchnorm/replicate.py
|
# -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
    Note that, as all replicas are isomorphic, we assign each sub-module a context
    (shared among multiple copies of this module on different devices).
    Through this context, different copies can share some information.
    We guarantee that the callback on the master copy (the first copy) will be called ahead of the callbacks
    of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` is invoked on each module after it is created by the
    original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
    Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
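# A usage sketch (illustrative; requires at least two GPUs): the callback-aware
# wrapper lets each SynchronizedBatchNorm replica register its master/slave pipe.
#
#     from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
#     sync_bn = SynchronizedBatchNorm2d(10)
#     model = DataParallelWithCallback(sync_bn.cuda(), device_ids=[0, 1])
#     out = model(torch.rand(16, 10, 8, 8).cuda())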
| 3,218 | 35.579545 | 115 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/sync_batchnorm/unittest.py
|
# -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
if isinstance(v, Variable):
v = v.data
return v.cpu().numpy()
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
npa, npb = as_numpy(a), as_numpy(b)
self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
)
| 834 | 26.833333 | 157 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/sync_batchnorm/batchnorm.py
|
# -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics of that device only, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics of that device only, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    the statistics of that device only, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, for the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
| 12,932 | 44.861702 | 116 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/sync_batchnorm/comm.py
|
# -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
    - During the replication, as data parallel triggers a callback on each module, every slave device should
    call `register(id)` and obtain a `SlavePipe` to communicate with the master.
    - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are
    collected and passed to a registered callback.
    - After receiving the messages, the master device gathers the information and determines the message to be
    passed back to each slave device.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
        Register a slave device.
        Args:
            identifier: an identifier, usually the device id.
        Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        The messages are first collected from each device (including the master device), and then
        a callback is invoked to compute the message to be sent back to each device
(including the master device).
Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
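# --- Editor's hedged usage sketch (not part of the upstream file) ---
# Demonstrates the master/slave handshake implemented above, assuming one
# master thread plus two slave threads; the callback sums the integer
# payloads and broadcasts the same total back to every device, mirroring
# how _SynchronizedBatchNorm aggregates per-device statistics.
if __name__ == '__main__':
    def _sum_callback(intermediates):
        # intermediates: [(identifier, msg), ...], with the master's entry first.
        total = sum(msg for _, msg in intermediates)
        return [(identifier, total) for identifier, _ in intermediates]

    master = SyncMaster(_sum_callback)
    pipes = [master.register_slave(i) for i in (1, 2)]

    def _slave(pipe, value):
        print('slave received', pipe.run_slave(value))

    workers = [threading.Thread(target=_slave, args=(p, v))
               for p, v in zip(pipes, (2, 3))]
    for w in workers:
        w.start()
    print('master received', master.run_master(1))  # every device gets 6
    for w in workers:
        w.join()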
| 4,440 | 33.161538 | 117 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/modeling/sync_batchnorm/__init__.py
|
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .replicate import DataParallelWithCallback, patch_replication_callback
| 447 | 36.333333 | 96 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/doc/deeplab_resnet.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, nInputChannels, block, layers, os=16, pretrained=False):
self.inplanes = 64
super(ResNet, self).__init__()
if os == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
blocks = [1, 2, 4]
elif os == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 2]
blocks = [1, 2, 1]
else:
raise NotImplementedError
# Modules
self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3])
self._init_weight()
if pretrained:
self._load_pretrained_model()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _make_MG_unit(self, block, planes, blocks=[1, 2, 4], stride=1, dilation=1):
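        # Multi-Grid unit (DeepLabv3): stacks blocks whose dilation rates are
        # the base dilation scaled by the multipliers in `blocks`, e.g.
        # dilation=2 with blocks=[1, 2, 4] yields rates 2, 4 and 8.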
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1, dilation=blocks[i]*dilation))
return nn.Sequential(*layers)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def ResNet101(nInputChannels=3, os=16, pretrained=False):
model = ResNet(nInputChannels, Bottleneck, [3, 4, 23, 3], os, pretrained=pretrained)
return model
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, dilation):
super(ASPP_module, self).__init__()
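        # ASPP branch: a 1x1 conv when dilation == 1, otherwise a 3x3 atrous
        # conv whose padding equals its dilation so spatial size is preserved.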
if dilation == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = dilation
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm2d(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, freeze_bn=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Backbone: Resnet-101")
print("Number of classes: {}".format(n_classes))
print("Output stride: {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
# Atrous Conv
self.resnet_features = ResNet101(nInputChannels, os, pretrained=pretrained)
# ASPP
if os == 16:
dilations = [1, 6, 12, 18]
elif os == 8:
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, dilation=dilations[0])
self.aspp2 = ASPP_module(2048, 256, dilation=dilations[1])
self.aspp3 = ASPP_module(2048, 256, dilation=dilations[2])
self.aspp4 = ASPP_module(2048, 256, dilation=dilations[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
BatchNorm2d(256),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.conv2 = nn.Conv2d(256, 48, 1, bias=False)
self.bn2 = BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
if freeze_bn:
self._freeze_bn()
def forward(self, input):
x, low_level_features = self.resnet_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
        x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)),
                                   int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def _freeze_bn(self):
for m in self.modules():
if isinstance(m, BatchNorm2d):
m.eval()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_1x_lr_params(model):
"""
    This generator yields the backbone (ResNet feature extractor) parameters,
    which are trained with the base learning rate. Parameters whose
    requires_grad is False (e.g. frozen batchnorm layers) are skipped.
"""
b = [model.resnet_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
    This generator yields the ASPP and decoder parameters, which produce the
    per-pixel classification and are trained with a 10x learning rate.
"""
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
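# --- Editor's hedged usage sketch (not part of the upstream file) ---
# Wiring the two generators above into an optimizer with a 10x multiplier on
# the head; the base learning rate, momentum and weight decay below are
# illustrative assumptions, not values taken from this repository.
#
#   base_lr = 0.007
#   optimizer = torch.optim.SGD(
#       [{'params': get_1x_lr_params(model), 'lr': base_lr},
#        {'params': get_10x_lr_params(model), 'lr': base_lr * 10}],
#       momentum=0.9, weight_decay=5e-4)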
if __name__ == "__main__":
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=True, _print=True)
model.eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
output = model.forward(image)
print(output.size())
| 11,247 | 34.594937 | 111 |
py
|
robust_trust_region
|
robust_trust_region-main/pytorch-deeplab_v3_plus/doc/deeplab_xception.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
BatchNorm2d = SynchronizedBatchNorm2d
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
def fixed_padding(inputs, kernel_size, dilation):
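    # Pads so that a stride-1 (dilated) convolution with this kernel preserves
    # spatial size, e.g. kernel_size=3, dilation=2 gives an effective kernel
    # of 5 and therefore 2 pixels of padding on every side.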
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class SeparableConv2d_same(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
super(SeparableConv2d_same, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
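        # Xception block: an optional strided 1x1 skip projection plus `reps`
        # separable convolutions; `grow_first` controls whether the channel
        # count expands in the first or the last separable conv of the stack.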
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = BatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(planes))
if not start_with_relu:
rep = rep[1:]
if stride != 1:
rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))
if stride == 1 and is_last:
rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, inplanes=3, os=16, pretrained=False):
super(Xception, self).__init__()
if os == 16:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif os == 8:
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm2d(64)
self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True,
is_last=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1])
self.bn3 = BatchNorm2d(1536)
self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1])
self.bn4 = BatchNorm2d(1536)
self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1])
self.bn5 = BatchNorm2d(2048)
# Init weights
self._init_weight()
# Load pretrained model
if pretrained:
self._load_xception_pretrained()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_xception_pretrained(self):
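        # The ImageNet Xception checkpoint has 8 middle-flow blocks while this
        # aligned variant has 16, so block11's pretrained weights are reused
        # for blocks 12-19 and block12's for block20; pointwise weights gain
        # trailing singleton dims to match Conv2d's 4-D kernel layout.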
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
            if k in state_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block11'):
model_dict[k] = v
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, dilation):
super(ASPP_module, self).__init__()
if dilation == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = dilation
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm2d(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, freeze_bn=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Backbone: Xception")
print("Number of classes: {}".format(n_classes))
print("Output stride: {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
# Atrous Conv
self.xception_features = Xception(nInputChannels, os, pretrained)
# ASPP
if os == 16:
dilations = [1, 6, 12, 18]
elif os == 8:
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, dilation=dilations[0])
self.aspp2 = ASPP_module(2048, 256, dilation=dilations[1])
self.aspp3 = ASPP_module(2048, 256, dilation=dilations[2])
self.aspp4 = ASPP_module(2048, 256, dilation=dilations[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
BatchNorm2d(256),
nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.conv2 = nn.Conv2d(128, 48, 1, bias=False)
self.bn2 = BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
if freeze_bn:
self._freeze_bn()
def forward(self, input):
x, low_level_features = self.xception_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)),
int(math.ceil(input.size()[-1]/4))), mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def _freeze_bn(self):
for m in self.modules():
if isinstance(m, BatchNorm2d):
m.eval()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_1x_lr_params(model):
"""
    This generator yields the backbone (Xception feature extractor) parameters,
    which are trained with the base learning rate. Parameters whose
    requires_grad is False (e.g. frozen batchnorm layers) are skipped.
"""
b = [model.xception_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
    This generator yields the ASPP and decoder parameters, which produce the
    per-pixel classification and are trained with a 10x learning rate.
"""
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
if __name__ == "__main__":
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=True, _print=True)
model.eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
output = model.forward(image)
print(output.size())
| 16,199 | 37.117647 | 127 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/python/twatch.py
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
	evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| 1,316 | 30.357143 | 75 |
py
|
f2fs-stable-linux-3.18.y
|
f2fs-stable-linux-3.18.y/tools/perf/util/setup.py
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| 1,543 | 30.510204 | 82 |
py
|