repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/config.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Flask settings
DEBUG = False
# Flask-restplus settings
RESTPLUS_MASK_SWAGGER = False
SWAGGER_UI_DOC_EXPANSION = 'none'
# API metadata
API_TITLE = 'MAX Toxic Comment Classifier'
API_DESC = 'Detect 6 types of toxicity in user comments.'
API_VERSION = '2.0.0'
# default model
MODEL_NAME = 'BERT_PyTorch'
DEFAULT_MODEL_PATH = f'assets/{MODEL_NAME}/'
# the output labels
LABEL_LIST = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
# the metadata of the model
MODEL_META_DATA = {
'id': 'max-toxic-comment-classifier',
'name': 'MAX Toxic Comment Classifier',
'description': 'BERT Base finetuned on toxic comments from Wikipedia.',
'type': 'Text Classification',
'source': 'https://developer.ibm.com/exchanges/models/all/max-toxic-comment-classifier/',
'license': 'Apache V2'
}
| 1,432 | 30.844444 | 93 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/core/bert_pytorch.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch.nn import BCEWithLogitsLoss
from pytorch_pretrained_bert.modeling import BertPreTrainedModel, BertModel
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class BertForMultiLabelSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.FloatTensor of shape [batch_size, num_labels]
with each entry in {0, 1} (a multi-hot encoding, one binary indicator per label).
Outputs:
if `labels` is not `None`:
Outputs the binary cross-entropy (BCEWithLogitsLoss) classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForMultiLabelSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForMultiLabelSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))
return loss
else:
return logits
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
labels: (Optional) [string]. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def convert_examples_to_features(examples, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(str(example.text_a))
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
if len(input_ids) != max_seq_length:
raise ValueError(f"input_ids has an invalid length {len(input_ids)}")
if len(input_mask) != max_seq_length:
raise ValueError(f"input_mask has an invalid length {len(input_mask)}")
if len(segment_ids) != max_seq_length:
raise ValueError(f"segment_ids has an invalid length {len(segment_ids)}")
labels_ids = []
for label in example.labels:
labels_ids.append(float(label))
if ex_index < 0:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %s)" % (example.labels, labels_ids))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=labels_ids))
return features
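# Illustrative sketch (not part of the original file): for max_seq_length=8 and an example
# whose text tokenizes to ["hello", "world"], the feature produced above would look like
# (101/102 are the usual [CLS]/[SEP] ids; the wordpiece ids are illustrative):
#   tokens      = ["[CLS]", "hello", "world", "[SEP]"]
#   input_ids   = [101, 7592, 2088, 102, 0, 0, 0, 0]   # zero-padded to max_seq_length
#   input_mask  = [1, 1, 1, 1, 0, 0, 0, 0]             # 1 = real token, 0 = padding
#   segment_ids = [0, 0, 0, 0, 0, 0, 0, 0]             # single-sequence task -> all type 0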
| 8,564 | 43.378238 | 112 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/core/model.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from maxfw.model import MAXModelWrapper
import logging
from config import DEFAULT_MODEL_PATH, LABEL_LIST, MODEL_META_DATA as model_meta
import torch
import time
import numpy as np
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from core.bert_pytorch import BertForMultiLabelSequenceClassification, InputExample, convert_examples_to_features
logger = logging.getLogger()
class ModelWrapper(MAXModelWrapper):
MODEL_META_DATA = model_meta
def __init__(self, path=DEFAULT_MODEL_PATH):
"""Instantiate the BERT model."""
logger.info('Loading model from: {}...'.format(path))
# Load the model
# 1. set the appropriate parameters
self.eval_batch_size = 64
self.max_seq_length = 256
self.do_lower_case = True
# 2. Initialize the PyTorch model
model_state_dict = torch.load(path + 'pytorch_model.bin', map_location='cpu')
self.tokenizer = BertTokenizer.from_pretrained(path, do_lower_case=self.do_lower_case)
self.model = BertForMultiLabelSequenceClassification.from_pretrained(path,
num_labels=len(LABEL_LIST),
state_dict=model_state_dict)
self.device = torch.device("cpu")
self.model.to(self.device)
# 3. Set the layers to evaluation mode
self.model.eval()
logger.info('Loaded model')
def _pre_process(self, input):
# Record the time spent in the prediction functions
self.start_time = time.time()
# Converting the input to features
test_examples = [InputExample(guid=i, text_a=x, labels=[]) for i, x in enumerate(input)]
test_features = convert_examples_to_features(test_examples, self.max_seq_length, self.tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
# Turn input examples into batches
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
test_sampler = SequentialSampler(test_data)
self.test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=self.eval_batch_size)
return test_examples
def _post_process(self, result):
"""Convert the prediction output to the expected output."""
# Generate the output format for every input string
output = [{LABEL_LIST[0]: p[0],
LABEL_LIST[1]: p[1],
LABEL_LIST[2]: p[2],
LABEL_LIST[3]: p[3],
LABEL_LIST[4]: p[4],
LABEL_LIST[5]: p[5],
} for p in result]
return output
def _predict(self, test_examples):
"""Predict the class probabilities using the BERT model."""
logger.info("***** Running prediction *****")
logger.info(" Num examples = %d", len(test_examples))
logger.info(" Batch size = %d", self.eval_batch_size)
all_logits = None
for step, batch in enumerate(self.test_dataloader):
input_ids, input_mask, segment_ids = batch
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
# Compute the logits
with torch.no_grad():
logits = self.model(input_ids, segment_ids, input_mask)
logits = logits.sigmoid()
# Save the logits
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)
# Return the predictions
logger.info(f'Inference done for {len(test_examples)} examples in {time.time() - self.start_time} seconds.')
return all_logits
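# Minimal usage sketch (assumes the model assets are present under DEFAULT_MODEL_PATH and that
# maxfw's MAXModelWrapper.predict chains _pre_process, _predict and _post_process as usual):
#   wrapper = ModelWrapper()
#   wrapper.predict(["you are a wonderful person"])
#   # -> [{'toxic': 0.01, 'severe_toxic': 0.0, 'obscene': 0.0,
#   #      'threat': 0.0, 'insult': 0.0, 'identity_hate': 0.0}]   # values illustrative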
| 4,848 | 39.07438 | 116 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/core/__init__.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 603 | 36.75 | 74 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/api/metadata.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from core.model import ModelWrapper
from maxfw.core import MAX_API, MetadataAPI, METADATA_SCHEMA
class ModelMetadataAPI(MetadataAPI):
@MAX_API.marshal_with(METADATA_SCHEMA)
def get(self):
"""Return the metadata associated with the model"""
return ModelWrapper.MODEL_META_DATA
| 907 | 32.62963 | 74 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/api/__init__.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .metadata import ModelMetadataAPI # noqa
from .predict import ModelPredictAPI # noqa
from .predict import ModelLabelsAPI # noqa
| 740 | 36.05 | 74 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/api/predict.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from core.model import ModelWrapper
from maxfw.core import MAX_API, PredictAPI, MetadataAPI
from flask_restplus import fields
from flask import abort
# Set up parser for input data (http://flask-restplus.readthedocs.io/en/stable/parsing.html)
input_parser = MAX_API.model('ModelInput', {
'text': fields.List(fields.String, required=True,
description='List of user comments (strings) to be analyzed for toxicity.')
})
# Creating a JSON response model: https://flask-restplus.readthedocs.io/en/stable/marshalling.html#the-api-model-factory
label_description = {
'toxic': 'very bad, unpleasant, or harmful',
'severe_toxic': 'extremely bad and offensive',
'obscene': '(of the portrayal or description of sexual matters) offensive or disgusting by accepted standards of '
'morality and decency',
'threat': 'a statement of an intention to inflict pain, injury, damage, or other hostile action on someone in '
'retribution for something done or not done',
'insult': 'speak to or treat with disrespect or scornful abuse',
'identity_hate': 'hatred, hostility, or violence towards members of a race, ethnicity, nation, religion, gender, '
'gender identity, sexual orientation or any other designated sector of society'
}
label_prediction = MAX_API.model('LabelPrediction', {
'toxic': fields.Float(required=True, description=label_description['toxic']),
'severe_toxic': fields.Float(required=True, description=label_description['severe_toxic']),
'obscene': fields.Float(required=True, description=label_description['obscene']),
'threat': fields.Float(required=True, description=label_description['threat']),
'insult': fields.Float(required=True, description=label_description['insult']),
'identity_hate': fields.Float(required=True, description=label_description['identity_hate']),
})
results_response = MAX_API.model("ModelResultResponse", {
'original_text': fields.String(required=True, description='User submitted text'),
'predictions': fields.Nested(label_prediction, description='Predicted labels and probabilities')
})
predict_response = MAX_API.model('ModelPredictResponse', {
'status': fields.String(required=True, description='Response status message'),
'results': fields.List(fields.Nested(results_response), description='Original Text, predicted labels, and probabilities')
})
class ModelLabelsAPI(MetadataAPI):
'''API for getting information about the available toxicity tags.'''
@MAX_API.doc('labels')
def get(self):
'''Return the list of labels that can be predicted by the model.'''
result = dict()
result['labels'] = {label: label_description[label] for label in label_description}
result['count'] = len(label_description.keys())
return result
class ModelPredictAPI(PredictAPI):
model_wrapper = ModelWrapper()
@MAX_API.doc('predict')
@MAX_API.expect(input_parser, validate=True)
@MAX_API.marshal_with(predict_response)
def post(self):
"""Make a prediction given input data"""
result = {'status': 'error'}
input_json = MAX_API.payload
# Make sure the input list is not empty
if len(input_json['text']) == 0:
abort(400, 'An empty list was provided. Please add the input strings to this list.')
try:
output = self.model_wrapper.predict(input_json['text'])
result['results'] = []
for i in range(len(output)):
res = {'original_text': input_json['text'][i],
'predictions': output[i]}
result['results'].append(res)
result['status'] = 'ok'
return result
except: # noqa
abort(500, "Model Inference Failed with valid input")
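# Request/response sketch (field names follow the marshalling models above; probabilities are illustrative):
#   POST /model/predict   {"text": ["great work!"]}
#   -> {"status": "ok",
#       "results": [{"original_text": "great work!",
#                    "predictions": {"toxic": 0.0, "severe_toxic": 0.0, "obscene": 0.0,
#                                    "threat": 0.0, "insult": 0.0, "identity_hate": 0.0}}]}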
| 4,453 | 42.666667 | 125 |
py
|
MAX-Toxic-Comment-Classifier
|
MAX-Toxic-Comment-Classifier-master/tests/test.py
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import requests
def test_swagger():
model_endpoint = 'http://localhost:5000/swagger.json'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
assert r.headers['Content-Type'] == 'application/json'
json = r.json()
assert 'swagger' in json
assert json.get('info') and json.get('info').get('title') == 'MAX Toxic Comment Classifier'
def test_metadata():
model_endpoint = 'http://localhost:5000/model/metadata'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
metadata = r.json()
assert metadata['id'] == 'max-toxic-comment-classifier'
assert metadata['name'] == 'MAX Toxic Comment Classifier'
assert metadata['description'] == 'BERT Base finetuned on toxic comments from Wikipedia.'
assert metadata['license'] == 'Apache V2'
assert metadata['type'] == 'Text Classification'
assert 'developer.ibm.com' in metadata['source']
def test_invalid_input():
model_endpoint = 'http://localhost:5000/model/predict'
invalid_data = {
"not_text": []
}
invalid_data2 = {}
invalid_data3 = ''
invalid_data4 = 3459000
invalid_data5 = {'text': 45435}
invalid_data6 = {'text': [45435]}
r = requests.post(url=model_endpoint, json=invalid_data)
assert r.status_code == 400
r = requests.post(url=model_endpoint, json=invalid_data2)
assert r.status_code == 400
r = requests.post(url=model_endpoint, json=invalid_data3)
assert r.status_code == 400
r = requests.post(url=model_endpoint, json=invalid_data4)
assert r.status_code == 400
r = requests.post(url=model_endpoint, json=invalid_data5)
assert r.status_code == 400
r = requests.post(url=model_endpoint, json=invalid_data6)
assert r.status_code == 400
def test_labels_response():
model_endpoint = 'http://localhost:5000/model/labels'
r = requests.get(url=model_endpoint)
assert r.status_code == 200
response = r.json()
assert response['count'] == 6
assert set(response['labels'].keys()) == {'threat', 'insult', 'toxic', 'severe_toxic', 'identity_hate', 'obscene'}
def test_predict_response():
model_endpoint = 'http://localhost:5000/model/predict'
json_data = {
"text": ["good string",
"dumb string"]
}
r = requests.post(url=model_endpoint, json=json_data)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
# verify that the input string is being returned
assert response["results"][0]["original_text"] == "good string"
# verify that 'good string' is non-toxic
assert round(float(response['results'][0]['predictions']['toxic'])) == 0
# verify that 'dumb string' is in fact toxic
assert round(float(response['results'][1]['predictions']['toxic'])) == 1
# verify that we have 6 labels
assert len(response['results'][1]['predictions'].keys()) == 6
json_data2 = {
"text": [
"I would like to respectfully punch you in the mouth.",
"The Model Asset Exchange is a crucial element of a developer's toolkit.",
"This code is amongst the ugliest I have ever encountered."
]
}
r = requests.post(url=model_endpoint, json=json_data2)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
# verify the outcome of the first comment
assert round(float(response['results'][0]['predictions']['toxic'])) == 1
# verify the outcome of the second comment
assert round(float(response['results'][1]['predictions']['toxic'])) == 0
# verify the outcome of the third comment
assert round(float(response['results'][2]['predictions']['toxic'])) == 1
# The last entry of samples/test_examples.csv contains all types of toxicity. This is verified here.
with open('samples/test_examples.csv', 'rb') as fh:
for line in fh:
pass
json_data3 = {
"text": [str(line).split(',')[0]]
}
r = requests.post(url=model_endpoint, json=json_data3)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert round(float(response['results'][0]['predictions']['toxic'])) == 1
assert round(float(response['results'][0]['predictions']['severe_toxic'])) == 1
assert round(float(response['results'][0]['predictions']['obscene'])) == 1
assert round(float(response['results'][0]['predictions']['insult'])) == 1
assert round(float(response['results'][0]['predictions']['threat'])) == 1
assert round(float(response['results'][0]['predictions']['identity_hate'])) == 1
# Test different input batch sizes
for input_size in [4, 16, 32, 64, 75]:
json_data4 = {
"text": ["good string"]*input_size
}
r = requests.post(url=model_endpoint, json=json_data4)
assert r.status_code == 200
response = r.json()
assert response['status'] == 'ok'
assert len(response['results']) == len(json_data4["text"])
if __name__ == '__main__':
pytest.main([__file__])
| 5,738 | 30.190217 | 118 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/VitaCLIP_text_encoder.py
|
import torch
import torch.nn as nn
import copy
from collections import OrderedDict
from typing import Union, List
from pkg_resources import packaging
from VitaCLIP_text_encoder_utils import SimpleTokenizer as _Tokenizer
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
_tokenizer = _Tokenizer()
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
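# Usage sketch (shapes exact, ids shown for the standard CLIP vocab):
#   toks = tokenize(["a video of a dog", "a video of a person surfing"])
#   toks.shape    # torch.Size([2, 77])
#   toks[0, 0]    # <|startoftext|> id (49406); the text ids follow, then <|endoftext|> (49407), then zero padding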
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList([ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor, maple_prompts=None):
if maple_prompts:
num_prompts = maple_prompts[0].shape[0]
for i, blk in enumerate(self.resblocks):
if i == 0:
x = blk(x)
else:
prefix = x[:1, :, :]
suffix = x[1 + num_prompts:, :, :]
# Create/configure learnable tokens of this layer
textual_context = maple_prompts[i-1]
textual_context = textual_context.expand(x.shape[1], -1, -1).permute(1, 0, 2)
# Insert this layer's learnable tokens into the sequence, replacing the
# previous layer's learnable tokens
x = torch.cat([prefix, textual_context, suffix], dim=0)
# then do forward pass from transformer
x = blk(x)
else:
for blk in self.resblocks:
x = blk(x)
return x
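# Prompt-exchange sketch: when maple_prompts is given (each entry of shape (num_prompts, width)),
# block 0 runs on the input unchanged; before each later block, the tokens at sequence positions
# 1 .. num_prompts (the sequence is in LND layout here) are replaced by that block's entry from
# maple_prompts expanded across the batch, so every layer receives its own learnable context.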
class CLIPTextEncoder(nn.Module):
def __init__(
self,
embed_dim: int = 512,
context_length: int = 77,
vocab_size: int = 49408,
transformer_width: int = 512,
transformer_heads: int = 8,
transformer_layers: int = 12,
):
super().__init__()
self.context_length = context_length
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
def build_attention_mask(self):
# lazily create the causal attention mask for the text tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
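# Sketch of the resulting additive mask for context_length=4 (0 = attend, -inf = blocked):
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]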
def forward(self, prompts, tokenized_prompts, maple_prompts=None):
x = prompts + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
if maple_prompts:
x = self.transformer(x, maple_prompts)
else:
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), tokenized_prompts.argmax(dim=-1)] @ self.text_projection
return x
class TextPromptLearner(nn.Module):
def __init__(self, classnames, text_model, num_prompts, prompts_init='', CSC=False, ctx_pos='end'):
super().__init__()
_tokenizer = _Tokenizer()
n_cls = len(classnames)
n_ctx = num_prompts
ctx_init = prompts_init
ctx_dim = text_model.ln_final.weight.shape[0]
if ctx_init:
# use given words to initialize context vectors
ctx_init = ctx_init.replace("_", " ")
n_ctx = len(ctx_init.split(" "))
prompt = tokenize(ctx_init)
with torch.no_grad():
embedding = text_model.token_embedding(prompt)
ctx_vectors = embedding[0, 1 : 1 + n_ctx, :]
prompt_prefix = ctx_init
else:
# random initialization
if CSC:
print("Initializing class-specific contexts")
ctx_vectors = torch.empty(n_cls, n_ctx, ctx_dim)
else:
print("Initializing a generic context")
ctx_vectors = torch.empty(n_ctx, ctx_dim)
nn.init.normal_(ctx_vectors, std=0.02)
prompt_prefix = " ".join(["X"] * n_ctx)
print(f'Initial context: "{prompt_prefix}"')
print(f"Number of context words (tokens): {n_ctx}")
self.ctx = nn.Parameter(ctx_vectors) # to be optimized
classnames = [name.replace("_", " ") for name in classnames]
name_lens = [len(_tokenizer.encode(name)) for name in classnames]
prompts = [prompt_prefix + " " + name + "." for name in classnames]
tokenized_prompts = torch.cat([tokenize(p) for p in prompts])
# print(tokenized_prompts.shape)
with torch.no_grad():
embedding = text_model.token_embedding(tokenized_prompts)
# These token vectors will be saved when in save_model(),
# but they should be ignored in load_model() as we want to use
# those computed using the current class names
self.register_buffer("token_prefix", embedding[:, :1, :]) # SOS
self.register_buffer("token_suffix", embedding[:, 1 + n_ctx :, :]) # CLS, EOS
self.n_cls = n_cls
self.n_ctx = n_ctx
self.tokenized_prompts = tokenized_prompts # torch.Tensor
self.name_lens = name_lens
self.class_token_position = ctx_pos
def forward(self):
ctx = self.ctx
if ctx.dim() == 2:
ctx = ctx.unsqueeze(0).expand(self.n_cls, -1, -1)
prefix = self.token_prefix
suffix = self.token_suffix
if self.class_token_position == "end":
prompts = torch.cat(
[
prefix, # (n_cls, 1, dim)
ctx, # (n_cls, n_ctx, dim)
suffix, # (n_cls, *, dim)
],
dim=1,
)
elif self.class_token_position == "middle":
half_n_ctx = self.n_ctx // 2
prompts = []
for i in range(self.n_cls):
name_len = self.name_lens[i]
prefix_i = prefix[i : i + 1, :, :]
class_i = suffix[i : i + 1, :name_len, :]
suffix_i = suffix[i : i + 1, name_len:, :]
ctx_i_half1 = ctx[i : i + 1, :half_n_ctx, :]
ctx_i_half2 = ctx[i : i + 1, half_n_ctx:, :]
prompt = torch.cat(
[
prefix_i, # (1, 1, dim)
ctx_i_half1, # (1, n_ctx//2, dim)
class_i, # (1, name_len, dim)
ctx_i_half2, # (1, n_ctx//2, dim)
suffix_i, # (1, *, dim)
],
dim=1,
)
prompts.append(prompt)
prompts = torch.cat(prompts, dim=0)
elif self.class_token_position == "front":
prompts = []
for i in range(self.n_cls):
name_len = self.name_lens[i]
prefix_i = prefix[i : i + 1, :, :]
class_i = suffix[i : i + 1, :name_len, :]
suffix_i = suffix[i : i + 1, name_len:, :]
ctx_i = ctx[i : i + 1, :, :]
prompt = torch.cat(
[
prefix_i, # (1, 1, dim)
class_i, # (1, name_len, dim)
ctx_i, # (1, n_ctx, dim)
suffix_i, # (1, *, dim)
],
dim=1,
)
prompts.append(prompt)
prompts = torch.cat(prompts, dim=0)
else:
raise ValueError(f"Unknown class_token_position: {self.class_token_position}")
return prompts
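# Layout sketch for class_token_position == "end" (the default in train.py):
#   [SOS] [ctx_1] ... [ctx_n_ctx] [class-name tokens] [EOS] [padding]
# so `prompts` has shape (n_cls, context_length, ctx_dim) and is consumed by
# CLIPTextEncoder.forward together with self.tokenized_prompts.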
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
| 11,343 | 37.454237 | 137 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/checkpoint.py
|
#!/usr/bin/env python
import argparse
import os
import torch
import torch.distributed as dist
def setup_arg_parser(parser: argparse.ArgumentParser):
parser.add_argument('--checkpoint_dir', type=str,
help='checkpoint output path')
parser.add_argument('--auto_resume', action='store_true',
help='auto resume from the last checkpoint from checkpoint_dir')
parser.add_argument('--resume_path', type=str,
help='resume from manually specified checkpoint file, overriding auto_resume')
parser.add_argument('--pretrain', type=str,
help='path to pretrained weights. Will NOT override auto_resume or resume_path, '
'load optimizer state, or enforce strict matching of checkpoint and model weights.')
def _find_autoresume_path(args: argparse.Namespace):
print('Trying to auto resume from path:', args.checkpoint_dir)
if os.path.isdir(args.checkpoint_dir):
checkpoint_files = [x for x in os.listdir(args.checkpoint_dir) if x.startswith('checkpoint-') and x.endswith('.pth')]
checkpoint_iters = []
for x in checkpoint_files:
try:
x = x[len('checkpoint-'): -len('.pth')]
x = int(x)
except ValueError:
continue
checkpoint_iters.append(x)
else:
checkpoint_iters = []
if len(checkpoint_iters) == 0:
print('Did not find a valid checkpoint file.')
else:
checkpoint_iters.sort()
args.resume_path = os.path.join(args.checkpoint_dir, 'checkpoint-%d.pth' % checkpoint_iters[-1])
print(f'Found {len(checkpoint_iters)} checkpoint file(s).')
def resume_from_checkpoint(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
lr_sched: torch.optim.lr_scheduler._LRScheduler,
loss_scaler: torch.cuda.amp.grad_scaler.GradScaler,
args: argparse.Namespace,
) -> int:
if args.pretrain is not None:
print(f'Loading pretrain model: {args.pretrain}')
ckpt = torch.load(args.pretrain, map_location='cpu')
print(model.load_state_dict(ckpt['model'], strict=False))
# returns resume_step on successful resume, or 0 otherwise.
if args.auto_resume and args.resume_path is None:
_find_autoresume_path(args)
if args.resume_path is None:
print('Not resuming from a checkpoint.')
return 0
else:
print(f'Resuming from checkpoint file {args.resume_path}')
ckpt = torch.load(args.resume_path, map_location='cpu')
model.load_state_dict(ckpt['model'], strict=True)
if 'optimizer' in ckpt:
optimizer.load_state_dict(ckpt['optimizer'])
lr_sched.load_state_dict(ckpt['lr_sched'])
loss_scaler.load_state_dict(ckpt['loss_scaler'])
return ckpt['next_step']
else:
print('Optimizer state is NOT found in checkpoint.')
return 0
def save_checkpoint(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
lr_sched: torch.optim.lr_scheduler._LRScheduler,
loss_scaler: torch.cuda.amp.grad_scaler.GradScaler,
next_step: int,
args: argparse.Namespace,
):
if args.checkpoint_dir is None:
return
if not os.path.isdir(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
to_save = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_sched': lr_sched.state_dict(),
'loss_scaler': loss_scaler.state_dict(),
'next_step': next_step,
}
torch.save(to_save, os.path.join(args.checkpoint_dir, f'checkpoint-{next_step}.pth'))
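# Usage sketch (hypothetical paths): with --checkpoint_dir ./ckpt --auto_resume and files
#   ./ckpt/checkpoint-5000.pth   ./ckpt/checkpoint-10000.pth
# _find_autoresume_path selects ./ckpt/checkpoint-10000.pth (the highest step), after which
# resume_from_checkpoint restores model/optimizer/lr_sched/loss_scaler state and returns the
# checkpoint's saved 'next_step'.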
| 3,710 | 35.742574 | 125 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/VitaCLIP_vision_encoder.py
|
from typing import Tuple
import numpy as np
from einops import rearrange
import torch
import torch.nn as nn
import torch.nn.functional as F
from operator import mul
from functools import reduce
import math
from VitaCLIP_vision_encoder_utils import QuickGELU, LayerNorm, TransformerEncoderLayer, ImagePatchEmbed2D
class CLIPVisionEncoder(nn.Module):
def __init__(
self,
# data shape
input_size: Tuple[int, int] = (224, 224),
num_frames: int = 8,
# model def
feature_dim: int = 768,
patch_size: Tuple[int, int] = (16, 16),
num_heads: int = 12,
num_layers: int = 12,
mlp_factor: float = 4.0,
act: nn.Module = QuickGELU,
embed_dim: int = 512,
# use summary token
use_summary_token: bool = False,
# use local prompts
use_local_prompts: bool = False,
# use global prompts
use_global_prompts: bool = False,
num_global_prompts: int = 8,
):
super().__init__()
self.feature_dim = feature_dim
self.patch_embed = ImagePatchEmbed2D(img_size=input_size[0], patch_size=patch_size[0], in_chans=3, embed_dim=feature_dim)
self.num_patches = np.prod([x // y for x, y in zip(input_size, patch_size)]) + 1
self.cls_token = nn.Parameter(torch.zeros([feature_dim]))
self.pos_embed = nn.Parameter(torch.zeros([self.num_patches, feature_dim]))
self.time_embed = nn.Parameter(torch.zeros([num_frames, feature_dim]))
self.blocks = nn.ModuleList([
TransformerEncoderLayer(
in_feature_dim=feature_dim, qkv_dim=feature_dim, num_heads=num_heads,
mlp_factor=mlp_factor, act=act, use_summary_token=use_summary_token,
use_local_prompts=use_local_prompts, num_frames=num_frames, patch_size=patch_size
) for _ in range(num_layers)
])
self.ln_pre = LayerNorm(feature_dim)
self.ln_post = LayerNorm(feature_dim)
scale = feature_dim ** -0.5
self.proj = nn.Parameter(scale * torch.randn(feature_dim, embed_dim))
# global prompts
self.use_global_prompts = use_global_prompts
self.num_global_prompts = num_global_prompts
if self.use_global_prompts:
self.global_prompts = nn.Parameter(torch.zeros(num_layers, self.num_global_prompts, feature_dim))
self._initialize_global_prompts(patch_size, feature_dim)
self._initialize_weights()
def _initialize_weights(self):
nn.init.normal_(self.cls_token, std=0.02)
nn.init.normal_(self.pos_embed, std=0.02)
nn.init.normal_(self.time_embed, std=0.02)
def _initialize_global_prompts(self, patch_size, prompt_dim):
val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.global_prompts.data, -val, val)
def temporal_encoding(self, x, T, B):
## Time Embeddings
x = rearrange(x, '(b t) n m -> (b n) t m',b=B,t=T)
## Resizing time embeddings in case they don't match
if T != self.time_embed.size(0):
time_embed = self.time_embed.unsqueeze(0).transpose(1,2)
new_time_embed = F.interpolate(time_embed, size=(T), mode='nearest')
new_time_embed = new_time_embed.transpose(1, 2).squeeze(0)
x = x + new_time_embed
else:
x = x + self.time_embed
x = rearrange(x, '(b n) t m -> (b t) n m',b=B,t=T)
return x
def forward(self, x: torch.Tensor):
B, C, T, H, W = x.size()
x = x.permute(0, 2, 1, 3, 4).flatten(0, 1)
x = self.patch_embed(x)
x = torch.cat([self.cls_token.view(1, 1, -1).repeat(x.size(0), 1, 1), x], dim=1)
x = x + self.pos_embed
x = self.temporal_encoding(x, T, B)
x = self.ln_pre(x)
if self.use_global_prompts:
for i, blk in enumerate(self.blocks):
global_prompts = self.global_prompts[i].expand(B*T, -1, -1)
x = torch.cat((x[:, :1, :], global_prompts, x[:, 1:, :]), dim=1)
x = blk(x)
x = torch.cat((x[:, :1, :], x[:, self.num_global_prompts+1:, :]), dim=1)
else:
for blk in self.blocks:
x = blk(x)
cls_x = self.ln_post(x[:, 0, :])
cls_x = cls_x @ self.proj
cls_x = rearrange(cls_x, '(b t) e -> b t e', b=B,t=T).mean(dim=1)
return cls_x
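# Shape sketch with the defaults above (224x224 input, 16x16 patches, 8 frames):
#   x: (B, 3, 8, 224, 224) -> patch_embed: (B*8, 196, 768) -> + cls token: (B*8, 197, 768)
#   -> transformer blocks -> cls_x: (B, 512) after projection and temporal mean pooling.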
| 4,541 | 34.209302 | 129 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/VitaCLIP_text_encoder_utils.py
|
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
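# Mapping sketch: printable bytes map to themselves, e.g. bytes_to_unicode()[ord('a')] == 'a',
# while bytes outside the printable ranges are shifted to otherwise unused code points,
# e.g. bytes_to_unicode()[0] == chr(256).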
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
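# Roundtrip sketch (the exact BPE ids depend on the vocab shipped with the repo):
#   tok = SimpleTokenizer()
#   ids = tok.encode("a photo of a cat")   # list of integer BPE ids
#   tok.decode(ids)                        # -> "a photo of a cat " (decode maps '</w>' back to a space)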
| 4,628 | 33.804511 | 144 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/VitaCLIP_model.py
|
#!/usr/bin/env python
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
from VitaCLIP_vision_encoder import CLIPVisionEncoder
from VitaCLIP_text_encoder import CLIPTextEncoder, TextPromptLearner
class VitaCLIP(nn.Module):
def __init__(
self,
# load weights
backbone_path: str = '',
# data shape
input_size: Tuple[int, int] = (224, 224),
num_frames: int = 16,
# model def
feature_dim: int = 768,
patch_size: Tuple[int, int] = (16, 16),
num_heads: int = 12,
num_layers: int = 12,
mlp_factor: float = 4.0,
embed_dim: int = 512,
# use summary token
use_summary_token: bool = False,
# use local prompts
use_local_prompts: bool = False,
# use global prompts
use_global_prompts: bool = False,
num_global_prompts: int = 8,
# use text prompt learning
use_text_prompt_learning: bool = False,
text_context_length: int = 77,
text_vocab_size: int = 49408,
text_transformer_width: int = 512,
text_transformer_heads: int = 8,
text_transformer_layers: int = 12,
text_num_prompts: int = 8,
text_prompt_pos: str = 'end',
text_prompt_init: str = '',
text_prompt_CSC: bool = False,
text_prompt_classes_path: str = '',
# zeroshot eval
zeroshot_evaluation: bool = False,
zeroshot_text_features_path: str = '',
):
super().__init__()
# frames and tubelet
self.num_frames = num_frames
# use summary token
self.use_summary_token = use_summary_token
# clip loss logit_scale
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
# zeroshot text_features
self.zeroshot_evaluation = zeroshot_evaluation
if self.zeroshot_evaluation:
self.text_features = torch.load(zeroshot_text_features_path, map_location='cpu')
# visual model
self.visual = CLIPVisionEncoder(
# data shape
input_size=input_size,
num_frames=num_frames,
# model def
feature_dim=feature_dim,
patch_size=patch_size,
num_heads=num_heads,
num_layers=num_layers,
mlp_factor=mlp_factor,
embed_dim=embed_dim,
# use summary token
use_summary_token=use_summary_token,
# use local prompts
use_local_prompts=use_local_prompts,
# use global prompts
use_global_prompts=use_global_prompts,
num_global_prompts=num_global_prompts,
)
self.use_text_prompt_learning = use_text_prompt_learning
# text prompt learning
if self.use_text_prompt_learning:
self.textual = CLIPTextEncoder(
embed_dim=embed_dim,
context_length=text_context_length,
vocab_size=text_vocab_size,
transformer_width=text_transformer_width,
transformer_heads=text_transformer_heads,
transformer_layers=text_transformer_layers,
)
if backbone_path:
ckpt = torch.load(backbone_path)
self.load_state_dict(ckpt, strict=False)
if self.use_text_prompt_learning:
with open(text_prompt_classes_path, 'r') as f:
classes = f.read().strip().split('\n')
self.prompt_learner = TextPromptLearner(
classnames=classes,
text_model=self.textual,
num_prompts=text_num_prompts,
prompts_init=text_prompt_init,
CSC=text_prompt_CSC,
ctx_pos=text_prompt_pos
)
self.tokenized_prompts = self.prompt_learner.tokenized_prompts
# freeze encoders
self._freeze_visual_except_prompts_time_embed()
self._freeze_textual()
def _freeze_visual_except_prompts_time_embed(self):
for name, param in self.visual.named_parameters():
if 'summary' in name or 'local' in name or 'global' in name or 'time_embed' in name:
pass
else:
param.requires_grad = False
def _freeze_textual(self):
for name, param in self.textual.named_parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor):
B, C, T, H, W = x.size()
# used in training
if self.use_text_prompt_learning:
# text side
prompts = self.prompt_learner()
tokenized_prompts = self.tokenized_prompts
text_features = self.textual(prompts, tokenized_prompts)
# vision side
video_features = self.visual(x)
# used in zeroshot evaluation
else:
# vision side
video_features = self.visual(x)
# text side
text_features = self.text_features.to(video_features.device)
# normalized features
video_features = video_features / video_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits = logit_scale * video_features @ text_features.t()
return logits
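# Shape sketch: for a video batch x of shape (B, 3, T, 224, 224) and n_cls class prompts,
# forward returns logits of shape (B, n_cls), i.e. a logit_scale-scaled cosine similarity
# between each video embedding and each class text embedding.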
| 5,576 | 32.8 | 100 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/__init__.py
|
#!/usr/bin/env python
| 21 | 21 | 21 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/VitaCLIP_vision_encoder_utils.py
|
#!/usr/bin/env python
from collections import OrderedDict
from typing import Tuple
import torch
import torch.nn as nn
from operator import mul
from functools import reduce
import math
'''
QuickGELU and LayerNorm w/ fp16 from official CLIP repo
(https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py)
'''
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class Attention(nn.Module):
'''
A generalized attention module with more flexibility.
'''
def __init__(
self, q_in_dim: int, k_in_dim: int, v_in_dim: int,
qk_proj_dim: int, v_proj_dim: int, num_heads: int,
out_dim: int
):
super().__init__()
self.q_proj = nn.Linear(q_in_dim, qk_proj_dim)
self.k_proj = nn.Linear(k_in_dim, qk_proj_dim)
self.v_proj = nn.Linear(v_in_dim, v_proj_dim)
self.out_proj = nn.Linear(v_proj_dim, out_dim)
self.num_heads = num_heads
assert qk_proj_dim % num_heads == 0 and v_proj_dim % num_heads == 0
self._initialize_weights()
def _initialize_weights(self):
for m in (self.q_proj, self.k_proj, self.v_proj, self.out_proj):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.)
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
assert q.ndim == 3 and k.ndim == 3 and v.ndim == 3
N = q.size(0); assert k.size(0) == N and v.size(0) == N
Lq, Lkv = q.size(1), k.size(1); assert v.size(1) == Lkv
q, k, v = self.q_proj(q), self.k_proj(k), self.v_proj(v)
H = self.num_heads
Cqk, Cv = q.size(-1) // H, v.size(-1) // H
q = q.view(N, Lq, H, Cqk)
k = k.view(N, Lkv, H, Cqk)
v = v.view(N, Lkv, H, Cv)
aff = torch.einsum('nqhc,nkhc->nqkh', q / (Cqk ** 0.5), k)
aff = aff.softmax(dim=-2)
mix = torch.einsum('nqlh,nlhc->nqhc', aff, v)
out = self.out_proj(mix.flatten(-2))
return out
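# Shape sketch: q of shape (N, Lq, q_in_dim) and k/v of shape (N, Lkv, *) give out of shape
# (N, Lq, out_dim); the attention weights `aff` have shape (N, Lq, Lkv, num_heads) and are
# softmax-normalised over the key dimension (Lkv).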
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
# attention def
in_feature_dim: int = 768,
qkv_dim: int = 768,
num_heads: int = 12,
mlp_factor: float = 4.0,
mlp_dropout: float = 0.0,
act: nn.Module = QuickGELU,
# use summary token
use_summary_token: bool = False,
# use local prompts
use_local_prompts: bool = False,
# model def
num_frames: int = 8,
patch_size: Tuple[int, int] = (16, 16),
):
super().__init__()
self.attn = Attention(
q_in_dim=in_feature_dim, k_in_dim=in_feature_dim, v_in_dim=in_feature_dim,
qk_proj_dim=qkv_dim, v_proj_dim=qkv_dim, num_heads=num_heads, out_dim=in_feature_dim
)
mlp_dim = round(mlp_factor * in_feature_dim)
self.mlp = nn.Sequential(OrderedDict([
('fc1', nn.Linear(in_feature_dim, mlp_dim)),
('act', act()),
('dropout', nn.Dropout(mlp_dropout)),
('fc2', nn.Linear(mlp_dim, in_feature_dim)),
]))
self.norm1 = LayerNorm(in_feature_dim)
self.norm2 = LayerNorm(in_feature_dim)
self.use_summary_token = use_summary_token
self.use_local_prompts = use_local_prompts
# for both summary token and local prompts we need the cls_proj layer and the num_frames
if self.use_summary_token or self.use_local_prompts:
self.cls_proj = nn.Linear(in_feature_dim, in_feature_dim)
self.num_frames = num_frames
# for summary token we need a layer norm and attention
if self.use_summary_token:
self.summary_ln = LayerNorm(in_feature_dim)
self.summary_attn_layer = Attention(
q_in_dim=in_feature_dim, k_in_dim=in_feature_dim, v_in_dim=in_feature_dim,
qk_proj_dim=qkv_dim, v_proj_dim=qkv_dim, num_heads=num_heads, out_dim=in_feature_dim
)
# for local prompts we init learnable tokens
if self.use_local_prompts:
self.local_prompts = nn.Parameter(torch.zeros(1, self.num_frames, in_feature_dim))
self._initialize_cls_prompts(patch_size, in_feature_dim)
self._initialize_weights()
def _initialize_weights(self):
for m in (self.mlp[0], self.mlp[-1]):
nn.init.xavier_uniform_(m.weight)
nn.init.normal_(m.bias, std=1e-6)
def _initialize_cls_prompts(self, patch_size, prompt_dim):
val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.local_prompts.data, -val, val)
def forward(self, x: torch.Tensor):
# get the cls tokens and apply the projection fc,
# which is required for both the summary token
# and the local prompts
if self.use_summary_token or self.use_local_prompts:
BT, N, C = x.shape
T = self.num_frames
B = BT//T
cls_token = x[:, 0, :].view(B, T, C)
cls_token_proj = self.cls_proj(cls_token)
# then apply ln and attn if summary token being used
if self.use_summary_token:
summary_token_norm = self.summary_ln(cls_token_proj)
summary_token_attn = cls_token_proj + self.summary_attn_layer(summary_token_norm, summary_token_norm, summary_token_norm)
summary_token_attn_reshape = summary_token_attn.view(BT, 1, C)
x = torch.cat([x, summary_token_attn_reshape], dim=1)
# then if local prompts are being used
if self.use_local_prompts:
local_prompts = self.local_prompts.expand(B, -1, -1)
# If train time frames and
# test time frames are not equal
if T != self.num_frames:
token_multiplier = T//self.num_frames
local_prompts = local_prompts.repeat(1,token_multiplier,1)
# use additive conditioning
local_prompts = local_prompts + cls_token_proj
# repeat across frames
local_prompts = local_prompts.repeat_interleave(repeats=T, dim=0)
x = torch.cat((x[:, :1, :], local_prompts, x[:, 1:, :]), dim=1)
x_norm = self.norm1(x)
x = x + self.attn(x_norm, x_norm, x_norm)
# remove the tokens after self attention
if self.use_summary_token:
x = x[:, :-1, :]
if self.use_local_prompts:
x = torch.cat((x[:, :1, :], x[:, local_prompts.shape[1]+1:, :]), dim=1)
x = x + self.mlp(self.norm2(x))
return x
class ImagePatchEmbed2D(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
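# Shape sketch: a (B, 3, 224, 224) frame batch with patch_size=16 yields (224 // 16) ** 2 = 196
# patches, so forward returns a tensor of shape (B, 196, embed_dim).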
| 7,573 | 34.064815 | 133 |
py
|
Vita-CLIP
|
Vita-CLIP-main/training/train.py
|
#!/usr/bin/env python
import argparse
from datetime import datetime
import builtins
import torch
import torch.distributed as dist
import sys
sys.path.append('./')
import video_dataset
import checkpoint
from VitaCLIP_model import VitaCLIP
from collections import OrderedDict
def setup_print(is_master: bool):
"""
This function disables printing when not in master process
"""
builtin_print = builtins.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
now = datetime.now().time()
builtin_print('[{}] '.format(now), end='') # print with time stamp
builtin_print(*args, **kwargs)
builtins.print = print
def main():
# torch.autograd.set_detect_anomaly(True)
parser = argparse.ArgumentParser()
video_dataset.setup_arg_parser(parser)
checkpoint.setup_arg_parser(parser)
# train settings
parser.add_argument('--num_steps', type=int,
help='number of training steps')
parser.add_argument('--eval_only', action='store_true',
help='run evaluation only')
parser.add_argument('--save_freq', type=int, default=5000,
help='save a checkpoint every N steps')
parser.add_argument('--eval_freq', type=int, default=5000,
help='evaluate every N steps')
parser.add_argument('--print_freq', type=int, default=10,
help='print log message every N steps')
parser.add_argument('--lr', type=float, default=4e-4,
help='learning rate')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='optimizer weight decay')
parser.add_argument('--batch_split', type=int, default=1,
help='optionally split the batch into smaller shards and forward/backward one shard '
'at a time to avoid out-of-memory error.')
# backbone and checkpoint paths
parser.add_argument('--backbone_path', type=str,
help='path to pretrained backbone weights', default='')
parser.add_argument('--checkpoint_path', type=str,
help='path to pretrained checkpoint weights', default=None)
# model params
parser.add_argument('--patch_size', type=int, default=16,
help='patch size of patch embedding')
parser.add_argument('--num_heads', type=int, default=12,
help='number of transformer heads')
parser.add_argument('--num_layers', type=int, default=12,
help='number of transformer layers')
parser.add_argument('--feature_dim', type=int, default=768,
help='transformer feature dimension')
parser.add_argument('--embed_dim', type=int, default=512,
help='clip projection embedding size')
parser.add_argument('--mlp_factor', type=float, default=4.0,
help='transformer mlp factor')
parser.add_argument('--cls_dropout', type=float, default=0.5,
help='dropout rate applied before the final classification linear projection')
# zeroshot evaluation
parser.add_argument('--zeroshot_evaluation', action='store_true', dest='zeroshot_evaluation',
help='set into zeroshot evaluation mode')
parser.add_argument('--zeroshot_text_features_path', type=str, default='./ucf101_text_features_B16/class-only.pth',
help='path to saved clip text features to be used for zeroshot evaluation')
#fp16
parser.add_argument('--use_fp16', action='store_true', dest='fp16',
                        help='enable fp16 (mixed precision) during training or inference')
parser.set_defaults(fp16=False)
# use summary token attn
parser.add_argument('--use_summary_token', action='store_true', dest='use_summary_token',
help='use summary token')
# use local prompts
parser.add_argument('--use_local_prompts', action='store_true', dest='use_local_prompts',
help='use local (frame-level conditioned) prompts')
# use global prompts
parser.add_argument('--use_global_prompts', action='store_true', dest='use_global_prompts',
help='use global (video-level unconditioned) prompts')
parser.add_argument('--num_global_prompts', type=int, default=8,
help='number of global prompts')
# set defaults
parser.set_defaults(use_summary_token=False, use_local_prompts=False, use_global_prompts=False)
# text prompt learning
parser.add_argument('--use_text_prompt_learning', action='store_true', dest='use_text_prompt_learning',
help='use coop text prompt learning')
parser.add_argument('--text_context_length', type=int, default=77,
help='text model context length')
parser.add_argument('--text_vocab_size', type=int, default=49408,
help='text model vocab size')
parser.add_argument('--text_transformer_width', type=int, default=512,
help='text transformer width')
parser.add_argument('--text_transformer_heads', type=int, default=8,
help='text transformer heads')
parser.add_argument('--text_transformer_layers', type=int, default=12,
help='text transformer layers')
parser.add_argument('--text_num_prompts', type=int, default=16,
help='number of text prompts')
parser.add_argument('--text_prompt_pos', type=str, default='end',
help='postion of text prompt')
parser.add_argument('--text_prompt_init', type=str, default='',
help='initialization to be used for text prompt. Leave empty for random')
parser.add_argument('--use_text_prompt_CSC', action='store_true', dest='text_prompt_CSC',
help='use Class Specific Context in text prompt')
parser.add_argument('--text_prompt_classes_path', type=str, default='./classes/k400_classes.txt',
help='path of classnames txt file')
args = parser.parse_args()
dist.init_process_group('nccl')
setup_print(dist.get_rank() == 0)
cuda_device_id = dist.get_rank() % torch.cuda.device_count()
torch.cuda.set_device(cuda_device_id)
model = VitaCLIP(
# load weights
backbone_path=args.backbone_path,
# data shape
input_size=(args.spatial_size, args.spatial_size),
num_frames=args.num_frames,
# model def
feature_dim=args.feature_dim,
patch_size=(args.patch_size, args.patch_size),
num_heads=args.num_heads,
num_layers=args.num_layers,
mlp_factor=args.mlp_factor,
embed_dim=args.embed_dim,
# use summary token
use_summary_token=args.use_summary_token,
# use local prompts
use_local_prompts=args.use_local_prompts,
# use global prompts
use_global_prompts=args.use_global_prompts,
num_global_prompts=args.num_global_prompts,
# use text prompt learning
use_text_prompt_learning=args.use_text_prompt_learning,
text_context_length=args.text_context_length,
text_vocab_size=args.text_vocab_size,
text_transformer_width=args.text_transformer_width,
text_transformer_heads=args.text_transformer_heads,
text_transformer_layers=args.text_transformer_layers,
text_num_prompts=args.text_num_prompts,
text_prompt_pos=args.text_prompt_pos,
text_prompt_init=args.text_prompt_init,
text_prompt_CSC=args.text_prompt_CSC,
text_prompt_classes_path=args.text_prompt_classes_path,
# zeroshot eval
zeroshot_evaluation=args.zeroshot_evaluation,
zeroshot_text_features_path=args.zeroshot_text_features_path,
)
if args.checkpoint_path:
print('loading checkpoint')
ckpt = torch.load(args.checkpoint_path, map_location='cpu')
renamed_ckpt = OrderedDict((k[len("module."):], v) for k, v in ckpt['model'].items() if k.startswith("module."))
model.load_state_dict(renamed_ckpt, strict=True)
print(model)
print('----------------------------------------------------')
print('Trainable Parameters')
for name, param in model.named_parameters():
        if param.requires_grad:
print(name)
print('----------------------------------------------------')
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[cuda_device_id], output_device=cuda_device_id,
)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
lr_sched = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_steps)
loss_scaler = torch.cuda.amp.grad_scaler.GradScaler(enabled=args.fp16)
criterion = torch.nn.CrossEntropyLoss()
resume_step = checkpoint.resume_from_checkpoint(model, optimizer, lr_sched, loss_scaler, args)
val_loader = video_dataset.create_val_loader(args)
if args.eval_only:
print('Running in eval_only mode.')
model.eval()
evaluate(model, val_loader)
return
else:
assert args.train_list_path is not None, 'Train list path must be specified if not in eval_only mode.'
train_loader = video_dataset.create_train_loader(args, resume_step=resume_step)
assert len(train_loader) == args.num_steps - resume_step
batch_st, train_st = datetime.now(), datetime.now()
for i, (data, labels) in enumerate(train_loader, resume_step):
data, labels = data.cuda(), labels.cuda()
data_ed = datetime.now()
optimizer.zero_grad()
assert data.size(0) % args.batch_split == 0
split_size = data.size(0) // args.batch_split
hit1, hit5, loss_value = 0, 0, 0
for j in range(args.batch_split):
data_slice = data[split_size * j: split_size * (j + 1)]
labels_slice = labels[split_size * j: split_size * (j + 1)]
with torch.cuda.amp.autocast(args.fp16):
logits = model(data_slice)
loss = criterion(logits, labels_slice)
if labels.dtype == torch.long: # no mixup, can calculate accuracy
hit1 += (logits.topk(1, dim=1)[1] == labels_slice.view(-1, 1)).sum().item()
hit5 += (logits.topk(5, dim=1)[1] == labels_slice.view(-1, 1)).sum().item()
loss_value += loss.item() / args.batch_split
loss_scaler.scale(loss / args.batch_split).backward()
loss_scaler.step(optimizer)
loss_scaler.update()
lr_sched.step()
batch_ed = datetime.now()
if i % args.print_freq == 0:
sync_tensor = torch.Tensor([loss_value, hit1 / data.size(0), hit5 / data.size(0)]).cuda()
dist.all_reduce(sync_tensor)
sync_tensor = sync_tensor.cpu() / dist.get_world_size()
loss_value, acc1, acc5 = sync_tensor.tolist()
print(
f'batch_time: {(batch_ed - batch_st).total_seconds():.3f} '
f'data_time: {(data_ed - batch_st).total_seconds():.3f} '
f'ETA: {(batch_ed - train_st) / (i - resume_step + 1) * (args.num_steps - i - 1)} | '
f'lr: {optimizer.param_groups[0]["lr"]:.6f} '
f'loss: {loss_value:.6f}' + (
f' acc1: {acc1 * 100:.2f}% acc5: {acc5 * 100:.2f}%' if labels.dtype == torch.long else ''
)
)
if (i + 1) % args.eval_freq == 0:
print('Start model evaluation at step', i + 1)
model.eval()
evaluate(model, val_loader)
model.train()
if (i + 1) % args.save_freq == 0 and dist.get_rank() == 0:
checkpoint.save_checkpoint(model, optimizer, lr_sched, loss_scaler, i + 1, args)
batch_st = datetime.now()
def evaluate(model: torch.nn.Module, loader: torch.utils.data.DataLoader):
tot, hit1, hit5 = 0, 0, 0
eval_st = datetime.now()
for data, labels in loader:
data, labels = data.cuda(), labels.cuda()
assert data.size(0) == 1
if data.ndim == 6:
data = data[0] # now the first dimension is number of views
with torch.no_grad():
logits = model(data)
scores = logits.softmax(dim=-1).mean(dim=0)
tot += 1
hit1 += (scores.topk(1)[1] == labels).sum().item()
hit5 += (scores.topk(5)[1] == labels).sum().item()
if tot % 20 == 0:
print(f'[Evaluation] num_samples: {tot} '
f'ETA: {(datetime.now() - eval_st) / tot * (len(loader) - tot)} '
f'cumulative_acc1: {hit1 / tot * 100.:.2f}% '
f'cumulative_acc5: {hit5 / tot * 100.:.2f}%')
sync_tensor = torch.LongTensor([tot, hit1, hit5]).cuda()
dist.all_reduce(sync_tensor)
tot, hit1, hit5 = sync_tensor.cpu().tolist()
print(f'Accuracy on validation set: top1={hit1 / tot * 100:.2f}%, top5={hit5 / tot * 100:.2f}%')
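# Illustrative sketch (not part of the original script): how the multi-view scores above
# are fused -- per-view logits are softmaxed and averaged into a single probability
# vector per video before taking top-k. Shapes here are made up for the example.
def _demo_view_fusion():
    import torch
    logits = torch.randn(6, 400)                   # 6 spatio-temporal views, 400 classes
    scores = logits.softmax(dim=-1).mean(dim=0)    # (400,) fused class probabilities
    return scores.topk(5)[1]                       # predicted top-5 class indices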
if __name__ == '__main__': main()
| 13,336 | 42.161812 | 120 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/dataloader.py
|
#!/usr/bin/env python
import argparse
from typing import Dict
import torch
import torch.distributed as dist
from .dataset import VideoDataset, DummyDataset
def setup_arg_parser(parser: argparse.ArgumentParser):
parser.add_argument('--train_list_path', type=str,
help='path to training data list')
parser.add_argument('--val_list_path', type=str,
help='path to validation data list')
parser.add_argument('--train_data_root', type=str,
help='training samples root directory')
parser.add_argument('--val_data_root', type=str,
help='validation samples root directory')
parser.add_argument('--data_root', type=str, default='',
                        help='training and validation samples root directory, may be overridden by --train_data_root or --val_data_root')
parser.add_argument('--batch_size', type=int,
                        help='total training batch size summed over all GPUs')
parser.add_argument('--num_spatial_views', type=int, default=1,
help='number of spatial crops used for testing (total views = num_spatial_views * num_temporal_views)')
parser.add_argument('--num_temporal_views', type=int, default=3,
help='number of temporal crops used for testing (total views = num_spatial_views * num_temporal_views)')
parser.add_argument('--num_frames', type=int, default=8,
help='number of frames used for each view')
parser.add_argument('--sampling_rate', type=int, default=16,
help='temporal stride for frame sampling, only valid when tsn_sampling is not enabled')
parser.add_argument('--tsn_sampling', action='store_true',
help='enable TSN-style sampling (i.e. sample frames with dynamic stride to cover the whole video)')
parser.add_argument('--spatial_size', type=int, default=224,
help='frame height and width in pixels')
parser.add_argument('--mean', type=float, nargs='+',
help='pixel mean used to normalize the image.')
parser.add_argument('--std', type=float, nargs='+',
help='pixel std used to normalize the image')
parser.add_argument('--num_workers', type=int, default=10,
help='number of DataLoader worker threads')
parser.add_argument('--dummy_dataset', action='store_true',
                        help='use a fake dataset that generates all-zero tensors (for speed tests only)')
parser.add_argument('--auto_augment', type=str,
help='auto augment configuration')
parser.add_argument('--interpolation', type=str, default='bicubic',
help='interpolation mode')
parser.add_argument('--no_mirror', action='store_false', dest='mirror',
help='disable mirror for training (frequently used for the something-something dataset)')
parser.set_defaults(mirror=True)
def _parse_mean_and_std(args: argparse.Namespace) -> Dict[str, torch.Tensor]:
def parse_mean_or_std(arg, default_value):
if arg is None:
return torch.Tensor([default_value] * 3)
elif len(arg) == 1:
return torch.Tensor(arg * 3)
elif len(arg) == 3:
return torch.Tensor(arg)
else:
raise NotImplementedError()
return {
'mean': parse_mean_or_std(args.mean, 0.45),
'std': parse_mean_or_std(args.std, 0.225),
}
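# Example (illustrative, not part of the original file): --mean/--std accept either a
# single value (broadcast to all three channels) or three values; when unset they fall
# back to the 0.45 / 0.225 defaults above.
def _demo_parse_mean_and_std():
    ns = argparse.Namespace(mean=[0.5], std=None)
    parsed = _parse_mean_and_std(ns)
    assert parsed['mean'].tolist() == [0.5, 0.5, 0.5]   # single value broadcast to 3 channels
    assert parsed['std'].shape == (3,)                  # default per-channel std
    return parsed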
def create_train_dataset(args: argparse.Namespace) -> torch.utils.data.Dataset:
if args.dummy_dataset:
return DummyDataset(
list_path=args.train_list_path,
num_frames=args.num_frames,
num_views=1,
spatial_size=args.spatial_size,
)
return VideoDataset(
list_path=args.train_list_path,
data_root=args.train_data_root or args.data_root,
num_spatial_views=1, num_temporal_views=1, random_sample=True,
auto_augment=args.auto_augment,
interpolation=args.interpolation,
mirror=args.mirror,
num_frames=args.num_frames,
sampling_rate=-1 if args.tsn_sampling else args.sampling_rate,
spatial_size=args.spatial_size,
**_parse_mean_and_std(args),
)
def create_train_loader(args: argparse.Namespace, resume_step: int = 0) -> torch.utils.data.DataLoader:
dataset = create_train_dataset(args)
rank, world_size = (0, 1) if not dist.is_initialized() else (dist.get_rank(), dist.get_world_size())
assert args.batch_size % world_size == 0
batch_size_per_gpu = args.batch_size // world_size
# manually create a step-based sampler
sampler = []
while len(sampler) * len(dataset) < args.num_steps * args.batch_size:
g = torch.Generator()
g.manual_seed(len(sampler))
indices = torch.randperm(len(dataset), generator=g)
sampler.append(indices)
sampler = torch.cat(sampler, dim=0)[:args.num_steps * args.batch_size].view(args.num_steps, args.batch_size)
sampler = sampler[resume_step:, batch_size_per_gpu * rank: batch_size_per_gpu * (rank + 1)].flatten().tolist()
loader = torch.utils.data.DataLoader(
dataset, sampler=sampler, batch_size=batch_size_per_gpu,
num_workers=args.num_workers, pin_memory=False, drop_last=True,
)
return loader
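# Illustrative sketch (not part of the original file): the step-based sampler above
# concatenates as many deterministically shuffled passes over the dataset as needed,
# trims to num_steps * batch_size indices, and lets each rank read its own column block,
# so every rank sees a disjoint, resumable slice of every step's global batch.
def _demo_step_sampler(dataset_len=10, num_steps=4, batch_size=6, world_size=2, rank=0, resume_step=0):
    chunks = []
    while len(chunks) * dataset_len < num_steps * batch_size:
        g = torch.Generator()
        g.manual_seed(len(chunks))
        chunks.append(torch.randperm(dataset_len, generator=g))
    sampler = torch.cat(chunks)[:num_steps * batch_size].view(num_steps, batch_size)
    per_gpu = batch_size // world_size
    return sampler[resume_step:, per_gpu * rank: per_gpu * (rank + 1)].flatten().tolist()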
def create_val_dataset(args: argparse.Namespace) -> torch.utils.data.Dataset:
if args.dummy_dataset:
return DummyDataset(
list_path=args.val_list_path,
num_frames=args.num_frames,
num_views=args.num_spatial_views * args.num_temporal_views,
spatial_size=args.spatial_size,
)
return VideoDataset(
list_path=args.val_list_path,
data_root=args.val_data_root or args.data_root,
num_spatial_views=args.num_spatial_views,
num_temporal_views=args.num_temporal_views,
random_sample=False,
num_frames=args.num_frames,
sampling_rate=-1 if args.tsn_sampling else args.sampling_rate,
spatial_size=args.spatial_size,
**_parse_mean_and_std(args),
)
def create_val_loader(args: argparse.Namespace) -> torch.utils.data.DataLoader:
dataset = create_val_dataset(args)
rank, world_size = (0, 1) if not dist.is_initialized() else (dist.get_rank(), dist.get_world_size())
    # sampler for distributed evaluation
sampler = list(range(rank, len(dataset), world_size))
loader = torch.utils.data.DataLoader(
dataset, sampler=sampler, batch_size=1,
num_workers=args.num_workers, pin_memory=False,
)
return loader
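# Worked example (illustrative, not part of the original file): with 4 ranks and 10
# validation clips, rank 0 evaluates indices [0, 4, 8], rank 1 [1, 5, 9], rank 2 [2, 6]
# and rank 3 [3, 7]; the per-rank hit counts are summed later by the all_reduce in
# evaluate() in train.py.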
| 6,717 | 41.518987 | 138 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/transform.py
|
#!/usr/bin/env python3
# Originates from: https://github.com/facebookresearch/SlowFast/blob/fee19d699c49a81f33b890c5ff592bbb11aa5c54/slowfast/datasets/transform.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import math
import numpy as np
# import cv2
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
_pil_interpolation_to_str = {
Image.NEAREST: "PIL.Image.NEAREST",
Image.BILINEAR: "PIL.Image.BILINEAR",
Image.BICUBIC: "PIL.Image.BICUBIC",
Image.LANCZOS: "PIL.Image.LANCZOS",
Image.HAMMING: "PIL.Image.HAMMING",
Image.BOX: "PIL.Image.BOX",
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _pil_interp(method):
if method == "bicubic":
return Image.BICUBIC
elif method == "lanczos":
return Image.LANCZOS
elif method == "hamming":
return Image.HAMMING
else:
return Image.BILINEAR
logger = logging.getLogger(__name__)
def random_short_side_scale_jitter(
images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
"""
Perform a spatial short scale jittering on the given images and
corresponding boxes.
Args:
images (tensor): images to perform scale jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
min_size (int): the minimal size to scale the frames.
max_size (int): the maximal size to scale the frames.
boxes (ndarray): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale, max_scale].
Returns:
(tensor): the scaled images with dimension of
`num frames` x `channel` x `new height` x `new width`.
(ndarray or None): the scaled boxes with dimension of
`num boxes` x 4.
"""
if inverse_uniform_sampling:
size = int(
round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
)
else:
size = int(round(np.random.uniform(min_size, max_size)))
height = images.shape[2]
width = images.shape[3]
if (width <= height and width == size) or (
height <= width and height == size
):
return images, boxes
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
if boxes is not None:
boxes = boxes * float(new_height) / height
else:
new_width = int(math.floor((float(width) / height) * size))
if boxes is not None:
boxes = boxes * float(new_width) / width
return (
torch.nn.functional.interpolate(
images,
size=(new_height, new_width),
mode="bilinear",
align_corners=False,
),
boxes,
)
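# Usage sketch (illustrative, not part of the original file): jitter the short side of
# an 8-frame clip into [256, 320] while preserving the aspect ratio.
def _demo_short_side_jitter():
    frames = torch.zeros(8, 3, 240, 320)                   # T x C x H x W
    scaled, _ = random_short_side_scale_jitter(frames, min_size=256, max_size=320)
    assert min(scaled.shape[2], scaled.shape[3]) >= 256
    return scaled.shape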
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
Args:
boxes (ndarray or None): bounding boxes to peform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
def random_crop(images, size, boxes=None):
"""
Perform random spatial crop on the given images and corresponding boxes.
Args:
images (tensor): images to perform random crop. The dimension is
`num frames` x `channel` x `height` x `width`.
size (int): the size of height and width to crop on the image.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
cropped (tensor): cropped images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
if images.shape[2] == size and images.shape[3] == size:
return images, boxes
height = images.shape[2]
width = images.shape[3]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
return cropped, cropped_boxes
def horizontal_flip(prob, images, boxes=None):
"""
Perform horizontal flip on the given images and corresponding boxes.
Args:
        prob (float): probability to flip the images.
images (tensor): images to perform horizontal flip, the dimension is
`num frames` x `channel` x `height` x `width`.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
images (tensor): images with dimension of
`num frames` x `channel` x `height` x `width`.
flipped_boxes (ndarray or None): the flipped boxes with dimension of
`num boxes` x 4.
"""
if boxes is None:
flipped_boxes = None
else:
flipped_boxes = boxes.copy()
if np.random.uniform() < prob:
images = images.flip((-1))
if len(images.shape) == 3:
width = images.shape[2]
elif len(images.shape) == 4:
width = images.shape[3]
else:
raise NotImplementedError("Dimension does not supported")
if boxes is not None:
flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
return images, flipped_boxes
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes
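# Usage sketch (illustrative, not part of the original file): spatial_idx selects the
# left/top (0), center (1) or right/bottom (2) crop, which is how the 3-crop evaluation
# views are generated.
def _demo_uniform_crop():
    frames = torch.zeros(8, 3, 224, 288)   # short side already equals the crop size
    crops = [uniform_crop(frames, size=224, spatial_idx=i)[0] for i in range(3)]
    assert all(c.shape == (8, 3, 224, 224) for c in crops)
    return [c.shape for c in crops]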
def clip_boxes_to_image(boxes, height, width):
"""
Clip an array of boxes to an image with the given height and width.
Args:
boxes (ndarray): bounding boxes to perform clipping.
Dimension is `num boxes` x 4.
height (int): given image height.
width (int): given image width.
Returns:
clipped_boxes (ndarray): the clipped boxes with dimension of
`num boxes` x 4.
"""
clipped_boxes = boxes.copy()
clipped_boxes[:, [0, 2]] = np.minimum(
width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
)
clipped_boxes[:, [1, 3]] = np.minimum(
height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
)
return clipped_boxes
def blend(images1, images2, alpha):
"""
Blend two images with a given weight alpha.
Args:
images1 (tensor): the first images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
images2 (tensor): the second images to be blended, the dimension is
`num frames` x `channel` x `height` x `width`.
alpha (float): the blending weight.
Returns:
(tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
return images1 * alpha + images2 * (1 - alpha)
def grayscale(images):
"""
Get the grayscale for the input images. The channels of images should be
in order BGR.
Args:
images (tensor): the input images for getting grayscale. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
img_gray (tensor): blended images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
# R -> 0.299, G -> 0.587, B -> 0.114.
img_gray = torch.tensor(images)
gray_channel = (
0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
)
img_gray[:, 0] = gray_channel
img_gray[:, 1] = gray_channel
img_gray[:, 2] = gray_channel
return img_gray
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
"""
    Perform color jittering on the input images. The channels of images
should be in order BGR.
Args:
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
img_brightness (float): jitter ratio for brightness.
img_contrast (float): jitter ratio for contrast.
img_saturation (float): jitter ratio for saturation.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
jitter = []
if img_brightness != 0:
jitter.append("brightness")
if img_contrast != 0:
jitter.append("contrast")
if img_saturation != 0:
jitter.append("saturation")
if len(jitter) > 0:
order = np.random.permutation(np.arange(len(jitter)))
for idx in range(0, len(jitter)):
if jitter[order[idx]] == "brightness":
images = brightness_jitter(img_brightness, images)
elif jitter[order[idx]] == "contrast":
images = contrast_jitter(img_contrast, images)
elif jitter[order[idx]] == "saturation":
images = saturation_jitter(img_saturation, images)
return images
def brightness_jitter(var, images):
"""
    Perform brightness jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for brightness.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_bright = torch.zeros(images.shape)
images = blend(images, img_bright, alpha)
return images
def contrast_jitter(var, images):
"""
    Perform contrast jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for contrast.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
images = blend(images, img_gray, alpha)
return images
def saturation_jitter(var, images):
"""
    Perform saturation jittering on the input images. The channels of images
should be in order BGR.
Args:
var (float): jitter ratio for saturation.
images (tensor): images to perform color jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
Returns:
images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
alpha = 1.0 + np.random.uniform(-var, var)
img_gray = grayscale(images)
images = blend(images, img_gray, alpha)
return images
def lighting_jitter(images, alphastd, eigval, eigvec):
"""
Perform AlexNet-style PCA jitter on the given images.
Args:
images (tensor): images to perform lighting jitter. Dimension is
`num frames` x `channel` x `height` x `width`.
alphastd (float): jitter ratio for PCA jitter.
eigval (list): eigenvalues for PCA jitter.
eigvec (list[list]): eigenvectors for PCA jitter.
Returns:
out_images (tensor): the jittered images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if alphastd == 0:
return images
# generate alpha1, alpha2, alpha3.
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1,
)
out_images = torch.zeros_like(images)
if len(images.shape) == 3:
# C H W
channel_dim = 0
elif len(images.shape) == 4:
# T C H W
channel_dim = 1
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
for idx in range(images.shape[channel_dim]):
# C H W
if len(images.shape) == 3:
out_images[idx] = images[idx] + rgb[2 - idx]
# T C H W
elif len(images.shape) == 4:
out_images[:, idx] = images[:, idx] + rgb[2 - idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def color_normalization(images, mean, stddev):
"""
    Perform color normalization on the given images.
Args:
images (tensor): images to perform color normalization. Dimension is
`num frames` x `channel` x `height` x `width`.
mean (list): mean values for normalization.
stddev (list): standard deviations for normalization.
Returns:
        out_images (tensor): the normalized images, the dimension is
`num frames` x `channel` x `height` x `width`.
"""
if len(images.shape) == 3:
assert (
len(mean) == images.shape[0]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[0]
), "channel stddev not computed properly"
elif len(images.shape) == 4:
assert (
len(mean) == images.shape[1]
), "channel mean not computed properly"
assert (
len(stddev) == images.shape[1]
), "channel stddev not computed properly"
else:
raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
out_images = torch.zeros_like(images)
for idx in range(len(mean)):
# C H W
if len(images.shape) == 3:
out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
elif len(images.shape) == 4:
out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
else:
raise NotImplementedError(
f"Unsupported dimension {len(images.shape)}"
)
return out_images
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
images,
target_height,
target_width,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
Crop the given images to random size and aspect ratio. A crop of random
size (default: of 0.08 to 1.0) of the original size and a random aspect
ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This
crop is finally resized to given size. This is popularly used to train the
Inception networks.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
cropped = images[:, :, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped,
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
def random_resized_crop_with_shift(
images,
target_height,
target_width,
scale=(0.8, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
):
"""
This is similar to random_resized_crop. However, it samples two different
boxes (for cropping) for the first and last frame. It then linearly
interpolates the two boxes for other frames.
Args:
images: Images to perform resizing and cropping.
target_height: Desired height after cropping.
target_width: Desired width after cropping.
scale: Scale range of Inception-style area based random resizing.
ratio: Aspect ratio range of Inception-style area based random resizing.
"""
t = images.shape[1]
height = images.shape[2]
width = images.shape[3]
i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]
j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]
h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]
w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]
out = torch.zeros((3, t, target_height, target_width))
for ind in range(t):
out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
images[
:,
ind : ind + 1,
i_s[ind] : i_s[ind] + h_s[ind],
j_s[ind] : j_s[ind] + w_s[ind],
],
size=(target_height, target_width),
mode="bilinear",
align_corners=False,
)
return out
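# Usage sketch (illustrative, not part of the original file): this variant assumes a
# C x T x H x W clip (note the hard-coded 3 channels of the output buffer) and slides
# the crop window linearly from the first frame to the last.
def _demo_shifted_crop():
    clip = torch.zeros(3, 8, 256, 320)     # C x T x H x W
    out = random_resized_crop_with_shift(clip, target_height=224, target_width=224)
    assert out.shape == (3, 8, 224, 224)
    return out.shape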
def create_random_augment(
input_size,
auto_augment=None,
interpolation="bilinear",
):
"""
Get video randaug transform.
Args:
input_size: The size of the input video in tuple.
auto_augment: Parameters for randaug. An example:
"rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number
of operations to apply).
interpolation: Interpolation method.
"""
if isinstance(input_size, tuple):
img_size = input_size[-2:]
else:
img_size = input_size
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, tuple):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = {"translate_const": int(img_size_min * 0.45)}
if interpolation and interpolation != "random":
aa_params["interpolation"] = _pil_interp(interpolation)
if auto_augment.startswith("rand"):
return transforms.Compose(
[rand_augment_transform(auto_augment, aa_params)]
)
raise NotImplementedError
def random_sized_crop_img(
im,
size,
jitter_scale=(0.08, 1.0),
jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
max_iter=10,
):
"""
Performs Inception-style cropping (used for training).
"""
assert (
len(im.shape) == 3
    ), "Currently only supports single images for random_sized_crop"
h, w = im.shape[1:3]
i, j, h, w = _get_param_spatial_crop(
scale=jitter_scale,
ratio=jitter_aspect,
height=h,
width=w,
num_repeat=max_iter,
log_scale=False,
switch_hw=True,
)
cropped = im[:, i : i + h, j : j + w]
return torch.nn.functional.interpolate(
cropped.unsqueeze(0),
size=(size, size),
mode="bilinear",
align_corners=False,
).squeeze(0)
# The following code is adapted from the timm library; we will replace the following
# contents with a dependency on PyTorchVideo.
# https://github.com/facebookresearch/pytorchvideo
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation="bilinear",
):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
print("range should be of kind (min, max)")
if interpolation == "random":
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = " ".join(
[_pil_interpolation_to_str[x] for x in self.interpolation]
)
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + "(size={0}".format(self.size)
format_string += ", scale={0}".format(
tuple(round(s, 4) for s in self.scale)
)
format_string += ", ratio={0}".format(
tuple(round(r, 4) for r in self.ratio)
)
format_string += ", interpolation={0})".format(interpolate_str)
return format_string
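# Usage sketch (illustrative, not part of the original file): the transform operates on
# single PIL images, so for video it is applied per frame.
def _demo_random_resized_crop_and_interpolation():
    img = Image.new("RGB", (320, 240))
    transform = RandomResizedCropAndInterpolation(size=224, interpolation="random")
    out = transform(img)
    assert out.size == (224, 224)
    return out.size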
| 27,344 | 33.18125 | 139 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/dataset.py
|
#!/usr/bin/env python
import os, sys
from typing import Optional
import av
import io
import numpy as np
import torch
from torchvision import transforms
from .transform import create_random_augment, random_resized_crop
class VideoDataset(torch.utils.data.Dataset):
def __init__(
self, list_path: str, data_root: str,
num_spatial_views: int, num_temporal_views: int, random_sample: bool,
num_frames: int, sampling_rate: int, spatial_size: int,
mean: torch.Tensor, std: torch.Tensor,
auto_augment: Optional[str] = None, interpolation: str = 'bicubic',
mirror: bool = False,
):
self.data_root = data_root
self.interpolation = interpolation
self.spatial_size = spatial_size
self.mean, self.std = mean, std
self.num_frames, self.sampling_rate = num_frames, sampling_rate
if random_sample:
assert num_spatial_views == 1 and num_temporal_views == 1
self.random_sample = True
self.mirror = mirror
self.auto_augment = auto_augment
else:
assert auto_augment is None and not mirror
self.random_sample = False
self.num_temporal_views = num_temporal_views
self.num_spatial_views = num_spatial_views
with open(list_path) as f:
self.data_list = f.read().splitlines()
def __len__(self):
return len(self.data_list)
def __getitem__(self, idx):
line = self.data_list[idx]
path, label = line.split(',')
path = os.path.join(self.data_root, path)
label = int(label)
container = av.open(path)
frames = {}
for frame in container.decode(video=0):
frames[frame.pts] = frame
container.close()
frames = [frames[k] for k in sorted(frames.keys())]
if self.random_sample:
frame_idx = self._random_sample_frame_idx(len(frames))
frames = [frames[x].to_rgb().to_ndarray() for x in frame_idx]
frames = torch.as_tensor(np.stack(frames)).float() / 255.
if self.auto_augment is not None:
aug_transform = create_random_augment(
input_size=(frames.size(1), frames.size(2)),
auto_augment=self.auto_augment,
interpolation=self.interpolation,
)
frames = frames.permute(0, 3, 1, 2) # T, C, H, W
frames = [transforms.ToPILImage()(frames[i]) for i in range(frames.size(0))]
frames = aug_transform(frames)
frames = torch.stack([transforms.ToTensor()(img) for img in frames])
frames = frames.permute(0, 2, 3, 1)
frames = (frames - self.mean) / self.std
frames = frames.permute(3, 0, 1, 2) # C, T, H, W
frames = random_resized_crop(
frames, self.spatial_size, self.spatial_size,
)
else:
frames = [x.to_rgb().to_ndarray() for x in frames]
frames = torch.as_tensor(np.stack(frames))
frames = frames.float() / 255.
frames = (frames - self.mean) / self.std
frames = frames.permute(3, 0, 1, 2) # C, T, H, W
if frames.size(-2) < frames.size(-1):
new_width = frames.size(-1) * self.spatial_size // frames.size(-2)
new_height = self.spatial_size
else:
new_height = frames.size(-2) * self.spatial_size // frames.size(-1)
new_width = self.spatial_size
frames = torch.nn.functional.interpolate(
frames, size=(new_height, new_width),
mode='bilinear', align_corners=False,
)
frames = self._generate_spatial_crops(frames)
frames = sum([self._generate_temporal_crops(x) for x in frames], [])
if len(frames) > 1:
frames = torch.stack(frames)
return frames, label
def _generate_temporal_crops(self, frames):
seg_len = (self.num_frames - 1) * self.sampling_rate + 1
if frames.size(1) < seg_len:
frames = torch.cat([frames, frames[:, -1:].repeat(1, seg_len - frames.size(1), 1, 1)], dim=1)
slide_len = frames.size(1) - seg_len
crops = []
for i in range(self.num_temporal_views):
if self.num_temporal_views == 1:
st = slide_len // 2
else:
st = round(slide_len / (self.num_temporal_views - 1) * i)
crops.append(frames[:, st: st + self.num_frames * self.sampling_rate: self.sampling_rate])
return crops
def _generate_spatial_crops(self, frames):
if self.num_spatial_views == 1:
assert min(frames.size(-2), frames.size(-1)) >= self.spatial_size
h_st = (frames.size(-2) - self.spatial_size) // 2
w_st = (frames.size(-1) - self.spatial_size) // 2
h_ed, w_ed = h_st + self.spatial_size, w_st + self.spatial_size
return [frames[:, :, h_st: h_ed, w_st: w_ed]]
elif self.num_spatial_views == 3:
assert min(frames.size(-2), frames.size(-1)) == self.spatial_size
crops = []
margin = max(frames.size(-2), frames.size(-1)) - self.spatial_size
for st in (0, margin // 2, margin):
ed = st + self.spatial_size
if frames.size(-2) > frames.size(-1):
crops.append(frames[:, :, st: ed, :])
else:
crops.append(frames[:, :, :, st: ed])
return crops
else:
raise NotImplementedError()
    def _random_sample_frame_idx(self, video_len):
        # samples self.num_frames indices from a video with video_len decoded frames
        frame_indices = []
        if self.sampling_rate < 0: # TSN-style sampling
            seg_size = (video_len - 1) / self.num_frames
            for i in range(self.num_frames):
                start, end = round(seg_size * i), round(seg_size * (i + 1))
                frame_indices.append(np.random.randint(start, end + 1))
        elif self.sampling_rate * (self.num_frames - 1) + 1 >= video_len:
            for i in range(self.num_frames):
                frame_indices.append(i * self.sampling_rate if i * self.sampling_rate < video_len else frame_indices[-1])
        else:
            start = np.random.randint(video_len - self.sampling_rate * (self.num_frames - 1))
            frame_indices = list(range(start, start + self.sampling_rate * self.num_frames, self.sampling_rate))
        return frame_indices
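# Worked example (illustrative, not part of the original file): the TSN branch above
# (sampling_rate < 0) splits a video into num_frames roughly equal segments and draws
# one index uniformly from each segment, sketched standalone below.
def _demo_tsn_indices(video_len=40, num_frames=8):
    seg_size = (video_len - 1) / num_frames
    return [int(np.random.randint(round(seg_size * i), round(seg_size * (i + 1)) + 1))
            for i in range(num_frames)]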
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, list_path: str, num_frames: int, num_views: int, spatial_size: int):
with open(list_path) as f:
self.len = len(f.read().splitlines())
self.num_frames = num_frames
self.num_views = num_views
self.spatial_size = spatial_size
def __len__(self):
return self.len
def __getitem__(self, _):
shape = [3, self.num_frames, self.spatial_size, self.spatial_size]
if self.num_views != 1:
shape = [self.num_views] + shape
return torch.zeros(shape), 0
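# Usage sketch (illustrative, not part of the original file): DummyDataset mirrors the
# output shapes of VideoDataset without any video decoding; a 3-view validation sample
# comes out as (num_views, C, T, H, W).
def _demo_dummy_dataset():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('a.mp4,0\nb.mp4,1\n')
        list_path = f.name
    frames, label = DummyDataset(list_path, num_frames=8, num_views=3, spatial_size=224)[0]
    assert frames.shape == (3, 3, 8, 224, 224) and label == 0
    return frames.shape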
| 7,185 | 36.821053 | 115 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/__init__.py
|
#!/usr/bin/env python
from .dataloader import setup_arg_parser, create_train_loader, create_val_loader
| 103 | 33.666667 | 80 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/random_erasing.py
|
#!/usr/bin/env python
# Originates from: https://github.com/facebookresearch/SlowFast/blob/fee19d699c49a81f33b890c5ff592bbb11aa5c54/slowfast/datasets/random_erasing.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py
published under an Apache License 2.0.
COMMENT FROM ORIGINAL:
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import random
import torch
def _get_pixels(
per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda"
):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty(
(patch_size[0], 1, 1), dtype=dtype, device=device
).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
"""Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
This variant of RandomErasing is intended to be applied to either a batch
or single image tensor after it has been normalized by dataset mean and std.
Args:
probability: Probability that the Random Erasing operation will be performed.
min_area: Minimum percentage of erased area wrt input image area.
max_area: Maximum percentage of erased area wrt input image area.
min_aspect: Minimum aspect ratio of erased area.
mode: pixel color mode, one of 'const', 'rand', or 'pixel'
'const' - erase block is constant color of 0 for all channels
'rand' - erase block is same per-channel random (normal) color
'pixel' - erase block is per-pixel random (normal) color
max_count: maximum number of erasing blocks per image, area per box is scaled by count.
per-image count is randomly chosen between 1 and this value.
"""
def __init__(
self,
probability=0.5,
min_area=0.02,
max_area=1 / 3,
min_aspect=0.3,
max_aspect=None,
mode="const",
min_count=1,
max_count=None,
num_splits=0,
device="cuda",
cube=True,
):
self.probability = probability
self.min_area = min_area
self.max_area = max_area
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
self.min_count = min_count
self.max_count = max_count or min_count
self.num_splits = num_splits
mode = mode.lower()
self.rand_color = False
self.per_pixel = False
self.cube = cube
if mode == "rand":
self.rand_color = True # per block random normal
elif mode == "pixel":
self.per_pixel = True # per pixel random normal
else:
assert not mode or mode == "const"
self.device = device
def _erase(self, img, chan, img_h, img_w, dtype):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(10):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
img[:, top : top + h, left : left + w] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def _erase_cube(
self,
img,
batch_start,
batch_size,
chan,
img_h,
img_w,
dtype,
):
if random.random() > self.probability:
return
area = img_h * img_w
count = (
self.min_count
if self.min_count == self.max_count
else random.randint(self.min_count, self.max_count)
)
for _ in range(count):
for _ in range(100):
target_area = (
random.uniform(self.min_area, self.max_area) * area / count
)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
for i in range(batch_start, batch_size):
img_instance = img[i]
img_instance[
:, top : top + h, left : left + w
] = _get_pixels(
self.per_pixel,
self.rand_color,
(chan, h, w),
dtype=dtype,
device=self.device,
)
break
def __call__(self, input):
if len(input.size()) == 3:
self._erase(input, *input.size(), input.dtype)
else:
batch_size, chan, img_h, img_w = input.size()
# skip first slice of batch if num_splits is set (for clean portion of samples)
batch_start = (
batch_size // self.num_splits if self.num_splits > 1 else 0
)
if self.cube:
self._erase_cube(
input,
batch_start,
batch_size,
chan,
img_h,
img_w,
input.dtype,
)
else:
for i in range(batch_start, batch_size):
self._erase(input[i], chan, img_h, img_w, input.dtype)
return input
| 7,056 | 37.353261 | 145 |
py
|
Vita-CLIP
|
Vita-CLIP-main/video_dataset/rand_augment.py
|
#!/usr/bin/env python
# Originates from: https://github.com/facebookresearch/SlowFast/blob/fee19d699c49a81f33b890c5ff592bbb11aa5c54/slowfast/datasets/rand_augment.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
pulished under an Apache License 2.0.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code implements the searched ImageNet policies with various tweaks and
improvements and does not include any of the search code. AA and RA
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
Papers:
AutoAugment: Learning Augmentation Policies from Data
https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection
https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation...
https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and
Uncertainty https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.0
_HPARAMS_DEFAULT = {
"translate_const": 250,
"img_mean": _FILL,
}
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop("resample", Image.BILINEAR)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if "fillcolor" in kwargs and _PIL_VER < (5, 0):
kwargs.pop("fillcolor")
kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs
)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs
)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0],
-rotn_center[1] - post_trans[1],
matrix,
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs["resample"])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.0
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level, _hparams):
# the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
# range [0.1, 1.9]
level = (level / _MAX_LEVEL) * 0.9
level = 1.0 + _randomly_negate(level)
return (level,)
def _shear_level_to_arg(level, _hparams):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, hparams):
translate_const = hparams["translate_const"]
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level, hparams):
# default range [-0.45, 0.45]
translate_pct = hparams.get("translate_pct", 0.45)
level = (level / _MAX_LEVEL) * translate_pct
level = _randomly_negate(level)
return (level,)
def _posterize_level_to_arg(level, _hparams):
# As per Tensorflow TPU EfficientNet impl
# range [0, 4], 'keep 0 up to 4 MSB of original image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4),)
def _posterize_increasing_level_to_arg(level, hparams):
# As per Tensorflow models research and UDA impl
# range [4, 0], 'keep 4 down to 0 MSB of original image',
# intensity/severity of augmentation increases with level
return (4 - _posterize_level_to_arg(level, hparams)[0],)
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4) + 4,)
def _solarize_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 256),)
def _solarize_increasing_level_to_arg(level, _hparams):
# range [0, 256]
# intensity/severity of augmentation increases with level
return (256 - _solarize_level_to_arg(level, _hparams)[0],)
def _solarize_add_level_to_arg(level, _hparams):
# range [0, 110]
return (int((level / _MAX_LEVEL) * 110),)
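# Worked example (illustrative, not part of the original file): at magnitude 9 out of
# _MAX_LEVEL = 10, Rotate draws +/- (9 / 10) * 30 = 27 degrees, ShearX/ShearY draw
# +/- (9 / 10) * 0.3 = 0.27, and Solarize thresholds at int((9 / 10) * 256) = 230.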
LEVEL_TO_ARG = {
"AutoContrast": None,
"Equalize": None,
"Invert": None,
"Rotate": _rotate_level_to_arg,
# There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
"Posterize": _posterize_level_to_arg,
"PosterizeIncreasing": _posterize_increasing_level_to_arg,
"PosterizeOriginal": _posterize_original_level_to_arg,
"Solarize": _solarize_level_to_arg,
"SolarizeIncreasing": _solarize_increasing_level_to_arg,
"SolarizeAdd": _solarize_add_level_to_arg,
"Color": _enhance_level_to_arg,
"ColorIncreasing": _enhance_increasing_level_to_arg,
"Contrast": _enhance_level_to_arg,
"ContrastIncreasing": _enhance_increasing_level_to_arg,
"Brightness": _enhance_level_to_arg,
"BrightnessIncreasing": _enhance_increasing_level_to_arg,
"Sharpness": _enhance_level_to_arg,
"SharpnessIncreasing": _enhance_increasing_level_to_arg,
"ShearX": _shear_level_to_arg,
"ShearY": _shear_level_to_arg,
"TranslateX": _translate_abs_level_to_arg,
"TranslateY": _translate_abs_level_to_arg,
"TranslateXRel": _translate_rel_level_to_arg,
"TranslateYRel": _translate_rel_level_to_arg,
}
NAME_TO_OP = {
"AutoContrast": auto_contrast,
"Equalize": equalize,
"Invert": invert,
"Rotate": rotate,
"Posterize": posterize,
"PosterizeIncreasing": posterize,
"PosterizeOriginal": posterize,
"Solarize": solarize,
"SolarizeIncreasing": solarize,
"SolarizeAdd": solarize_add,
"Color": color,
"ColorIncreasing": color,
"Contrast": contrast,
"ContrastIncreasing": contrast,
"Brightness": brightness,
"BrightnessIncreasing": brightness,
"Sharpness": sharpness,
"SharpnessIncreasing": sharpness,
"ShearX": shear_x,
"ShearY": shear_y,
"TranslateX": translate_x_abs,
"TranslateY": translate_y_abs,
"TranslateXRel": translate_x_rel,
"TranslateYRel": translate_y_rel,
}
class AugmentOp:
"""
    Apply a single augmentation op to a PIL Image or to a list of frames (video).
"""
def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
hparams = hparams or _HPARAMS_DEFAULT
self.aug_fn = NAME_TO_OP[name]
self.level_fn = LEVEL_TO_ARG[name]
self.prob = prob
self.magnitude = magnitude
self.hparams = hparams.copy()
self.kwargs = {
"fillcolor": hparams["img_mean"]
if "img_mean" in hparams
else _FILL,
"resample": hparams["interpolation"]
if "interpolation" in hparams
else _RANDOM_INTERPOLATION,
}
# If magnitude_std is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from a normal distribution
# with mean `magnitude` and std-dev of `magnitude_std`.
# NOTE This is my own hack, being tested, not in papers or reference impls.
self.magnitude_std = self.hparams.get("magnitude_std", 0)
def __call__(self, img_list):
if self.prob < 1.0 and random.random() > self.prob:
return img_list
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range
level_args = (
self.level_fn(magnitude, self.hparams)
if self.level_fn is not None
else ()
)
if isinstance(img_list, list):
return [
self.aug_fn(img, *level_args, **self.kwargs) for img in img_list
]
else:
return self.aug_fn(img_list, *level_args, **self.kwargs)
_RAND_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"Posterize",
"Solarize",
"SolarizeAdd",
"Color",
"Contrast",
"Brightness",
"Sharpness",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
_RAND_INCREASING_TRANSFORMS = [
"AutoContrast",
"Equalize",
"Invert",
"Rotate",
"PosterizeIncreasing",
"SolarizeIncreasing",
"SolarizeAdd",
"ColorIncreasing",
"ContrastIncreasing",
"BrightnessIncreasing",
"SharpnessIncreasing",
"ShearX",
"ShearY",
"TranslateXRel",
"TranslateYRel",
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to do so.
_RAND_CHOICE_WEIGHTS_0 = {
"Rotate": 0.3,
"ShearX": 0.2,
"ShearY": 0.2,
"TranslateXRel": 0.1,
"TranslateYRel": 0.1,
"Color": 0.025,
"Sharpness": 0.025,
"AutoContrast": 0.025,
"Solarize": 0.005,
"SolarizeAdd": 0.005,
"Contrast": 0.005,
"Brightness": 0.005,
"Equalize": 0.005,
"Posterize": 0,
"Invert": 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
transforms = transforms or _RAND_TRANSFORMS
assert weight_idx == 0 # only one set of weights currently
rand_weights = _RAND_CHOICE_WEIGHTS_0
probs = [rand_weights[k] for k in transforms]
probs /= np.sum(probs)
return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
hparams = hparams or _HPARAMS_DEFAULT
transforms = transforms or _RAND_TRANSFORMS
return [
AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams)
for name in transforms
]
class RandAugment:
def __init__(self, ops, num_layers=2, choice_weights=None):
self.ops = ops
self.num_layers = num_layers
self.choice_weights = choice_weights
def __call__(self, img):
# no replacement when using weighted choice
ops = np.random.choice(
self.ops,
self.num_layers,
replace=self.choice_weights is None,
p=self.choice_weights,
)
for op in ops:
img = op(img)
return img
def rand_augment_transform(config_str, hparams):
"""
RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
Create a RandAugment transform
:param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, which are not order specific, determine
'm' - integer magnitude of rand augment
'n' - integer num layers (number of transform ops selected per image)
    'w' - integer probability weight index (index of a set of weights to influence choice of op)
'mstd' - float std deviation of magnitude noise applied
'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
:param hparams: Other hparams (kwargs) for the RandAugmentation scheme
:return: A PyTorch compatible Transform
"""
magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10)
num_layers = 2 # default to 2 ops per image
weight_idx = None # default to no probability weights for op choice
transforms = _RAND_TRANSFORMS
config = config_str.split("-")
assert config[0] == "rand"
config = config[1:]
for c in config:
cs = re.split(r"(\d.*)", c)
if len(cs) < 2:
continue
key, val = cs[:2]
if key == "mstd":
# noise param injected via hparams for now
hparams.setdefault("magnitude_std", float(val))
elif key == "inc":
            if int(val):  # interpret the value as an integer flag so that 'inc0' disables the increasing op set
transforms = _RAND_INCREASING_TRANSFORMS
elif key == "m":
magnitude = int(val)
elif key == "n":
num_layers = int(val)
elif key == "w":
weight_idx = int(val)
else:
            raise NotImplementedError('Unknown RandAugment config section: {}'.format(c))
ra_ops = rand_augment_ops(
magnitude=magnitude, hparams=hparams, transforms=transforms
)
choice_weights = (
None if weight_idx is None else _select_rand_weights(weight_idx)
)
return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
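# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the reference code).
# It assumes PIL is available and that 'translate_const' / 'img_mean' are the
# hparams keys consumed by the translation / fill logic above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from PIL import Image

    aa_hparams = dict(translate_const=100, img_mean=(124, 116, 104))
    # magnitude 9, 2 ops per clip, magnitude noise std 0.5, 'increasing' op set
    aa_transform = rand_augment_transform('rand-m9-n2-mstd0.5-inc1', aa_hparams)
    frames = [Image.new('RGB', (224, 224)) for _ in range(8)]
    augmented = aa_transform(frames)  # AugmentOp.__call__ handles lists of frames
    print(len(augmented), augmented[0].size)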
| 16,366 | 29.478585 | 143 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/tools/simmc_dataset.py
|
import json
import pdb
import re
import string
import torch
from nltk.tokenize import WordPunctTokenizer
from torch.utils.data import Dataset
"""
The dialog intents have the shapes:
DA:<DIALOG_ACT>:<ACTIVITY>:<OBJECT> or DA:<DIALOG_ACT>:<ACTIVITY>:<OBJECT>.<attribute>
Examples:
DA:INFORM:GET:CLOTHING.embellishment
The <DIALOG_ACT> values are shared between the fashion and furniture datasets. <ACTIVITY> values are dataset specific (see paper fig.3).
"""
DIALOG_ACT = {'ASK', 'CONFIRM', 'INFORM', 'PROMPT', 'REQUEST'}
ACTIVITY = {'ADD_TO_CART', 'CHECK', 'COMPARE', 'COUNT', 'DISPREFER', 'GET', 'PREFER', 'REFINE'}
class SIMMCDataset(Dataset):
"""Dataset wrapper for SIMMC Fashion
(list) self.ids[idx] = <dialogue_id>
(dict) self.id2dialog[<dialogue_id>].keys() = ['dialogue', 'dialogue_coref_map', 'dialogue_idx', 'domains', 'dialogue_task_id']
(dict) self.id2dialog[<dialogue_id>]['dialogue'][<dialogue_turn>].keys() = ['belief_state', 'domain', 'state_graph_0', 'state_graph_1', 'state_graph_2',
'system_transcript', 'system_transcript_annotated', 'system_turn_label',
'transcript', 'transcript_annotated', 'turn_idx', 'turn_label',
'visual_objects', 'raw_assistant_keystrokes']
(list) self.transcripts[idx] = 'dialogueid_turn' (e.g., '3094_3', '3094_0')
(dict) self.task_mapping[<task_id>].keys() = ['task_id', 'image_ids', 'focus_image', 'memory_images', 'database_images']
(dict) self.processed_turns[<dialogue_id>][turn] = {'transcript': <tokenized_transcript>, 'system_transcript': <tokenized_system_transcript>}
"""
def __init__(self, data_path, metadata_path, verbose=True):
"""Dataset constructor.
Args:
            data_path (str): path to dataset json file
metadata_path (str): path to metadata json file
"""
data_fp = open(data_path)
raw_data = json.load(data_fp)
metadata_fp = open(metadata_path)
self.metadata = json.load(metadata_fp)
self.split = raw_data['split']
self.version = raw_data['version']
self.year = raw_data['year']
self.domain = raw_data['domain']
self.verbose = verbose
if self.verbose:
print('Creating dataset index ...')
self.create_index(raw_data)
if self.verbose:
print('Skipped dialogs: {}'.format(self.skipped_dialogs))
print(' ... index created')
def __len__(self):
return len(self.transcripts)
def __getitem__(self, index):
dial_id, turn = self.transcripts[index].split('_')
dial_id = int(dial_id)
turn = int(turn)
user_req = self.id2dialog[dial_id]['dialogue'][turn]['transcript']
wizard_resp = self.id2dialog[dial_id]['dialogue'][turn]['system_transcript']
# extract dialogue history
turn_str = '{} [SEP] {}'
        history = [turn_str.format(self.id2dialog[dial_id]['dialogue'][t]['transcript'],
                                    self.id2dialog[dial_id]['dialogue'][t]['system_transcript'])
                    for t in range(turn)]
# dispatch data across different dataset instantiation
if isinstance(self, SIMMCDatasetForActionPrediction,) or isinstance(self, SIMMCDatasetForResponseGeneration,):
focus_item = self.id2focus[dial_id][turn]
attributes = []
if self.id2act[dial_id][turn]['action_supervision'] is not None:
attributes = self.id2act[dial_id][turn]['action_supervision']['attributes']
return_tuple = (dial_id, turn, user_req, wizard_resp, history, focus_item, self.id2act[dial_id][turn]['action'], attributes)
if isinstance(self, SIMMCDatasetForResponseGeneration,):
return_tuple += (self.id2candidates[dial_id][turn]['retrieval_candidates'],)
return return_tuple
def extract_visual_context(self, dial_id):
task_id = self.id2dialog[dial_id]['dialogue_task_id']
init_focus = self.task_mapping[task_id]['focus_image']
focus_items = [init_focus]
for act_annotation in self.id2act[dial_id]:
#force object permanence
if act_annotation['action_supervision'] is None or 'focus' not in act_annotation['action_supervision']:
focus_items.append(focus_items[-1])
else:
focus_items.append(act_annotation['action_supervision']['focus'])
return focus_items
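    # Illustrative example of the object-permanence behaviour above (toy values):
    #   initial focus = 10, per-turn 'action_supervision' focus = [None, 52, None]
    #   -> focus_items = [10, 10, 52, 52]  (length = number of turns + 1)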
def create_index(self, raw_data):
self.ids = []
self.id2dialog = {}
self.transcripts = []
self.skipped_dialogs = set()
for dialog in raw_data['dialogue_data']:
if 'dialogue_task_id' in dialog:
self.ids.append(dialog['dialogue_idx'])
dialog_obj = {
'dialogue': dialog['dialogue'],
'dialogue_coref_map': dialog['dialogue_coref_map'],
'dialogue_idx': dialog['dialogue_idx'],
'domains': dialog['domains'],
'dialogue_task_id': dialog['dialogue_task_id']}
transcripts = ['{}_{}'.format(dialog['dialogue_idx'], turn) for turn, _ in enumerate(dialog['dialogue'])]
self.id2dialog[dialog['dialogue_idx']] = dialog_obj
self.transcripts.extend(transcripts)
else:
if self.verbose:
#print('id: {} ; is dialogue_task_id missing: {}'.format(dialog['dialogue_idx'], not 'dialogue_task_id' in dialog))
self.skipped_dialogs.add(dialog['dialogue_idx'])
self.task_mapping = {}
for task in raw_data['task_mapping']:
self.task_mapping[task['task_id']] = task
def getmetadata(self, obj_id):
"""Return metadata for the object with the specified id
Args:
obj_id (str): id of the object
Returns:
dict: returns a dict with the following shape
{'metadata':
{'availability': [],
'availableSizes': "['L', 'XXL']",
'brand': '212 Local',
'color': ['black'],
'customerRating': '2.06',
'embellishments': ['layered'],
'hemLength': ['knee_length'],
'pattern': [],
'price': '$269',
'size': [],
'skirtStyle': ['asymmetrical', 'fit_and_flare', 'loose'],
'type': 'skirt'
},
'url': 'GByeggJtfhLUq9UGAAAAAABqViN1btAUAAAB'
}
"""
return self.metadata[obj_id]
def __str__(self):
return '{}_{}_{}_v{}'.format(self.domain, self.split, self.year, self.version)
class SIMMCDatasetForResponseGeneration(SIMMCDataset):
# conversion from attribute and action annotations format to english string
_ATTRS = {'embellishment', 'skirtStyle', 'availableSizes', 'dressStyle', 'material', 'clothingStyle', 'jacketStyle',
'sleeveLength', 'soldBy', 'price', 'ageRange', 'hemLength', 'size', 'warmthRating', 'sweaterStyle',
'forGender', 'madeIn', 'info', 'customerRating', 'hemStyle', 'hasPart', 'pattern', 'clothingCategory',
'forOccasion', 'waistStyle', 'sleeveStyle', 'amountInStock', 'waterResistance', 'necklineStyle', 'skirtLength',
'color', 'brand', 'sequential'}
_ATTR2STR = {'skirtstyle': 'skirt style', 'availablesizes': 'available sizes', 'dressstyle': 'dress style', 'clothingstyle': 'clothing style',
'jacketstyle': 'jacket style', 'sleevelength': 'sleeve length', 'soldby': 'sold by', 'agerange': 'age range', 'hemlength': 'hem length',
'warmthrating': 'warmth rating', 'sweaterstyle': 'sweater style', 'forgender': 'for gender', 'madein': 'made in', 'customerrating': 'customer rating',
'hemstyle': 'hem style', 'haspart': 'has part', 'clothingcategory': 'clothing category', 'foroccasion': 'for occasion', 'waiststyle': 'waist style',
'sleevestyle': 'sleeve style', 'amountinstock': 'amount in stock', 'waterresistance': 'water resistance', 'necklinestyle': 'neckline style',
'skirtlength': 'skirt length'}
_ACT2STR = {'none': 'none', 'searchdatabase': 'search database', 'searchmemory': 'search memory', 'specifyinfo': 'specify info', 'addtocart': 'add to cart'}
#map attribute names to metadata fields
_ATTR2FIELD = {'embellishment': 'embellishments', 'skirtStyle': 'skirtStyle', 'availableSizes': 'availableSizes', 'dressStyle': 'dressStyle', 'jacketStyle': 'jacketStyle',
'sleeveLength': 'sleeveStyle', 'soldBy': 'brand', 'price': 'price', 'hemLength': 'hemLength', 'size': 'availableSizes', 'sweaterStyle': 'sweaterStyle',
'customerRating': 'customerRating', 'hemStyle': 'hemStyle', 'hasPart': 'embellishments', 'pattern': 'pattern', 'clothingCategory': 'type',
'waistStyle': 'waistStyle', 'sleeveStyle': 'sleeveStyle', 'necklineStyle': 'necklineStyle', 'skirtLength': 'skirtStyle', 'color': 'color', 'brand': 'brand'}
def __init__(self, data_path, metadata_path, actions_path, candidates_path, verbose=True):
super(SIMMCDatasetForResponseGeneration, self).__init__(data_path=data_path, metadata_path=metadata_path, verbose=verbose)
self.task = 'response_generation'
self.load_actions(actions_path)
self.load_candidates(candidates_path)
self.id2focus = {}
for id in self.ids:
#for response generation the context is shifted right (response based on the item chosen by the wizard)
self.id2focus[id] = self.extract_visual_context(id)[1:]
assert len(self.id2dialog[id]['dialogue']) == len(self.id2focus[id]), 'Focus items do not match dialogue {} length'.format(id)
self.processed_metadata = {}
self.process_metadata_items()
def process_metadata_items(self):
"""This method process the data inside metadata fields and make each field values a list
(avoiding mixing up single values and lists)
Args:
tokenizer ([type]): [description]
"""
for item_id, item in self.metadata.items():
            assert item_id not in self.processed_metadata, 'Item {} appears twice'.format(item_id)
self.processed_metadata[item_id] = {}
for field, field_vals in item['metadata'].items():
curr_field = ''
# availability field is always empty
if field == 'availability' or field == 'url':
continue
values = field_vals
if field == 'availableSizes' and not isinstance(values, list,):
values = self.repair_size_list(values)
#field_tokens = tokenizer.tokenize(field)
                field_tokens = re.split(r'_|\s', field)
for tok in field_tokens:
cleaned_tok = self._ATTR2STR[tok.lower()] if tok.lower() in self._ATTR2STR else tok.lower()
curr_field += cleaned_tok + ' '
curr_field = curr_field[:-1]
curr_val = ''
proc_values = []
if isinstance(values, list,):
for val in values:
curr_val = ''
#value_tokens = tokenizer.tokenize(val)
                        value_tokens = re.split(r'_|\s', val)
proc_values.append(' '.join(value_tokens))
else:
                    value_tokens = re.split(r'_|\s', values)
proc_values.append(' '.join(value_tokens))
                #the metadata JSON file contains entries with the hemLength field twice.
# In this case just discard the one with no values.
if curr_field == 'hem length' and curr_field in self.processed_metadata[item_id]:
if not len(self.processed_metadata[item_id][curr_field]):
self.processed_metadata[item_id][curr_field] = proc_values
continue
                assert curr_field not in self.processed_metadata[item_id], 'Field {} appears twice in item {}. Please remove one of them (preferably the empty one)'.format(curr_field, item_id)
self.processed_metadata[item_id][curr_field] = proc_values
def repair_size_list(self, str_val):
"""fixes availableSizes when it is a stringified list (e.g., "[' xl ', ' m ']"
Args:
str_val ([type]): [description]
"""
return [word for word in str_val[2:-2].split('\', \'')]
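    # Illustrative example of the repair above:
    #   repair_size_list("['XS', 'S', 'M']") -> ['XS', 'S', 'M']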
def __getitem__(self, index):
dial_id, turn, user_req, wizard_resp, history, focus, action, attributes, candidates_ids = super().__getitem__(index)
#convert actions and attributes to english strings
action = action.lower() if action.lower() not in self._ACT2STR else self._ACT2STR[action.lower()]
raw_fields = [attr if attr not in self._ATTR2FIELD else self._ATTR2FIELD[attr] for attr in attributes]
fields = [field.lower() if field.lower() not in self._ATTR2STR else self._ATTR2STR[field.lower()] for field in raw_fields]
item_attributes = []
if not len(fields):
item_attributes.append([])
for field in fields:
if field in self.processed_metadata[str(focus)] and len(self.processed_metadata[str(focus)][field]):
item_attributes.append(self.processed_metadata[str(focus)][field])
else:
item_attributes.append([])
retrieval_candidates = [self.candidates[candidate_id] for candidate_id in candidates_ids]
return dial_id, turn, user_req, wizard_resp, history, focus, action, item_attributes, retrieval_candidates
def __len__(self):
return super().__len__()
def __str__(self):
return '{}_subtask({})'.format(super().__str__(), self.task)
def load_candidates(self, candidates_path):
self.candidates = []
self.id2candidates = {}
with open(candidates_path) as fp:
raw_candidates = json.load(fp)
for candidate in raw_candidates['system_transcript_pool']:
self.candidates.append(candidate)
for candidates_per_dial in raw_candidates['retrieval_candidates']:
self.id2candidates[candidates_per_dial['dialogue_idx']] = candidates_per_dial['retrieval_candidates']
#check if all the candidate ids correspond to a valid candidate in the candidate pool
for (_, candidates_per_dial) in self.id2candidates.items():
for candidates_per_turn in candidates_per_dial:
for candidate_id in candidates_per_turn['retrieval_candidates']:
assert candidate_id < len(self.candidates), 'Candidate with id {} not present in candidate pool'.format(candidate_id)
def load_actions(self, actions_path):
self.id2act = {}
self.id2actfocus = {}
with open(actions_path) as fp:
raw_actions = json.load(fp)
for action in raw_actions:
if action['dialog_id'] in self.skipped_dialogs:
continue
assert len(action['actions']) == len(action['focus_images']), 'focus_images has different length than number of actions'
self.id2act[action['dialog_id']] = action['actions']
self.id2actfocus[action['dialog_id']] = action['focus_images']
#check if we have actions for all the turns
for dial_id in self.ids:
assert len(self.id2dialog[dial_id]['dialogue']) == len(self.id2act[dial_id]),\
'Actions number does not match dialogue turns in dialogue {}'.format(dial_id)
class SIMMCDatasetForActionPrediction(SIMMCDataset):
"""Dataset wrapper for SIMMC Fashion for api call prediction subtask
"""
_ACT2LABEL = {'None': 0,'SearchDatabase': 1, 'SearchMemory': 2, 'SpecifyInfo': 3, 'AddToCart': 4}
_LABEL2ACT = ['None','SearchDatabase', 'SearchMemory', 'SpecifyInfo', 'AddToCart']
"""
_ATTR2LABEL = {'embellishment': 0, 'skirtStyle': 1, 'availableSizes': 2, 'dressStyle': 3, 'material': 4, 'clothingStyle': 5, 'jacketStyle': 6,
'sleeveLength': 7, 'soldBy': 8, 'price': 9, 'ageRange': 10, 'hemLength': 11, 'size': 12, 'warmthRating': 13, 'sweaterStyle': 14,
'forGender': 15, 'madeIn': 16, 'info': 17, 'customerRating': 18, 'hemStyle': 19, 'hasPart': 20, 'pattern': 21, 'clothingCategory': 22,
'forOccasion': 23, 'waistStyle': 24, 'sleeveStyle': 25, 'amountInStock': 26, 'waterResistance': 27, 'necklineStyle': 28, 'skirtLength': 29,
'color': 30, 'brand': 31, 'sequential': 32}
_ATTRS = ['embellishment', 'skirtStyle', 'availableSizes', 'dressStyle', 'material', 'clothingStyle', 'jacketStyle',
'sleeveLength', 'soldBy', 'price', 'ageRange', 'hemLength', 'size', 'warmthRating', 'sweaterStyle',
'forGender', 'madeIn', 'info', 'customerRating', 'hemStyle', 'hasPart', 'pattern', 'clothingCategory',
'forOccasion', 'waistStyle', 'sleeveStyle', 'amountInStock', 'waterResistance', 'necklineStyle', 'skirtLength',
'color', 'brand', 'sequential']
"""
_ATTR2LABEL = {'embellishment': 0, 'availableSizes': 1, 'price': 2, 'info': 3, 'customerRating': 4,
'pattern': 5, 'color': 6, 'brand': 7, 'other': 8}
_ATTRS = ['embellishment', 'availableSizes', 'price', 'info', 'customerRating', 'pattern', 'color', 'brand', 'other']
def __init__(self, data_path, metadata_path, actions_path, verbose=True):
super(SIMMCDatasetForActionPrediction, self).__init__(data_path=data_path, metadata_path=metadata_path, verbose=verbose)
self.task = 'api_call_prediction'
self.load_actions(actions_path)
self.id2focus = {}
for id in self.ids:
#for action prediction do not use the item context after the last turn
self.id2focus[id] = self.extract_visual_context(id)[:-1]
assert len(self.id2dialog[id]['dialogue']) == len(self.id2focus[id]), 'Focus items do not match dialogue {} length'.format(id)
def __getitem__(self, index):
        #the parent __getitem__ also returns the wizard response, which is unused for this subtask
        dial_id, turn, transcript, wizard_resp, history, visual_context, action, attributes = super().__getitem__(index)
one_hot_attrs = [0]*(len(self._ATTR2LABEL))
for attr in attributes:
            #assert attr in self._ATTR2LABEL, 'Unknown attribute \'{}\''.format(attr)
curr_attr = attr if attr in self._ATTR2LABEL else 'other'
#assert one_hot_attrs[self._ATTR2LABEL[curr_attr]] == 0, 'Attribute \'{}\' is present multiple times'.format(attr)
one_hot_attrs[self._ATTR2LABEL[curr_attr]] = 1
return dial_id, turn, transcript, history, visual_context, self._ACT2LABEL[action], one_hot_attrs
def __len__(self):
return super().__len__()
def __str__(self):
return '{}_subtask({})'.format(super().__str__(), self.task)
def load_actions(self, actions_path):
#TODO sort id2act based on 'turn_idx' field
self.id2act = {}
with open(actions_path) as fp:
raw_actions = json.load(fp)
for action in raw_actions:
if action['dialog_id'] in self.skipped_dialogs:
continue
self.id2act[action['dialog_id']] = action['actions']
#check if we have actions for all the turns
for dial_id in self.ids:
assert len(self.id2dialog[dial_id]['dialogue']) == len(self.id2act[dial_id]),\
'Actions number does not match dialogue turns in dialogue {}'.format(dial_id)
#compute frequency for actions
act_freq = [0]*len(self._LABEL2ACT)
freq_sum = 0
for dial_id in self.ids:
for act in self.id2act[dial_id]:
act_freq[self._ACT2LABEL[act['action']]] += 1
freq_sum += 1
self.act_support = {'per_class_frequency': act_freq, 'tot_samples': freq_sum}
#compute frequency for attributes
attr_freq = [0] * len(self._ATTRS)
freq_sum = 0
for dial_id in self.ids:
for act in self.id2act[dial_id]:
                if act['action_supervision'] is not None:
for attr in act['action_supervision']['attributes']:
if attr in self._ATTR2LABEL:
attr_freq[self._ATTR2LABEL[attr]] += 1
else:
attr_freq[self._ATTR2LABEL['other']] += 1
freq_sum += 1
self.attr_support = {'per_class_frequency': attr_freq, 'tot_samples': freq_sum}
"""
#print actions distribution
print('_______________________')
print('[ACTIONS DISTRIBUTION]:')
tot_samples = self.act_support['tot_samples']
for idx, freq in enumerate(self.act_support['per_class_frequency']):
print('{}: \t\t[{}%]: {}'.format(self._LABEL2ACT[idx], round(100*freq/tot_samples), freq))
print('Total support sum: {}'.format(tot_samples))
print('_______________________')
#print attributes distribution
print('[ATTRIBUTES DISTRIBUTION]:')
tot_samples = self.attr_support['tot_samples']
for idx, freq in enumerate(self.attr_support['per_class_frequency']):
print('{}: \t\t[{}%]: {}'.format(self._ATTRS[idx], round(100*freq/tot_samples), freq))
print('Total support sum: {}'.format(tot_samples))
print('_______________________')
pdb.set_trace()
"""
| 22,029 | 47.955556 | 193 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/tools/dialogue_visualizer.py
|
import pdb
from simmc_dataset import SIMMCDataset
dataset = SIMMCDataset(data_path='data/simmc_fashion/dev/fashion_dev_dials.json',
metadata_path='data/simmc_fashion/fashion_metadata.json')
printed=False
for dial_id, dial in dataset.id2dialog.items():
coref_map = dial['dialogue_coref_map']
#inverted_coref = {value: key for key, value in coref_map.items()}
task_id = dial['dialogue_task_id']
task = dataset.task_mapping[task_id]
if printed:
print('\n\n**********************************\n\n')
for turn in dial['dialogue']:
# print only dialogues with memory images
if not len(task['memory_images']):
printed=False
continue
print('-----------')
print('+U: {}\n+W: {}\n-V: {}\n@Coref: {}\n*FOC: {}\n*MEM: {}\n*DB: {}\n*KEYSTROKE: {}'.format(
turn['transcript'],
turn['system_transcript'],
turn['visual_objects'],
coref_map,
task['focus_image'],
task['memory_images'],
task['database_images'],
turn['raw_assistant_keystrokes']))
print('-----------')
printed=True
if printed:
pdb.set_trace()
| 1,388 | 38.685714 | 103 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/tools/embed_metadata.py
|
import argparse
import json
import pdb
import re
import string
import numpy as np
from nltk.tokenize import WordPunctTokenizer
from simmc_dataset import SIMMCDatasetForResponseGeneration
# for single embedding
FIELDS_TO_EMBED = ['type', 'color', 'embellishments', 'pattern', 'brand']
FIELD2STR = SIMMCDatasetForResponseGeneration._ATTR2STR
def load_embeddings_from_file(embeddings_path):
glove = {}
with open(embeddings_path) as fp:
for l in fp:
line_tokens = l.split()
word = line_tokens[0]
if word in glove:
raise Exception('Repeated words in {} embeddings file'.format(embeddings_path))
vector = np.asarray(line_tokens[1:], "float32")
glove[word] = vector
embedding_size = vector.size
return glove, embedding_size
def clean_value(value, tokenizer):
results = []
tokenized_val = tokenizer.tokenize(value.lower())
for v in tokenized_val:
results.extend(re.split('_|-', v))
return results
def extract_single_metadata_embeddings(metadata_path, embeddings_path, save_path):
with open(metadata_path) as fp:
metadata_dict = json.load(fp)
glove, embedding_size = load_embeddings_from_file(embeddings_path=embeddings_path)
item_embeddings = {}
tokenizer = WordPunctTokenizer()
for item_id, item in metadata_dict.items():
fields_embeddings = []
for field in FIELDS_TO_EMBED:
assert field in item['metadata'], '{} field not in item {}'.format(field, item_id)
cleaned_values = []
if isinstance(item['metadata'][field], list,):
for value in item['metadata'][field]:
cleaned_values.extend(clean_value(value, tokenizer))
else:
cleaned_values = clean_value(item['metadata'][field], tokenizer)
emb = []
for v in cleaned_values:
if v in glove:
emb.append(np.array(glove[v]))
else:
emb.append(np.random.rand(300,))
                    print('Unknown word \'{}\' initialized with a random embedding'.format(v))
emb = np.stack(emb)
fields_embeddings.append(emb.mean(0))
assert fields_embeddings[-1].size == embedding_size, 'Wrong embedding dimension'
assert len(fields_embeddings) == len(FIELDS_TO_EMBED), 'Wrong number of embeddings'
item_embeddings[item_id] = np.concatenate(fields_embeddings)
np.save(
save_path,
{
'embedding_size': embedding_size*len(FIELDS_TO_EMBED),
'embeddings': item_embeddings
}
)
"""
def extract_list_metadata_embeddings(metadata_path, embeddings_path, save_path):
with open(metadata_path) as fp:
metadata_dict = json.load(fp)
glove, embedding_size = load_embeddings_from_file(embeddings_path=embeddings_path)
unknown_words = set()
item_ids = []
item_embeddings = []
tokenizer = WordPunctTokenizer()
for item_id, item in metadata_dict.items():
for key in item['metadata']:
# availability field is always an empty list
if key == 'availability':
continue
field_name = FIELD2STR[key.lower()] if key.lower() in FIELD2STR else key.lower()
field_tokens = clean_value(field_name, tokenizer)
cleaned_values = []
if isinstance(item['metadata'][key], list,):
if not len(item['metadata'][key]):
cleaned_values.extend('none') #for empty lists
for value in item['metadata'][key]:
cleaned_values.extend(clean_value(value, tokenizer))
else:
cleaned_values = clean_value(item['metadata'][key], tokenizer)
fields_emb = []
for t in field_tokens:
if t in glove:
fields_emb.append(np.array(glove[t]))
else:
if t in string.punctuation:
continue
fields_emb.append(np.random.rand(300,))
unknown_words.add(t)
values_emb = []
for v in cleaned_values:
if v in glove:
values_emb.append(np.array(glove[v]))
else:
if v in string.punctuation:
continue
values_emb.append(np.random.rand(300,))
unknown_words.add(v)
item_ids.append(item_id)
pdb.set_trace()
item_embeddings.append([np.stack(fields_emb).mean(0), np.stack(values_emb).mean(0)])
print('UNKNOWN WORDS: {}'.format(unknown_words))
np.save(
save_path,
{
'embedding_size': embedding_size,
'item_ids': item_ids,
'embeddings': item_embeddings
}
)
print('embeddings saved in {}'.format(save_path))
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--metadata",
type=str,
required=True,
help="Path to metadata JSON file")
parser.add_argument(
"--embeddings",
type=str,
required=True,
help="Path to embeddings file"
)
parser.add_argument(
"--save_path",
type=str,
required=True,
help="Path where to save the embeddings"
)
parser.add_argument(
"--type",
type=str,
choices=['single', 'list'],
required=True,
help="Type of embedding for each item (options: 'single', 'list')"
)
args = parser.parse_args()
if args.type == 'single':
extract_single_metadata_embeddings(args.metadata, args.embeddings, args.save_path)
else:
pass#extract_list_metadata_embeddings(args.metadata, args.embeddings, args.save_path)
| 5,973 | 32.943182 | 99 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/preprocessing.py
|
import argparse
import datetime
import math
import os
import pdb
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
sys.path.append('.')
from config import special_toks
from tools.simmc_dataset import SIMMCDatasetForResponseGeneration
from transformers import BertTokenizer
class Collate():
ACT2STR = SIMMCDatasetForResponseGeneration._ACT2STR
UNK_WORDS = set()
def __init__(self, word2id, unk_token):
self.word2id = word2id
self.unk_token = unk_token
def metadata2ids(self, processed_metadata, word2id, unk_token):
unknown_words = set()
metadata_ids = {}
for item_id, item in processed_metadata.items():
metadata_ids[int(item_id)] = []
for field, values in item.items():
curr_field = []
for word in field.split():
if word not in word2id:
unknown_words.add(word)
curr_field.append(word2id[word] if word in word2id else unk_token)
curr_values = []
for value in values:
curr_value = []
for word in value.split():
if word not in word2id:
unknown_words.add(word)
curr_value.append(word2id[word] if word in word2id else unk_token)
curr_values.append(torch.tensor(curr_value))
if len(curr_values):
curr_values = torch.cat(curr_values)
else:
#insert none for field for which we do not have values
curr_values = torch.tensor([word2id['none']], dtype=torch.long)
metadata_ids[int(item_id)].append((torch.tensor(curr_field, dtype=torch.long), curr_values))
print('UNKNOWN METADATA WORDS: {}'.format(len(unknown_words)))
return metadata_ids
def collate_fn(self, batch):
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
utterances = [item[2] for item in batch]
history = [item[3] for item in batch]
focus = [item[4] for item in batch]
actions = [item[5] for item in batch]
attributes = [item[6] for item in batch]
responses_pool = [item[7] for item in batch]
# words to ids for the current utterance
utterance_seq_ids = []
for utt in utterances:
curr_seq = []
for word in utt.split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
if word not in self.word2id:
self.UNK_WORDS.add(word)
curr_seq.append(word_id)
utterance_seq_ids.append(curr_seq)
# words to ids for the history
history_seq_ids = []
for turn, item in zip(turns, history):
assert len(item) == turn, 'Number of turns does not match history length'
curr_turn_ids = []
for t in range(turn):
concat_sentences = item[t][0] + ' ' + item[t][1] #? separator token
curr_seq = []
for word in concat_sentences.split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
if word not in self.word2id:
self.UNK_WORDS.add(word)
curr_seq.append(word_id)
curr_turn_ids.append(torch.tensor(curr_seq))
history_seq_ids.append(curr_turn_ids)
# convert response candidates to word ids
resp_ids = []
for resps in responses_pool:
curr_candidate = []
for resp in resps:
curr_seq = []
for word in resp.split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
if word not in self.word2id:
self.UNK_WORDS.add(word)
curr_seq.append(word_id)
curr_candidate.append(torch.tensor(curr_seq, dtype=torch.long))
resp_ids.append(curr_candidate)
#convert actions and attributes to word ids
act_ids = []
for act in actions:
curr_seq = []
# todo collapse searchdatabase and searchmemory to one single action called search
act_tokens = act.split() if 'search' not in act else ['search']
for word in act_tokens:
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
if word not in self.word2id:
self.UNK_WORDS.add(word)
curr_seq.append(word_id)
act_ids.append(torch.tensor(curr_seq, dtype=torch.long))
attr_ids = []
for attrs in attributes:
curr_attributes = []
for attr in attrs:
curr_seq = []
for word in attr.split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
if word not in self.word2id:
self.UNK_WORDS.add(word)
curr_seq.append(word_id)
curr_attributes.append(torch.tensor(curr_seq, dtype=torch.long))
attr_ids.append(curr_attributes)
assert len(utterance_seq_ids) == 1, 'Only unitary batch sizes allowed'
assert len(utterance_seq_ids) == len(dial_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(turns), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(history_seq_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(resp_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(attr_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(focus)
batch_dict = {}
batch_dict['utterances'] = utterance_seq_ids
batch_dict['history'] = history_seq_ids
batch_dict['actions'] = act_ids
batch_dict['attributes'] = attr_ids
batch_dict['focus'] = focus[0] #only one focus per turn
return dial_ids, turns, batch_dict, resp_ids
class BertCollate():
def __init__(self, pretrained_model):
self.tokenizer = BertTokenizer.from_pretrained(pretrained_model)
self.tokenizer_vocab = self.tokenizer.vocab
self.bert2genid = {}
self.bert2genid[self.tokenizer.convert_tokens_to_ids('[PAD]')] = 0
self.bert2genid[self.tokenizer.convert_tokens_to_ids('[SEP]')] = 1
self.bert2genid[self.tokenizer.convert_tokens_to_ids('[UNK]')] = 2
self.avail_id = 3
self.id_occur = [1, 1, 1]
def add_tensor_ids_to_vocab(self, tensor_ids):
        ids = tensor_ids.view(-1).tolist()
for id in ids:
# skip the [CLS]. Never in the generated output
if id == 101:
continue
if id not in self.bert2genid:
self.bert2genid[id] = self.avail_id
self.avail_id += 1
self.id_occur.append(1)
else:
self.id_occur[self.bert2genid[id]] += 1
def get_vocab_and_inv_frequencies(self):
#avoid frequency computation for padding
tot_sum = sum(self.id_occur[1:])
word_inv_freqs = [tot_sum/occur for occur in self.id_occur[1:]]
#insert 0 inverse frequency for padding
word_inv_freqs.insert(0, 0)
assert len(self.bert2genid) == len(word_inv_freqs)
return self.bert2genid, word_inv_freqs
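    # Worked example for get_vocab_and_inv_frequencies above (illustrative):
    #   id_occur = [1, 2, 8] -> tot_sum = 10 (padding at index 0 excluded)
    #   word_inv_freqs = [0, 10/2, 10/8] = [0, 5.0, 1.25]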
def metadata2ids(self, processed_metadata):
"""Each item is represented by the plain string of all its attributes
'key1: val1, val2. key2: val1. ...'
"""
id2pos = {}
items_strings = []
for idx, (item_id, item) in enumerate(processed_metadata.items()):
id2pos[int(item_id)] = idx
curr_item_strings = []
for field, values in item.items():
if len(values):
curr_str = '{}: {}'.format(field, ', '.join(values))
else:
curr_str = '{}: {}'.format(field, 'none')
curr_item_strings.append(curr_str)
items_strings.append('. '.join(curr_item_strings))
items_tensors = self.tokenizer(items_strings, padding='longest', return_tensors='pt')
self.add_tensor_ids_to_vocab(items_tensors['input_ids'])
res_dict = {'id2pos': id2pos, 'items_tensors': items_tensors}
return res_dict
def collate_fn(self, batch):
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
utterances = [item[2] for item in batch]
wizard_resp = [item[3] for item in batch]
history = [item[4] for item in batch]
focus = [item[5] for item in batch]
actions = [item[6] for item in batch]
attributes = [item[7][0] for item in batch]
retr_candidates = [item[8] for item in batch]
#each results has three keys: 'input_ids', 'token_type_ids', 'attention_mask'
utterances_tensors = self.tokenizer(utterances, padding='longest', return_tensors='pt')
self.add_tensor_ids_to_vocab(utterances_tensors['input_ids'])
responses_tensors = self.tokenizer(wizard_resp, padding='longest', return_tensors='pt')
self.add_tensor_ids_to_vocab(responses_tensors['input_ids'])
history_seq_ids = []
for turn, item in zip(turns, history):
assert len(item) == turn, 'Number of turns does not match history length'
if not len(item):
no_history = {'input_ids': torch.zeros(utterances_tensors['input_ids'].shape[1]),
'token_type_ids': torch.zeros(utterances_tensors['input_ids'].shape[1]),
'attention_mask': torch.zeros(utterances_tensors['input_ids'].shape[1])}
history_seq_ids.append(no_history)
continue
history_seq_ids.append(self.tokenizer(item, padding='longest', return_tensors='pt'))
actions_tensors = self.tokenizer(actions, padding='longest', return_tensors='pt')
all_candidates = [candidate for pool in retr_candidates for candidate in pool]
candidates_tensors = self.tokenizer(all_candidates, padding='longest', return_tensors='pt')
candidates_tensors = {'input_ids': candidates_tensors['input_ids'].view(len(dial_ids), 100, -1),
'token_type_ids': candidates_tensors['token_type_ids'].view(len(dial_ids), 100, -1),
'attention_mask': candidates_tensors['attention_mask'].view(len(dial_ids), 100, -1)}
assert utterances_tensors['input_ids'].shape[0] == len(dial_ids), 'Batch sizes do not match'
assert utterances_tensors['input_ids'].shape[0] == len(turns), 'Batch sizes do not match'
assert utterances_tensors['input_ids'].shape[0] == responses_tensors['input_ids'].shape[0], 'Batch sizes do not match'
assert utterances_tensors['input_ids'].shape[0] == len(history_seq_ids), 'Batch sizes do not match'
assert utterances_tensors['input_ids'].shape[0] == actions_tensors['input_ids'].shape[0], 'Batch sizes do not match'
assert utterances_tensors['input_ids'].shape[0] == len(attributes)
assert utterances_tensors['input_ids'].shape[0] == candidates_tensors['input_ids'].shape[0]
assert utterances_tensors['input_ids'].shape[0] == len(focus), 'Batch sizes do not match'
data_dict = {}
data_dict['utterances'] = utterances_tensors
data_dict['responses'] = responses_tensors
data_dict['history'] = history_seq_ids
data_dict['actions'] = actions_tensors
data_dict['attributes'] = attributes
data_dict['focus'] = focus
data_dict['candidates'] = candidates_tensors
return dial_ids, turns, data_dict
def save_data_on_file(loader, save_path):
    dial_ids, turns, data_dict = next(iter(loader))
torch.save(
{
'dial_ids': dial_ids,
'turns': turns,
'data_dict': data_dict,
},
save_path
)
def preprocess(train_dataset, dev_dataset, test_dataset, args):
save_path = '{}/{}'
collate = BertCollate('bert-base-uncased')
metadata_ids = collate.metadata2ids(train_dataset.processed_metadata)
torch.save(metadata_ids, save_path.format(args.save_path, 'metadata_ids.dat'))
# prepare DataLoader
params = {'batch_size': len(train_dataset),
'shuffle': False,
'num_workers': 0}
assert params['batch_size'] == len(train_dataset) and not params['shuffle'], 'Keep batch size to max and shuffle to False to avoid problems during training'
trainloader = DataLoader(train_dataset, **params, collate_fn=collate.collate_fn)
devloader = DataLoader(dev_dataset, **params, collate_fn=collate.collate_fn)
testloader = DataLoader(test_dataset, **params, collate_fn=collate.collate_fn)
start_t = time.time()
save_data_on_file(loader=trainloader, save_path=save_path.format(args.save_path, 'train_response_retrieval_data.dat'))
#save vocab and inverse word frequencies only for training data
vocab, inv_freqs = collate.get_vocab_and_inv_frequencies()
torch.save({'vocab': vocab, 'inv_freqs': torch.tensor(inv_freqs)}, save_path.format(args.save_path, 'generative_vocab.dat'))
save_data_on_file(loader=devloader, save_path=save_path.format(args.save_path, 'dev_response_retrieval_data.dat'))
save_data_on_file(loader=testloader, save_path=save_path.format(args.save_path, 'devtest_response_retrieval_data.dat'))
#print('UNKNOWN DATASET WORDS: {}'.format(len(collate.UNK_WORDS)))
end_t = time.time()
h_count = (end_t-start_t) /60 /60
m_count = ((end_t-start_t)/60) % 60
s_count = (end_t-start_t) % 60
print('preprocessing time: {}h:{}m:{}s'.format(round(h_count), round(m_count), round(s_count)))
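# Example invocation (illustrative; folder names are placeholders):
#   python mm_response_generation/preprocessing.py \
#       --simmc_folder data/simmc_fashion \
#       --actions_folder data/simmc_fashion/actions \
#       --metadata data/simmc_fashion/fashion_metadata.json \
#       --save_path mm_response_generation/data
# The simmc_folder is expected to contain fashion_{train,dev,devtest}_dials.json and the
# retrieval candidates files; the actions_folder the corresponding *_api_calls.json files.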
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--simmc_folder",
type=str,
required=True,
help="Path to simmc fashion dataset folder")
parser.add_argument(
"--actions_folder",
type=str,
required=True,
help="Path to simmc fashion actions folder")
parser.add_argument(
"--metadata",
type=str,
required=True,
help="Path to metadata JSON file")
parser.add_argument(
"--save_path",
type=str,
required=True,
help="Path to save processed files")
args = parser.parse_args()
dataset_path = '{}/fashion_{}_dials.json'
actions_path = '{}/fashion_{}_dials_api_calls.json'
candidates_path = '{}/fashion_{}_dials_retrieval_candidates.json'
train_dataset = SIMMCDatasetForResponseGeneration(data_path=dataset_path.format(args.simmc_folder, 'train'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'train'),
candidates_path=candidates_path.format(args.simmc_folder, 'train'))
dev_dataset = SIMMCDatasetForResponseGeneration(data_path=dataset_path.format(args.simmc_folder, 'dev'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'dev'),
candidates_path=candidates_path.format(args.simmc_folder, 'dev'))
test_dataset = SIMMCDatasetForResponseGeneration(data_path=dataset_path.format(args.simmc_folder, 'devtest'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'devtest'),
candidates_path=candidates_path.format(args.simmc_folder, 'devtest'))
preprocess(train_dataset, dev_dataset, test_dataset, args)
| 16,410 | 42.762667 | 160 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/config.py
|
"""
class SIMMCFashionConfig():
#? not used
_FASHION_ACTION_NO = 5
_FASHION_ATTRS_NO = 33
"""
model_conf = {
'dropout_prob': 0.5,
'freeze_bert': True,
'n_decoders': 2,
'decoder_heads': 6 #todo must be a perfect divisor of 768
}
special_toks = {
'pad_token': '[PAD]',
'start_token': '[CLS]',
'end_token': '[SEP]',
'unk_token': '[UNK]',
}
train_conf = {
'seed': 240797,
'distractors_sampling': -1, #-1 to avoid sampling
'lr': 1e-3,
'weight_decay': 1e-3,
'ckpt_folder': 'mm_response_generation/checkpoints'
}
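# These dictionaries are consumed in train.py / eval.py, e.g. (see mm_response_generation/train.py):
#   model = MultiAttentiveTransformer(**model_conf, seed=train_conf['seed'], ...)
#   optimizer = torch.optim.Adam(model.parameters(), lr=train_conf['lr'],
#                                weight_decay=train_conf['weight_decay'])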
| 573 | 18.793103 | 61 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/eval.py
|
import argparse
import json
import os
import pdb
import sys
import time
import string
import torch
from torch.utils.data import DataLoader
sys.path.append('.')
from config import special_toks, train_conf
from dataset import FastDataset
from models import BlindStatelessLSTM, MultiAttentiveTransformer
from tools.simmc_dataset import SIMMCDatasetForResponseGeneration
"""expected form for model output
[
{
"dialog_id": <dialog_id>,
"candidate_scores": [
<list of 100 scores for 100 candidates for round 1>
<list of 100 scores for 100 candidates for round 2>
...
]
}
...
]
"""
def instantiate_model(args, model_configurations, out_vocab, device):
if args.model == 'blindstateless':
return BlindStatelessLSTM(word_embeddings_path=args.embeddings,
pad_token=special_toks['pad_token'],
unk_token=special_toks['unk_token'],
seed=train_conf['seed'],
OOV_corrections=False,
freeze_embeddings=True)
elif args.model == 'matransformer':
return MultiAttentiveTransformer(**model_configurations,
seed=train_conf['seed'],
device=device,
out_vocab=out_vocab,
retrieval_eval=args.retrieval_eval,
gen_eval=args.gen_eval,
beam_size=args.beam_size,
mode='inference',
**special_toks,
)
else:
raise Exception('Model not present!')
def create_eval_dicts(dataset):
dataset.create_id2turns()
gen_eval_dict = {}
retr_eval_dict = {}
for dial_id, num_turns in dataset.id2turns.items():
gen_eval_dict[dial_id] = {'dialog_id': dial_id, 'predictions': []}
retr_eval_dict[dial_id] = {'dialog_id': dial_id, 'candidate_scores': []}
return gen_eval_dict, retr_eval_dict
def move_batch_to_device(batch, device):
for key in batch.keys():
if key == 'history':
raise Exception('Not implemented')
if key != 'attributes':
batch[key] = batch[key].to(device)
def visualize_result(utt_ids, item_ids, id2word, gen_ids=None):
item = [id2word[id.item()] for id in item_ids if id != 0]
words_request = [id2word[id.item()] for id in utt_ids if id != 0]
if gen_ids is not None:
words_resp = [id2word[id] for id in gen_ids]
#cleaned_req = clean_response(words_request)
#cleaned_resp = clean_response(words_resp)
print('USER: {}'.format(words_request))
if gen_ids is not None:
print('GEN: {}'.format(words_resp))
print('Item: {}'.format(item))
def eval(model, test_dataset, args, save_folder, device):
model.eval()
model.to(device)
#print('MODEL: {}'.format(model))
# prepare DataLoader
params = {'batch_size': 1,
'shuffle': False,
'num_workers': 0}
testloader = DataLoader(test_dataset, **params, collate_fn=model.collate_fn)
gen_eval_dict, retr_eval_dict = create_eval_dicts(test_dataset)
with torch.no_grad():
for curr_step, (dial_ids, turns, batch) in enumerate(testloader):
assert len(dial_ids) == 1, 'Only unitary batch size is allowed during testing'
dial_id = dial_ids[0]
turn = turns[0]
move_batch_to_device(batch, device)
res = model(**batch,
history=None,
actions=None)
if args.gen_eval:
gen_eval_dict[dial_id]['predictions'].append({'turn_id': turn, 'response': res['generation']['string']})
#visualize_result(batch['utterances'][0], batch['focus_items'][0], id2word, res['generation']['string'])
if args.retrieval_eval:
retr_eval_dict[dial_id]['candidate_scores'].append({'turn_id': turn, 'scores': res['retrieval'].squeeze(0).tolist()})
                    #todo here adjust candidate scores based on semantic attribute information
if args.gen_eval:
gen_eval_list = []
for key in gen_eval_dict:
gen_eval_list.append(gen_eval_dict[key])
save_file = os.path.join(save_folder, 'eval_gen.json')
try:
with open(save_file, 'w+') as fp:
json.dump(gen_eval_list, fp)
print('generation results saved in {}'.format(save_file))
except:
print('Error in writing the resulting JSON')
if args.retrieval_eval:
retr_eval_list = []
for key in retr_eval_dict:
retr_eval_list.append(retr_eval_dict[key])
save_file = os.path.join(save_folder, 'eval_retr.json')
try:
with open(save_file, 'w+') as fp:
json.dump(retr_eval_list, fp)
print('retrieval results saved in {}'.format(save_file))
except:
print('Error in writing the resulting JSON')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=['blindstateless', 'blindstateful', 'matransformer'],
required=True,
help="Type of the model (options: 'blindstateless', 'blindstateful', 'matransformer')")
parser.add_argument(
"--model_path",
default=None,
type=str,
required=True,
help="Path to the weights of the model")
parser.add_argument(
"--model_conf",
default=None,
type=str,
required=True,
help="Path to the model configuration JSON file")
parser.add_argument(
"--vocabulary",
default=None,
type=str,
required=True,
help="Path to output vocabulary for the model")
parser.add_argument(
"--data",
default=None,
type=str,
required=True,
help="Path to test dataset json file")
parser.add_argument(
"--metadata_ids",
type=str,
required=True,
help="Path to metadata ids file")
parser.add_argument(
"--beam_size",
type=int,
required=True,
help="Size of the beam for the beam search at inference time")
parser.add_argument(
"--retrieval_eval",
action='store_true',
default=False,
required=False,
help="Flag to enable retrieval evaluation")
parser.add_argument(
"--gen_eval",
action='store_true',
default=False,
required=False,
help="Flag to enable generation evaluation")
parser.add_argument(
"--cuda",
default=None,
required=False,
type=int,
help="id of device to use")
start_t = time.time()
args = parser.parse_args()
test_dataset = FastDataset(dat_path=args.data,
metadata_ids_path= args.metadata_ids,
retrieval=args.retrieval_eval)
device = torch.device('cuda:{}'.format(args.cuda) if torch.cuda.is_available() and args.cuda is not None else "cpu")
print('EVAL DATASET: {}'.format(test_dataset))
# prepare model
with open(args.model_conf) as fp:
model_configurations = json.load(fp)
with open(args.vocabulary, 'rb') as fp:
bert2genid = torch.load(fp)
model = instantiate_model(args,
model_configurations=model_configurations,
out_vocab=bert2genid,
device=device)
model.load_state_dict(torch.load(args.model_path))
model_folder = '/'.join(args.model_path.split('/')[:-1])
print('model loaded from {}'.format(model_folder))
eval(model, test_dataset, args, save_folder=model_folder, device=device)
end_t = time.time()
m_count = ((end_t-start_t)/60) % 60
s_count = (end_t-start_t) % 60
print('evaluation time: {}m:{}s'.format(round(m_count), round(s_count)))
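# Example invocation (illustrative; paths are placeholders pointing to a training run's
# checkpoint folder and to the files produced by preprocessing.py):
#   python mm_response_generation/eval.py \
#       --model matransformer \
#       --model_path mm_response_generation/checkpoints/<run>/state_dict.pt \
#       --model_conf mm_response_generation/checkpoints/<run>/model_conf.json \
#       --vocabulary mm_response_generation/checkpoints/<run>/bert2genid.pkl \
#       --data mm_response_generation/data/devtest_response_retrieval_data.dat \
#       --metadata_ids mm_response_generation/data/metadata_ids.dat \
#       --beam_size 3 --gen_eval --retrieval_eval --cuda 0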
| 8,110 | 32.795833 | 133 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/train.py
|
import argparse
import datetime
import json
import math
import os
import pdb
import pickle
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from config import model_conf, special_toks, train_conf
from dataset import FastDataset
from models import BlindStatelessLSTM, MultiAttentiveTransformer
from utilities import DataParallelV2, Logger, plotting_loss
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,2,3,4,5" # specify which GPU(s) to be used
torch.autograd.set_detect_anomaly(True)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def instantiate_model(args, out_vocab, device):
"""
if args.model == 'blindstateless':
return BlindStatelessLSTM(word_embeddings_path=args.embeddings,
pad_token=special_toks['pad_token'],
unk_token=special_toks['unk_token'],
seed=train_conf['seed'],
OOV_corrections=False,
freeze_embeddings=True)
"""
if args.model == 'matransformer':
if args.from_checkpoint is not None:
with open(os.path.join(args.from_checkpoint, 'state_dict.pt'), 'rb') as fp:
state_dict = torch.load(fp)
with open(os.path.join(args.from_checkpoint, 'model_conf.json'), 'rb') as fp:
loaded_conf = json.load(fp)
loaded_conf.pop('dropout_prob')
model_conf.update(loaded_conf)
model = MultiAttentiveTransformer(**model_conf,
seed=train_conf['seed'],
device=device,
out_vocab=out_vocab,
**special_toks)
if args.from_checkpoint is not None:
model.load_state_dict(state_dict)
print('Model loaded from {}'.format(args.from_checkpoint))
return model
else:
raise Exception('Model not present!')
def plotting(epochs, losses_trend, checkpoint_dir=None):
epoch_list = np.arange(1, epochs+1)
losses = [(losses_trend['train'], 'blue', 'train'),
(losses_trend['dev'], 'red', 'validation')]
loss_path = os.path.join(checkpoint_dir, 'global_loss_plot') if checkpoint_dir is not None else None
plotting_loss(x_values=epoch_list, save_path=loss_path, functions=losses, plot_title='Global loss trend', x_label='epochs', y_label='loss')
def move_batch_to_device(batch, device):
for key in batch.keys():
if key == 'history':
raise Exception('Not implemented')
batch[key] = batch[key].to(device)
def visualize_output(request, responses, item, id2word, genid2word, vocab_logits, device):
shifted_targets = torch.cat((responses[:, 1:], torch.zeros((responses.shape[0], 1), dtype=torch.long).to(device)), dim=-1)
rand_idx = random.randint(0, shifted_targets.shape[0]-1)
eff_len = shifted_targets[rand_idx][shifted_targets[rand_idx] != 0].shape[0]
"""
inp = ' '.join([id2word[inp_id.item()] for inp_id in responses[rand_idx] if inp_id != vocab['[PAD]']])
print('input: {}'.format(inp))
"""
req = ' '.join([id2word[req_id.item()] for req_id in request[rand_idx] if req_id != 0])
print('user: {}'.format(req))
out = ' '.join([id2word[out_id.item()] for out_id in shifted_targets[rand_idx] if out_id !=0])
print('wizard: {}'.format(out))
item = ' '.join([id2word[item_id.item()] for item_id in item[rand_idx] if item_id !=0])
print('item: {}'.format(item))
gens = torch.argmax(torch.nn.functional.softmax(vocab_logits, dim=-1), dim=-1)
gen = ' '.join([genid2word[gen_id.item()] for gen_id in gens[:, :eff_len][rand_idx]])
print('generated: {}'.format(gen))
def forward_step(model, batch, generative_targets, response_criterion, device):
move_batch_to_device(batch, device)
generative_targets = generative_targets.to(device)
vocab_logits = model(**batch,
history=None,
actions=None,
attributes=None,
candidates=None,
candidates_mask=None,
candidates_token_type=None)
#keep the loss outside the forward: complex to compute the mean with a weighted loss
response_loss = response_criterion(vocab_logits.view(vocab_logits.shape[0]*vocab_logits.shape[1], -1),
generative_targets.view(vocab_logits.shape[0]*vocab_logits.shape[1]))
p = random.randint(0, 9)
if p > 8:
try:
vocab = model.vocab
id2word = model.id2word
genid2word = model.genid2word
except:
vocab = model.module.vocab
id2word = model.module.id2word
genid2word = model.module.genid2word
visualize_output(request=batch['utterances'], responses=batch['responses'], item=batch['focus'], id2word=id2word, genid2word=genid2word, vocab_logits=vocab_logits, device=device)
return response_loss
def train(train_dataset, dev_dataset, args, device):
# prepare checkpoint folder
if args.checkpoints:
curr_date = datetime.datetime.now().isoformat().split('.')[0]
checkpoint_dir = os.path.join(train_conf['ckpt_folder'], curr_date)
os.makedirs(checkpoint_dir, exist_ok=True)
# prepare logger to redirect both on file and stdout
sys.stdout = Logger(os.path.join(checkpoint_dir, 'train.log'))
sys.stderr = Logger(os.path.join(checkpoint_dir, 'err.log'))
print('device used: {}'.format(str(device)))
print('batch used: {}'.format(args.batch_size))
print('lr used: {}'.format(train_conf['lr']))
print('weight decay: {}'.format(train_conf['weight_decay']))
print('TRAINING DATASET: {}'.format(train_dataset))
print('VALIDATION DATASET: {}'.format(dev_dataset))
with open(args.generative_vocab, 'rb') as fp:
gen_vocab = dict(torch.load(fp))
bert2genid, inv_freqs = gen_vocab['vocab'], gen_vocab['inv_freqs']
if args.checkpoints:
torch.save(bert2genid, os.path.join(checkpoint_dir, 'bert2genid.pkl'))
print('GENERATIVE VOCABULARY SIZE: {}'.format(len(bert2genid)))
# prepare model
#response_criterion = torch.nn.CrossEntropyLoss(ignore_index=0, weight=inv_freqs/inv_freqs.sum()).to(device)
response_criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)
model = instantiate_model(args, out_vocab=bert2genid, device=device)
vocab = model.vocab
if args.checkpoints:
with open(os.path.join(checkpoint_dir, 'model_conf.json'), 'w+') as fp:
json.dump(model_conf, fp)
# work on multiple GPUs when available
if torch.cuda.device_count() > 1:
model = DataParallelV2(model)
model.to(device)
print('using {} GPU(s): {}'.format(torch.cuda.device_count(), os.environ["CUDA_VISIBLE_DEVICES"]))
print('MODEL NAME: {}'.format(args.model))
print('NETWORK: {}'.format(model))
# prepare DataLoader
params = {'batch_size': args.batch_size,
'shuffle': True,
'num_workers': 0,
'pin_memory': True}
collate_fn = model.collate_fn if torch.cuda.device_count() <= 1 else model.module.collate_fn
trainloader = DataLoader(train_dataset, **params, collate_fn=collate_fn)
devloader = DataLoader(dev_dataset, **params, collate_fn=collate_fn)
#prepare optimizer
#optimizer = torch.optim.Adam(params=model.parameters(), lr=train_conf['lr'])
optimizer = torch.optim.Adam(params=model.parameters(), lr=train_conf['lr'], weight_decay=train_conf['weight_decay'])
#scheduler1 = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = list(range(500, 500*5, 100)), gamma = 0.1)
scheduler1 = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = list(range(25, 100, 50)), gamma = 0.1)
scheduler2 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=.1, patience=12, threshold=1e-3, cooldown=2, verbose=True)
#prepare containers for statistics
losses_trend = {'train': [],
'dev': []}
#candidates_pools_size = 100 if train_conf['distractors_sampling'] < 0 else train_conf['distractors_sampling'] + 1
#print('candidates\' pool size: {}'.format(candidates_pools_size))
#accumulation_steps = 8
best_loss = math.inf
global_step = 0
start_t = time.time()
for epoch in range(args.epochs):
ep_start = time.time()
model.train()
curr_epoch_losses = []
for batch_idx, (dial_ids, turns, batch, generative_targets) in enumerate(trainloader):
global_step += 1
step_start = time.time()
response_loss = forward_step(model,
batch=batch,
response_criterion=response_criterion,
generative_targets=generative_targets,
device=device)
optimizer.zero_grad()
            #average the per-GPU losses returned by DataParallel before backpropagating
response_loss.mean().backward()
#torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
step_end = time.time()
h_count = (step_end-step_start) /60 /60
m_count = ((step_end-step_start)/60) % 60
s_count = (step_end-step_start) % 60
print('step {}, loss: {}, time: {}h:{}m:{}s'.format(global_step, round(response_loss.mean().item(), 4), round(h_count), round(m_count), round(s_count)))
"""
if (batch_idx+1) % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
#torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
#step_end = time.time()
print('step {}, loss: {}'.format(global_step, round(response_loss.item()*accumulation_steps, 4)))
p = random.randint(0, 9)
if p > 8:
h_count = (step_end-step_start) /60 /60
m_count = ((step_end-step_start)/60) % 60
s_count = (step_end-step_start) % 60
print('step {}, loss: {}, time: {}h:{}m:{}s'.format(global_step, round(response_loss.mean().item(), 4), round(h_count), round(m_count), round(s_count)))
"""
#scheduler1.step()
#scheduler2.step(response_loss.item())
curr_epoch_losses.append(response_loss.mean().item())
losses_trend['train'].append(np.mean(curr_epoch_losses))
model.eval()
curr_epoch_losses = []
with torch.no_grad():
for curr_step, (dial_ids, turns, batch, generative_targets) in enumerate(devloader):
response_loss = forward_step(model,
batch=batch,
response_criterion=response_criterion,
generative_targets=generative_targets,
device=device)
curr_epoch_losses.append(response_loss.mean().item())
losses_trend['dev'].append(np.mean(curr_epoch_losses))
# save checkpoint if best model
if losses_trend['dev'][-1] < best_loss:
best_loss = losses_trend['dev'][-1]
if args.checkpoints:
try:
state_dict = model.cpu().module.state_dict()
except AttributeError:
state_dict = model.cpu().state_dict()
torch.save(state_dict, os.path.join(checkpoint_dir, 'state_dict.pt'))
#torch.save(model.cpu().state_dict(), os.path.join(checkpoint_dir, 'state_dict.pt'))
model.to(device)
ep_end = time.time()
ep_h_count = (ep_end-ep_start) /60 /60
ep_m_count = ((ep_end-ep_start)/60) % 60
ep_s_count = (ep_end-ep_start) % 60
time_str = '{}h:{}m:{}s'.format(round(ep_h_count), round(ep_m_count), round(ep_s_count))
print('EPOCH #{} :: train_loss = {:.4f} ; dev_loss = {:.4f} ; (lr={}); --time: {}'.format(epoch+1,
losses_trend['train'][-1],
losses_trend['dev'][-1],
optimizer.param_groups[0]['lr'],
time_str))
#TODO uncomment
#scheduler1.step()
scheduler2.step(losses_trend['dev'][-1])
end_t = time.time()
h_count = (end_t-start_t) /60 /60
m_count = ((end_t-start_t)/60) % 60
s_count = (end_t-start_t) % 60
print('training time: {}h:{}m:{}s'.format(round(h_count), round(m_count), round(s_count)))
if not args.checkpoints:
checkpoint_dir = None
plotting(epochs=args.epochs, losses_trend=losses_trend, checkpoint_dir=checkpoint_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=['blindstateless', 'blindstateful', 'matransformer'],
required=True,
help="Type of the model (options: 'blindstateless', 'blindstateful', 'matransformer')")
parser.add_argument(
"--data",
type=str,
required=True,
help="Path to preprocessed training data file .dat")
parser.add_argument(
"--eval",
type=str,
required=True,
help="Path to preprocessed eval data file .dat")
parser.add_argument(
"--metadata_ids",
type=str,
required=True,
help="Path to metadata ids file")
parser.add_argument(
"--generative_vocab",
type=str,
required=True,
help="Path to generative vocabulary file")
parser.add_argument(
"--batch_size",
required=True,
type=int,
help="Batch size")
parser.add_argument(
"--epochs",
required=True,
type=int,
help="Number of epochs")
parser.add_argument(
"--from_checkpoint",
type=str,
required=False,
default=None,
help="Path to checkpoint to load")
parser.add_argument(
"--checkpoints",
action='store_true',
default=False,
required=False,
help="Flag to enable checkpoint saving for best model, logs and plots")
parser.add_argument(
"--cuda",
action='store_true',
default=False,
required=False,
help="flag to use cuda")
args = parser.parse_args()
if not args.checkpoints:
print('************ NO CHECKPOINT SAVE !!! ************')
train_dataset = FastDataset(dat_path=args.data, metadata_ids_path= args.metadata_ids, distractors_sampling=train_conf['distractors_sampling'])
dev_dataset = FastDataset(dat_path=args.eval, metadata_ids_path= args.metadata_ids, distractors_sampling=train_conf['distractors_sampling']) #? sampling on eval
print('TRAIN DATA LEN: {}'.format(len(train_dataset)))
    device = torch.device('cuda:0' if torch.cuda.is_available() and args.cuda else 'cpu')
train(train_dataset, dev_dataset, args, device)
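# Illustrative invocation sketch (the script name and data paths are assumptions, not taken from the repo):
# python train.py --model matransformer --data processed/train_response.dat \
#     --eval processed/dev_response.dat --metadata_ids processed/metadata_ids.dat \
#     --generative_vocab processed/generative_vocab.pt --batch_size 16 --epochs 20 --cuda --checkpoints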
| 15,568 | 42.733146 | 186 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/dataset/processed_dataset.py
|
import pdb
import random
import numpy as np
import torch
from torch.utils.data import Dataset
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
class FastDataset(Dataset):
"""Dataset with preprocessed data for response generation subtask
self.data.keys() = dict_keys(['dial_ids', 'turns', 'utterances', 'histories', 'actions',
'attributes', 'visual_contexts', 'seq_lengths', 'candidates'])
"""
def __init__(self, dat_path, metadata_ids_path, retrieval=False, distractors_sampling=-1):
super(FastDataset, self).__init__()
self.data = torch.load(dat_path)
self.retrieval = retrieval
if not retrieval:
self.data['data_dict'].pop('candidates', None)
self.data['data_dict'].pop('attributes', None)
self.metadata = torch.load(metadata_ids_path)
self.dataset_name = 'SIMMC'
self.task = 'response_retrieval'
self.distractors_sampling = distractors_sampling
def __getitem__(self, index):
"""
candidates = []
if self.retrieval and self.distractors_sampling >= 0:
samples = random.sample(range(1, 100), self.distractors_sampling)
# the first is always the ground truth
candidates.append(self.data['data_dict']['candidates'][index][0])
for sample in samples:
candidates.append(self.data['data_dict']['candidates'][index][sample])
assert len(candidates) == 1 + self.distractors_sampling, 'Invalid size of candidate list after sampling'
else:
candidates = self.data['data_dict']['candidates'][index]
"""
focus_id = self.data['data_dict']['focus'][index]
focus_pos = self.metadata['id2pos'][focus_id]
if self.data['turns'][index] != 0:
            assert self.data['turns'][index] == self.data['data_dict']['history'][index]['input_ids'].shape[0], 'Number of turns and history length do not correspond'
ret_tuple = (self.data['dial_ids'][index],
self.data['turns'][index],
self.data['data_dict']['utterances']['input_ids'][index],
self.data['data_dict']['utterances']['attention_mask'][index],
self.data['data_dict']['utterances']['token_type_ids'][index],
self.data['data_dict']['responses']['input_ids'][index],
self.data['data_dict']['responses']['attention_mask'][index],
self.data['data_dict']['responses']['token_type_ids'][index],
self.data['data_dict']['attributes'][index],
#self.data['data_dict']['history'][index],
#self.data['data_dict']['actions'][index],
#self.data['data_dict']['attributes'][index],
self.metadata['items_tensors']['input_ids'][focus_pos],
self.metadata['items_tensors']['attention_mask'][focus_pos],
self.metadata['items_tensors']['token_type_ids'][focus_pos])
if self.retrieval:
ret_tuple += (self.data['data_dict']['candidates']['input_ids'][index],
self.data['data_dict']['candidates']['attention_mask'][index],
self.data['data_dict']['candidates']['token_type_ids'][index])
return ret_tuple
def create_id2turns(self):
"""used to create the eval dict during evaluation phase
"""
self.id2turns = {}
for dial_id in self.data['dial_ids']:
if dial_id in self.id2turns:
self.id2turns[dial_id] += 1
else:
self.id2turns[dial_id] = 1
def __len__(self):
#_DATA_PERC = 25
#frac = int(self.data['data_dict']['utterances']['input_ids'].shape[0] * (_DATA_PERC/100))
#return frac
return self.data['data_dict']['utterances']['input_ids'].shape[0]
def __str__(self):
return '{}_subtask({})'.format(self.dataset_name, self.task)
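# Minimal usage sketch (paths are assumptions): each item is the dialogue id, the turn number and the
# id/mask/token-type tensors for the utterance, the response and the focus item, plus the attributes.
# dataset = FastDataset(dat_path='processed/train_response.dat',
#                       metadata_ids_path='processed/metadata_ids.dat')
# dial_id, turn, utt_ids, utt_mask, utt_type, *rest = dataset[0]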
| 4,071 | 43.26087 | 165 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/dataset/__init__.py
|
from .processed_dataset import FastDataset
| 42 | 42 | 42 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/bert.py
|
import pdb
import torch
import torch.nn as nn
from transformers import BertConfig, BertModel
class BertEncoder(nn.Module):
def __init__(self, pretrained, freeze=False):
super(BertEncoder, self).__init__()
configuration = BertConfig()
self.bert = BertModel(config=configuration).from_pretrained(pretrained)
self.configuration = self.bert.config
if freeze:
for p in self.bert.parameters():
p.requires_grad = False
def forward(self, input, input_mask, input_token_type):
out_all, _ = self.bert(input_ids=input,
attention_mask=input_mask,
token_type_ids=input_token_type)
return out_all
| 728 | 29.375 | 79 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/embednets.py
|
import pdb
import numpy as np
import torch
import torch.nn as nn
from spellchecker import SpellChecker
class ItemEmbeddingNetwork(nn.Module):
"""Base class for word embedding layer initialization and weights loading
Args:
nn (torch.nn.Module): inherits from torch.nn.Module
"""
def __init__(self, item_embeddings_path, freeze=False):
super(ItemEmbeddingNetwork, self).__init__()
raw_data = np.load(item_embeddings_path, allow_pickle=True)
raw_data = dict(raw_data.item())
self.item2id = {}
for idx, item in enumerate(raw_data['item_ids']):
self.item2id[item] = idx
self.embedding_dim = raw_data['embedding_size']
fields_embeddings = np.stack([item_embs[0] for item_embs in raw_data['embeddings']])
values_embeddings = np.stack([item_embs[1] for item_embs in raw_data['embeddings']])
fields_embedding_weights = torch.tensor(fields_embeddings)
values_embedding_weights = torch.tensor(values_embeddings)
assert fields_embedding_weights.shape[0] == values_embedding_weights.shape[0], 'Number of fields and values embedding does not match'
assert fields_embedding_weights.shape[-1] == values_embedding_weights.shape[-1] and fields_embedding_weights.shape[-1] == self.embedding_dim,\
'Real embedding dimension does not match the declared one'
num_embeddings = fields_embedding_weights.shape[0]
self.fields_embedding_layer = nn.Embedding(num_embeddings, self.embedding_dim)
self.fields_embedding_layer.load_state_dict({'weight': fields_embedding_weights})
self.values_embedding_layer = nn.Embedding(num_embeddings, self.embedding_dim)
self.values_embedding_layer.load_state_dict({'weight': values_embedding_weights})
if freeze:
for p in self.fields_embedding_layer.parameters():
p.requires_grad = False
for p in self.values_embedding_layer.parameters():
p.requires_grad = False
def forward(self, fields_ids, values_ids):
return self.fields_embedding_layer(fields_ids), self.values_embedding_layer(values_ids)
class WordEmbeddingNetwork(nn.Module):
"""Base class for word embedding layer initialization and weights loading
Args:
nn (torch.nn.Module): inherits from torch.nn.Module
"""
def __init__(self, word_embeddings_path, word2id, pad_token, unk_token, OOV_corrections=False, freeze=False):
super(WordEmbeddingNetwork, self).__init__()
self.pad_token = pad_token
self.unk_token = unk_token
self.corrected_flag = OOV_corrections
self.word2id = word2id
self.embedding_file = word_embeddings_path.split('/')[-1]
self.load_embeddings_from_file(word_embeddings_path)
embedding_weights = self.get_embeddings_weights(OOV_corrections)
num_embeddings, self.embedding_dim = embedding_weights.shape
self.embedding_layer = nn.Embedding(num_embeddings, self.embedding_dim)
self.embedding_layer.load_state_dict({'weight': embedding_weights})
if freeze:
for p in self.embedding_layer.parameters():
p.requires_grad = False
def forward(self, batch):
return self.embedding_layer(batch)
def load_embeddings_from_file(self, embeddings_path):
self.glove = {}
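        # each line of the embeddings file is expected to look like (illustrative values):
        # "the 0.418 0.24968 -0.41242 ..." i.e. a word followed by its space-separated float components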
with open(embeddings_path) as fp:
for l in fp:
line_tokens = l.split()
word = line_tokens[0]
if word in self.glove:
raise Exception('Repeated words in {} embeddings file'.format(embeddings_path))
vector = np.asarray(line_tokens[1:], "float32")
self.glove[word] = vector
self.embedding_size = vector.size
def get_embeddings_weights(self, OOV_corrections):
#if OOV_corrections:
# dataset_vocabulary = self.correct_spelling(dataset_vocabulary)
matrix_len = len(self.word2id)
weights_matrix = np.zeros((matrix_len, self.embedding_size))
# set pad and unknow ids
pad_id = self.word2id[self.pad_token]
unk_id = self.word2id[self.unk_token]
weights_matrix[pad_id] = np.zeros(shape=(self.embedding_size, ))
weights_matrix[unk_id] = np.random.normal(scale=0.6, size=(self.embedding_size, ))
for idx, word in enumerate(self.word2id):
if word in self.glove:
weights_matrix[idx] = self.glove[word]
return torch.tensor(weights_matrix, dtype=torch.float32)
def correct_spelling(self, dataset_vocabulary):
#todo fix: now dataset_vocabulary is a map, not a set (get the .keys())
oov = []
self.corrections = {}
checker = SpellChecker()
vocab_copy = copy.deepcopy(dataset_vocabulary)
for word in vocab_copy:
if word not in self.glove:
oov.append(word)
corrected_w = checker.correction(word)
if corrected_w in self.glove:
# the word with typos is assigned to the same id of the correspondant word after the correction
try:
self.word2id[word] = self.word2id[corrected_w] #TODO fix: word2id is still empty at this point
except:
pdb.set_trace()
self.corrections[word] = corrected_w
dataset_vocabulary.remove(word)
#print(oov)
#print(corrections.values())
return dataset_vocabulary
| 5,715 | 39.828571 | 150 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/old_encoder.py
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class SingleEncoder(nn.Module):
def __init__(self,
input_size,
hidden_size,
dropout_prob,
encoder_heads,
embedding_net):
super(SingleEncoder, self).__init__()
self.word_embeddings_layer = embedding_net
self.sentence_encoder = SentenceEncoder(emb_dim=input_size,
hidden_size=hidden_size,
bidirectional=True,
dropout_prob=dropout_prob)
self.memory_net = MemoryNet(in_features=hidden_size,
memory_hidden_size=hidden_size,
dropout_prob=dropout_prob)
#for h heads: d_k == d_v == emb_dim/h
self.triton_attention = Triton(in_features=hidden_size,
d_k=hidden_size//encoder_heads,
d_v=hidden_size//encoder_heads,
d_f=hidden_size//2,
n_heads=encoder_heads,
n_layers=1,
dropout_prob=dropout_prob)
self.layerNorm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(p=dropout_prob)
def forward(self, utterances, history, focus_items, seq_lengths=None):
# todo use attributes to enforce the user utterance?
# u_t shape [BATCH_SIZE x 2MEMORY_HIDDEN_SIZE], u_t_all shape [BATCH_SIZE x MAX_SEQ_LEN x 2MEMORY_HIDDEN_SIZE]
embedded_utt_tensor = self.word_embeddings_layer(utterances)
u_t, u_t_all = self.sentence_encoder(embedded_utt_tensor, seq_lengths)
#h_t shape h_t[<sample_in_batch>].shape = [N_TURNSx300] or [] if no history
embedded_hist_tensor = []
for history_sample in history:
if not len(history_sample):
embedded_hist_tensor.append([])
else:
embedded_hist_tensor.append(self.word_embeddings_layer(history_sample))
h_t = []
for h_embedding in embedded_hist_tensor:
if not len(h_embedding):
h_t.append([])
else:
h_t.append(self.sentence_encoder(h_embedding)[0])
#embedded_vis_tensor shape v_t[<sample_in_batch>][0/1].shape = [N_FIELDSx300]
#v_t shape [<sample_in_batch>]x300
#v_t_tilde contains contextual embedding for each item in the batch. Shape [Bx300]
v_t = self.encode_v_context(focus_items)
v_t_tilde = torch.stack([self.triton_attention(utt, v[0], v[1]) for utt, v in zip(u_t, v_t)])
# compute history context
h_t_tilde = []
for idx in range(u_t.shape[0]):
if not len(h_t[idx]):
h_t_tilde.append(u_t[idx])
else:
h_t_tilde.append(self.memory_net(u_t[idx], h_t[idx]))
h_t_tilde = torch.stack(h_t_tilde)
return u_t_all, v_t_tilde, h_t_tilde
def encode_v_context(self, focus_images):
v_batch = []
for keys, values in focus_images:
k_ht, _ = self.sentence_encoder(self.word_embeddings_layer(keys))
v_ht, _ = self.sentence_encoder(self.word_embeddings_layer(values))
v_batch.append([k_ht, v_ht])
return v_batch
def __str__(self):
return super().__str__()
class SentenceEncoder(nn.Module):
def __init__(self, emb_dim, hidden_size, dropout_prob, bidirectional=False):
super(SentenceEncoder, self).__init__()
self.encoder = nn.LSTM(emb_dim,
hidden_size,
batch_first=True,
bidirectional=bidirectional)
in_features = 2*hidden_size if bidirectional else hidden_size
self.mlp = nn.Linear(in_features=in_features, out_features=hidden_size)
self.dropout = nn.Dropout(p=dropout_prob)
self.layerNorm = nn.LayerNorm(hidden_size)
def forward(self, seq, seq_lengths=None):
if seq_lengths is not None:
# pack padded sequence
input_seq = pack_padded_sequence(seq, seq_lengths, batch_first=True) #seq_lengths.cpu().numpy()
else:
input_seq = seq
        #flatten_parameters() has to be called at every forward pass when DataParallel is used; otherwise once inside __init__() is enough
self.encoder.flatten_parameters()
sentences_outs, (h_t, c_t) = self.encoder(input_seq)
#concat right and left hidden states of the last layer
bidirectional_h_t = torch.cat((h_t[-2], h_t[-1]), dim=-1)
if seq_lengths is not None:
# unpack padded sequence
sentences_outs, input_sizes = pad_packed_sequence(sentences_outs, batch_first=True)
mlp_out = self.mlp(bidirectional_h_t)
out = self.layerNorm(self.dropout(mlp_out))
return out, sentences_outs
class MemoryNet(nn.Module):
def __init__(self, in_features, memory_hidden_size, dropout_prob):
super(MemoryNet, self).__init__()
self.memory_hidden_size = memory_hidden_size
self.query_encoder = nn.Linear(in_features=in_features, out_features=memory_hidden_size)
self.memory_encoder = nn.Linear(in_features=in_features, out_features=memory_hidden_size)
self.dropout = nn.Dropout(p=dropout_prob)
self.layerNorm = nn.LayerNorm(memory_hidden_size)
def forward(self, input, context, device='cpu'):
query = self.query_encoder(input)
memory = self.memory_encoder(context) + self.get_positional_embeddings(context.shape[0], self.memory_hidden_size).to(context.device)
attn_logits = torch.matmul(query, torch.transpose(memory, 0, 1))/math.sqrt(self.memory_hidden_size)
attn_scores = F.softmax(attn_logits, -1)
weighted_memory = torch.sum(attn_scores[:, None] * memory, dim=0)
out = self.layerNorm(query + self.dropout(weighted_memory))
return out
def get_positional_embeddings(self, n_position, emb_dim):
"""Create positional embeddings (from "Attention Is All You Need", Vaswani et al. 2017)
Args:
n_position (int): number of elements in the sequence
emb_dim (int): size of embeddings
Returns:
            torch.FloatTensor: a positional embedding with shape [N_POSITION x EMB_DIM]
"""
# keep dim 0 for padding token position encoding zero vector
position_enc = np.array([
[pos / np.power(10000, 2 * (k // 2) / emb_dim) for k in range(emb_dim)]
if pos != 0 else np.zeros(emb_dim) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return torch.from_numpy(position_enc).type(torch.FloatTensor)
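        # worked example (illustrative): with emb_dim=4 the angle for position pos and dimension k is
        # pos / 10000**(2*(k//2)/4), so row pos=1 becomes
        # [sin(1/1), cos(1/1), sin(1/100), cos(1/100)] ~= [0.841, 0.540, 0.010, 1.000]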
# Triton, trident? Not self attention! Triplet as input q, k, v belonging to different conceptual sets
class Triton(nn.Module):
def __init__(self, in_features, d_k, d_v, d_f, n_heads, n_layers, dropout_prob):
super(Triton, self).__init__()
assert n_layers >= 1, 'Not acceptable number of layers: {}'.format(n_layers)
self.n_layers = n_layers
#self.encoders = nn.ModuleList([TritonEncoder(emb_dim, d_k, d_v, n_heads) for _ in range(n_layers)])
#encoders = [TritonEncoder(emb_dim, d_k, d_v, d_f, n_heads) for _ in range(n_layers)]
#self.encoders = nn.Sequential(*encoders)
#todo change to allow multiple layers. Problem: sequential take only 1 input, so pack inputs to a tuple.
self.encoders = TritonEncoder(in_features, d_k, d_v, d_f, n_heads, dropout_prob)
def forward(self, ut, kt, vt):
out = self.encoders(ut, kt, vt)
return out
def __str__(self):
return super().__str__()
class TritonEncoder(nn.Module):
def __init__(self, in_features, d_k, d_v, d_f, n_heads, dropout_prob):
super(TritonEncoder, self).__init__()
self.multihead_attn = TritonMultiHeadCrossAttention(in_features, d_k, d_v, n_heads)
self.dropout = nn.Dropout(p=dropout_prob)
self.layerNorm = nn.LayerNorm(in_features)
self.fnn = nn.Sequential(nn.Linear(in_features=in_features, out_features=d_f),
nn.ReLU(),
nn.Linear(in_features=d_f, out_features=in_features))
def forward(self, u_t, k_t, v_t):
multihead_out = self.multihead_attn(u_t, k_t, v_t)
# residual connection is performed after the dropout and before normalization in (Vaswani et al.)
norm_out = self.layerNorm(self.dropout(multihead_out))
enc_out = self.fnn(norm_out)
        out = self.layerNorm(norm_out + self.dropout(enc_out))
return out
def __str__(self):
return super().__str__()
class TritonMultiHeadCrossAttention(nn.Module):
def __init__(self, in_features, d_k, d_v, n_heads):
super(TritonMultiHeadCrossAttention, self).__init__()
self.n_heads = n_heads
self.attn_heads = nn.ModuleList([TritonCrossAttentionHead(in_features, d_k, d_v) for _ in range(n_heads)])
def forward(self, u_t, k_t, v_t):
        heads_out = torch.cat([attn_head(u_t, k_t, v_t) for attn_head in self.attn_heads])
return heads_out
def __str__(self):
return super().__str__()
class TritonCrossAttentionHead(nn.Module):
def __init__(self, in_features, d_k, d_v):
super(TritonCrossAttentionHead, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.Q = nn.Linear(in_features, d_k)
self.K = nn.Linear(in_features, d_k)
self.V = nn.Linear(in_features, d_v)
def forward(self, u_t, k_t, v_t):
query = self.Q(u_t)
key = self.K(k_t)
value = self.V(v_t)
attn_logits = torch.matmul(query, torch.transpose(key, 0, 1))/ math.sqrt(self.d_k)
attn_scores = F.softmax(attn_logits, -1)
out = torch.sum(attn_scores[:, None] * value, dim=0)
return out
def __str__(self):
return super().__str__()
| 10,368 | 36.981685 | 140 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/decoder.py
|
import math
import pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
#the value with which to mask the attention
# DO NOT USE '-INF' BECAUSE IT WILL GENERATE NaN AFTER SOFTMAX FOR PADDING ROWS (filled with all 0's)
_MASKING_VALUE=-1e30
class Decoder(nn.Module):
def __init__(self,
d_model,
d_enc,
d_context,
d_k,
d_v,
d_f,
n_layers,
n_heads,
embedding_net,
input_vocab_size,
out_vocab_size,
dropout_prob):
super(Decoder, self).__init__()
self.d_model = d_model
self.input_vocab_size = input_vocab_size
self.out_vocab_size = out_vocab_size
self.n_layers = n_layers
self.embedding_layer = TransformerEmbedding(input_vocab_size, d_model)
#self.embedding_layer = embedding_net
self.decoder_layers = nn.ModuleList([
MultiAttentiveDecoder(d_model=d_model,
d_enc=d_enc,
d_context=d_context,
d_k=d_k,
d_v=d_v,
d_f=d_f,
n_heads=n_heads,
dropout_prob=dropout_prob)
for _ in range(n_layers)
])
self.out_layer = nn.Sequential(nn.Linear(d_model, d_model//2),
nn.ReLU(),
nn.Linear(d_model//2, d_model//4),
nn.ReLU(),
nn.Linear(d_model//4, self.out_vocab_size))
def forward(self,
input_batch,
encoder_out,
#history_context,
visual,
enc_mask,
visual_mask,
input_mask=None):
device = input_batch.device
if input_mask is None:
input_mask = torch.ones(input_batch.shape, dtype=torch.long).to(device)
assert input_batch.dim() == 2, 'Expected tensor with 2 dimensions but got {}'.format(input_batch.dim())
        assert encoder_out.dim() == 3, 'Expected tensor with 3 dimensions but got {}'.format(encoder_out.dim())
assert input_mask.dim() == 2, 'Expected tensor with 2 dimensions but got {}'.format(input_mask.dim())
assert enc_mask.dim() == 2, 'Expected tensor with 2 dimensions but got {}'.format(enc_mask.dim())
assert input_batch.shape[0] == encoder_out.shape[0], 'Inconsistent batch size'
#assert input_batch.shape[0] == history_context.shape[0], 'Inconsistent batch size'
#assert input_batch.shape[0] == visual_context.shape[0], 'Inconsistent batch size'
assert input_batch.shape[0] == input_mask.shape[0], 'Inconsistent batch size'
assert input_batch.shape[0] == enc_mask.shape[0], 'Inconsistent batch size'
assert input_batch.shape == input_mask.shape, 'Inconsistent mask size, {} and {}'.format(input_batch.shape, input_mask.shape)
assert encoder_out.shape[:2] == enc_mask.shape, 'Inconsistent mask size, {} and {}'.format(encoder_out.shape[:2], enc_mask.shape)
assert input_batch.device == encoder_out.device, 'Different devices'
#assert input_batch.device == history_context.device, 'Different devices'
#assert input_batch.device == visual_context.device, 'Different devices'
assert input_batch.device == input_mask.device, 'Different devices'
assert input_batch.device == enc_mask.device, 'Different devices'
        #input mask is the padding mask
        #the self attention mask results from the combination of the attention mask and the padding mask
        #the attention mask is an upper triangular mask that prevents each word from attending to the future
        #the padding mask instead contains 0's for the entries corresponding to padding in the sequence
        #the resulting matrix prevents attention to the future and removes attention between padding tokens
if input_mask is not None:
self_attn_mask = torch.tensor((np.triu(np.ones((input_batch.shape[0], input_batch.shape[1], input_batch.shape[1])), k=1) == 0), dtype=torch.long).to(device)
self_attn_mask &= input_mask[:, :, None]
else:
#this is used during inference, where the words are given one by one (future is not present)
self_attn_mask = torch.ones((input_batch.shape[0], input_batch.shape[1], input_batch.shape[1]))
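        # toy example (illustrative): for a 3-token target whose last token is padding, input_mask = [1, 1, 0];
        # the lower-triangular causal mask AND-ed row-wise with it gives
        # [[1, 0, 0],
        #  [1, 1, 0],
        #  [0, 0, 0]]
        # so no position attends to the future and padded query rows are fully masked.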
        #the encoder attention mask avoids 2 things:
        # the decoder attending to encoder padding (applied row wise)
        # decoder padding being used as query (applied column wise)
enc_attn_mask = torch.zeros((input_mask.shape[0], input_mask.shape[1], enc_mask.shape[1])).to(device)
enc_attn_mask[:, :] = enc_mask[:, None, :]
enc_attn_mask = enc_attn_mask.transpose(1, 2)
enc_attn_mask[:, :] *= input_mask[:, None, :]
enc_attn_mask = enc_attn_mask.transpose(1, 2)
visual_attn_mask = torch.zeros((input_mask.shape[0], input_mask.shape[1], visual_mask.shape[1])).to(device)
visual_attn_mask[:, :] = visual_mask[:, None, :]
visual_attn_mask = visual_attn_mask.transpose(1, 2)
visual_attn_mask[:, :] *= input_mask[:, None, :]
visual_attn_mask = visual_attn_mask.transpose(1, 2)
x = self.embedding_layer(input_batch)
"""
x = torch.zeros((input_batch.shape[0], input_batch.shape[1], self.d_model), dtype=torch.float32).to(device)
#todo x[:, 0] = self.embedding_layer(self.start_id)
for i in range(input_batch.shape[-1]):
curr_embs = self.embedding_layer(input=input_batch[:, :i+1],
input_mask=self_attn_mask[:, i, :i+1],
input_token_type=torch.zeros(input_batch[:, :i+1].shape, dtype=torch.long).to(device))
x[:, i] = curr_embs[:, i]
"""
#todo from here
#pdb.set_trace()
for idx in range(len(self.decoder_layers)):
#pdb.set_trace()
x = self.decoder_layers[idx](input_embs=x,
enc_out=encoder_out,
#history_context=history_context,
visual=visual,
self_attn_mask=self_attn_mask,
enc_attn_mask=enc_attn_mask,
visual_attn_mask=visual_attn_mask)
#pdb.set_trace()
vocab_logits = self.out_layer(x)
return vocab_logits
"""
if self.training:
for _ in range(gt_sequences.shape[1]):
#curr_out, (curr_ht, curr_ct) = self.decoder_module(prev_out, (prev_ht, prev_ct))
#logits = self.out_layer(curr_out)
#todo save the logits (need to know the sentences lengths)
#pred_tok = torch.argmax(F.softmax(logits))
prev_out = gt_sequences['tok_embeddings'][:, 0]
#prev_ht = curr_ht
#prev_ct = curr_ct
#todo end tok at the end
return None
else:
#here beam search
pass
"""
def __str__(self):
return super().__str__()
class TransformerEmbedding(nn.Module):
def __init__(self, vocab_size, d_model, embedding_net=None):
super(TransformerEmbedding, self).__init__()
if embedding_net is not None:
assert embedding_net.embedding_dim == d_model, 'Embedding size of {} does not match d_model of {}'.format(embedding_net.embedding_dim, d_model)
self.embedding_net = embedding_net
else:
self.embedding_net = nn.Embedding(vocab_size, d_model)
self.d_model = d_model
self.positional_embeddings = self.init_positional(max_seq_len=512,
emb_dim=d_model)
def forward(self, input_seq):
assert input_seq.dim() == 2, 'Expected tensor with 2 dimensions but got {}'.format(input_seq.dim())
batch_size = input_seq.shape[0]
input_len = input_seq.shape[1]
device = input_seq.device
pos_emb = self.positional_embeddings[:input_len].to(device)
        #scale the embeddings by sqrt(d_model) so the token information is not overwhelmed by the positional encodings
return self.embedding_net(input_seq)*math.sqrt(self.d_model) + pos_emb[None, :, :]
def init_positional(self, max_seq_len, emb_dim):
"""Create positional embeddings (from "Attention Is All You Need", Vaswani et al. 2017)
Args:
            max_seq_len (int): maximum number of positions in the sequence
            emb_dim (int): size of embeddings
        Returns:
            torch.FloatTensor: a positional embedding with shape [MAX_SEQ_LEN x EMB_DIM]
"""
# keep dim 0 for padding token position encoding zero vector
position_enc = np.array([
[pos / np.power(10000, 2 * (k // 2) / emb_dim) for k in range(emb_dim)]
if pos != 0 else np.zeros(emb_dim) for pos in range(max_seq_len)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return torch.from_numpy(position_enc).type(torch.FloatTensor)
def __str__(self):
return super().__str__()
class MultiAttentiveDecoder(nn.Module):
def __init__(self,
d_model,
d_enc,
d_context,
d_k,
d_v,
d_f,
n_heads,
dropout_prob):
super(MultiAttentiveDecoder, self).__init__()
#multi head self attention
#encoder attention
#fusion layer
self.multi_head_self = MultiHeadSelfAttention(d_model=d_model, d_k=d_k, d_v=d_v, n_heads=n_heads, dropout_prob=dropout_prob)
self.multi_head_enc = MultiHeadEncoderAttention(d_model=d_model, d_enc=d_enc, d_k=d_k, d_v=d_v, n_heads=n_heads, dropout_prob=dropout_prob)
self.multi_head_item = MultiHeadEncoderAttention(d_model=d_model, d_enc=d_enc, d_k=d_k, d_v=d_v, n_heads=n_heads, dropout_prob=dropout_prob)
#self.fusion_module = FusionModule(d_model=d_model, d_context=d_context, dropout_prob=dropout_prob)
self.layerNorm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(p=dropout_prob)
self.fnn = nn.Sequential(nn.Linear(in_features=d_model, out_features=d_f),
nn.ReLU(),
nn.Dropout(p=dropout_prob),
nn.Linear(in_features=d_f, out_features=d_model))
def forward(self,
input_embs,
enc_out,
#history_context,
visual,
visual_attn_mask,
self_attn_mask,
enc_attn_mask):
#pdb.set_trace()
self_attn_out = self.multi_head_self(input_embs, self_attn_mask)
sub_out1 = self.layerNorm(input_embs + self.dropout(self_attn_out))
enc_attn_out = self.multi_head_enc(sub_out1, enc_out, enc_attn_mask)
sub_out2 = self.layerNorm(sub_out1 + self.dropout(enc_attn_out))
item_attn_out = self.multi_head_item(sub_out2, visual, visual_attn_mask)
sub_out3 = self.layerNorm(sub_out2 + self.dropout(item_attn_out))
#fusion_out = self.fusion_module(sub_out2, history_context, visual_context)
#sub_out3 = self.layerNorm(sub_out2 + self.dropout(fusion_out))
fnn_out = self.fnn(sub_out3) #todo change to subout3
sub_out4 = self.layerNorm(sub_out3 + self.dropout(fnn_out)) #todo change to subout3
return sub_out4
def __str__(self):
return super().__str__()
class FusionModule(nn.Module):
def __init__(self, d_model, d_context, dropout_prob):
super(FusionModule, self).__init__()
self.d_context = d_context
self.d_model = d_model
d_cat = d_model+d_context
self.h_stream = nn.Sequential(nn.Linear(in_features=d_cat, out_features=d_cat//2),
nn.Linear(in_features=d_cat//2, out_features=d_cat//4),
nn.ReLU(),
nn.Dropout(p=dropout_prob),
nn.Linear(in_features=d_cat//4, out_features=d_cat//2),
nn.Linear(in_features=d_cat//2, out_features=d_cat))
self.v_stream = nn.Sequential(nn.Linear(in_features=d_cat, out_features=d_cat//2),
nn.Linear(in_features=d_cat//2, out_features=d_cat//4),
nn.ReLU(),
nn.Dropout(p=dropout_prob),
nn.Linear(in_features=d_cat//4, out_features=d_cat//2),
nn.Linear(in_features=d_cat//2, out_features=d_cat))
self.fusion_stream = nn.Sequential(nn.Linear(in_features=2*d_cat, out_features=d_cat),
nn.ReLU(),
nn.Dropout(p=dropout_prob),
nn.Linear(in_features=d_cat, out_features=d_model))
def forward(self, decoder_batch, history_cntx, visual_cntx):
assert decoder_batch.dim() == 3, 'Expected 3 dimensions, got {}'.format(decoder_batch.dim())
assert history_cntx.shape[-1] == self.d_context, 'History dimension {} does not match d_context of {}'.format(history_cntx.shape[-1], self.d_context)
assert history_cntx.shape[-1] == visual_cntx.shape[-1], 'History and visual context sizes do not match'
h_in = torch.cat((decoder_batch, history_cntx.unsqueeze(1).expand(-1, decoder_batch.shape[1], -1)), dim=-1)
v_in = torch.cat((decoder_batch, visual_cntx.unsqueeze(1).expand(-1, decoder_batch.shape[1], -1)), dim=-1)
h_out = self.v_stream(h_in)
v_out = self.h_stream(v_in)
fuse_in = torch.cat((h_out, v_out), dim=-1)
fuse_out = self.fusion_stream(fuse_in)
return fuse_out
class MultiHeadSelfAttention(nn.Module):
def __init__(self, d_model, d_k, d_v, n_heads, dropout_prob):
super(MultiHeadSelfAttention, self).__init__()
self.n_heads = n_heads
self.attn_heads = nn.ModuleList([SelfAttention(d_model, d_k, d_v, dropout_prob) for _ in range(n_heads)])
def forward(self, input_batch, attn_mask):
return torch.cat([attn_head(input_batch, attn_mask) for attn_head in self.attn_heads], dim=-1)
def __str__(self):
return super().__str__()
class MultiHeadEncoderAttention(nn.Module):
def __init__(self, d_model, d_enc, d_k, d_v, n_heads, dropout_prob):
super(MultiHeadEncoderAttention, self).__init__()
self.n_heads = n_heads
self.attn_heads = nn.ModuleList([EncoderAttention(d_model, d_enc, d_k, d_v, dropout_prob) for _ in range(n_heads)])
def forward(self, decoder_batch, encoder_out, attn_mask):
return torch.cat([attn_head(decoder_batch, encoder_out, attn_mask) for attn_head in self.attn_heads], dim=-1)
def __str__(self):
return super().__str__()
class SelfAttention(nn.Module):
def __init__(self, d_model, d_k, d_v, dropout_prob):
super(SelfAttention, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.Q = nn.Linear(d_model, d_k)
self.K = nn.Linear(d_model, d_k)
self.V = nn.Linear(d_model, d_v)
self.dropout = nn.Dropout(p=dropout_prob)
def forward(self, input_batch, attn_mask):
query = self.Q(input_batch)
key = self.K(input_batch)
value = self.V(input_batch)
attn_logits = torch.matmul(query, torch.transpose(key, -2, -1))/ math.sqrt(self.d_k)
        #mask padding and future words with a large negative value.
# DO NOT USE '-INF' VALUES BECAUSE THEY WILL PRODUCE NaN VALUES AFTER SOFTMAX FOR PADDING WORDS
masked_attn_logits = attn_logits.masked_fill(attn_mask==0, value=_MASKING_VALUE)
attn_scores = F.softmax(masked_attn_logits, -1)
attn_scores = self.dropout(attn_scores)
out = torch.matmul(attn_scores, value)
return out
def __str__(self):
return super().__str__()
class EncoderAttention(nn.Module):
def __init__(self, d_model, d_enc, d_k, d_v, dropout_prob):
super(EncoderAttention, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.Q = nn.Linear(d_model, d_k)
self.K = nn.Linear(d_enc, d_k)
self.V = nn.Linear(d_enc, d_v)
self.dropout = nn.Dropout(p=dropout_prob)
def forward(self, decoder_batch, encoder_out, attn_mask):
query = self.Q(decoder_batch)
key = self.K(encoder_out)
value = self.V(encoder_out)
attn_logits = torch.matmul(query, torch.transpose(key, -2, -1))/ math.sqrt(self.d_k)
        #mask padding and future words with a large negative value.
# DO NOT USE '-INF' VALUES BECAUSE THEY WILL PRODUCE NaN VALUES AFTER SOFTMAX FOR PADDING WORDS
masked_attn_logits = attn_logits.masked_fill(attn_mask==0, value=_MASKING_VALUE)
attn_scores = F.softmax(masked_attn_logits, dim=-1)
attn_scores = self.dropout(attn_scores)
out = torch.matmul(attn_scores, value)
return out
def __str__(self):
return super().__str__()
| 17,844 | 42.630807 | 168 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/matransformer.py
|
import math
import pdb
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .embednets import ItemEmbeddingNetwork, WordEmbeddingNetwork
from .decoder import Decoder
from .old_encoder import SingleEncoder
from .bert import BertEncoder
from transformers import BertTokenizer #todo remove
_MAX_INFER_LEN = 30
class MultiAttentiveTransformer(nn.Module):
def __init__(self,
pad_token,
start_token,
end_token,
unk_token, #? remove
seed,
dropout_prob,
n_decoders,
decoder_heads,
out_vocab,
freeze_bert=False,
beam_size=None,
retrieval_eval=False,
gen_eval=False,
mode='train',
device='cpu'):
torch.manual_seed(seed)
super(MultiAttentiveTransformer, self).__init__()
if mode == 'inference':
            assert retrieval_eval or gen_eval, 'At least one among retrieval_eval and gen_eval must be activated during inference'
            if gen_eval:
                assert beam_size is not None, 'Beam size needs to be defined during generation inference'
self.mode = mode
self.beam_size = beam_size
self.retrieval_eval = retrieval_eval
self.gen_eval = gen_eval
self.bert2genid = out_vocab
#self.item_embeddings_layer = ItemEmbeddingNetwork(item_embeddings_path)
"""
self.word_embeddings_layer = WordEmbeddingNetwork(word_embeddings_path=word_embeddings_path,
word2id=word2id,
pad_token=pad_token,
unk_token=unk_token,
freeze=freeze_embeddings)
self.emb_dim = self.word_embeddings_layer.embedding_size
"""
self.bert = BertEncoder(pretrained='bert-base-uncased', freeze=freeze_bert)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.vocab = self.tokenizer.vocab
self.id2word = {id: word for word, id in self.vocab.items()}
self.genid2word = {gen_id: self.tokenizer.convert_ids_to_tokens(bert_id) for bert_id, gen_id in self.bert2genid.items()}
self.genid2bertid = {id: word for word, id in self.bert2genid.items()}
#start_id only presents in input
self.start_id = self.vocab[start_token]
#end_id only presents in output
self.end_id = self.bert2genid[self.vocab[end_token]]
#pad_id is the same between input and ouput
self.pad_id = self.vocab[pad_token]
self.unk_id = self.vocab[unk_token]
conf = self.bert.configuration
self.input_vocab_size = conf.vocab_size
self.output_vocab_size = len(out_vocab)
self.encoder_hidden_size = conf.hidden_size
"""
self.encoder = SingleEncoder(input_size=self.emb_dim,
hidden_size=hidden_size,
dropout_prob=dropout_prob,
encoder_heads=encoder_heads,
embedding_net=self.word_embeddings_layer)
"""
#for h heads: d_k == d_v == emb_dim/h
self.decoder = Decoder(d_model=self.encoder_hidden_size,
d_enc=self.encoder_hidden_size,
d_context=self.encoder_hidden_size,
d_k=self.encoder_hidden_size//decoder_heads,
d_v=self.encoder_hidden_size//decoder_heads,
d_f=self.encoder_hidden_size//2,
n_layers=n_decoders,
n_heads=decoder_heads,
embedding_net=self.bert,
input_vocab_size=self.input_vocab_size,
out_vocab_size=self.output_vocab_size,
dropout_prob=dropout_prob)
def forward(self,
utterances,
utterances_mask,
utterances_token_type,
responses,
responses_mask,
responses_token_type,
focus,
focus_mask,
focus_token_type,
history,
actions,
attributes,
candidates=None,
candidates_mask=None,
candidates_token_type=None,
candidates_targets=None,
seq_lengths=None):
"""The visual context is a list of visual contexts (a batch). Each visual context is, in turn, a list
of items. Each item is a list of (key, values) pairs, where key is a tensor containing the word ids
for the field name and values is a list of values where each value is a tensor of word ids.
type(visual_context[<sample_in_batch>][<item_n>][<field_n>]) -> tuple(tensor(key), [tensor(value)])
Args:
utterances ([type]): [description]
history ([type]): [description]
visual_context ([type]): [description]
seq_lengths ([type], optional): [description]. Defaults to None.
device (str, optional): [description]. Defaults to 'cpu'.
Returns:
[type]: [description]
"""
curr_device = utterances.device
if self.mode == 'inference':
assert utterances.shape[0] == 1, 'Only unitary batches allowed during inference'
#check batch size consistency (especially when using different gpus) and move list tensors to correct gpu
        assert utterances.shape[0] == utterances_mask.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == utterances_token_type.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == responses.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == responses_mask.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == responses_token_type.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == focus.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == focus_mask.shape[0], 'Inconsistent batch size'
        assert utterances.shape[0] == focus_token_type.shape[0], 'Inconsistent batch size'
if self.retrieval_eval:
assert candidates_targets is not None, 'Candidates have to be specified with retrieval_eval set to True'
assert attributes is not None, 'Attributes needed for semantic score computation'
u_t_all = self.bert(input=utterances,
input_mask=utterances_mask,
input_token_type=utterances_token_type)
v_t_tilde = self.bert(input=focus,
input_mask=focus_mask,
input_token_type=focus_token_type)
"""
u_t_all, v_t_tilde, h_t_tilde = self.encoder(utterances=utterances,
history=history,
focus_items=focus,
seq_lengths=seq_lengths)
"""
#decoding phase
if self.mode == 'train':
vocab_logits = self.decoder(input_batch=responses,
encoder_out=u_t_all,
#history_context=h_t_tilde,
visual=v_t_tilde,
input_mask=responses_mask,
enc_mask=utterances_mask,
visual_mask=focus_mask)
return vocab_logits
else:
infer_res = {}
if self.gen_eval:
#at inference time (NOT EVAL)
self.never_ending = 0
dec_args = {'encoder_out': u_t_all, 'enc_mask': utterances_mask, 'visual': v_t_tilde, 'visual_mask': focus_mask}
best_dict = self.beam_search(curr_seq=[self.start_id],
curr_score=0,
dec_args=dec_args,
best_dict={'seq': [], 'score': -float('inf')},
device=curr_device)
best_dict['string'] = self.tokenizer.decode([self.genid2bertid[id] for id in best_dict['seq']])
#print('Never-ending generated sequences: {}'.format(self.never_ending))
infer_res['generation'] = best_dict
if self.retrieval_eval:
#eval on retrieval task
#build a fake batch by expanding the tensors
vocab_logits = [
self.decoder(input_batch=pool,
encoder_out=u_t_all.expand(pool.shape[0], -1, -1),
#history_context=h_t_tilde.expand(pool.shape[0], -1),
visual=v_t_tilde.expand(pool.shape[0], -1, -1),
visual_mask=focus_mask.expand(pool.shape[0], -1),
input_mask=pool_mask,
enc_mask=utterances_mask.expand(pool.shape[0], -1))
for pool, pool_mask in zip(candidates, candidates_mask)
]
#candidates_scores shape: Bx100
candidates_scores = self.compute_candidates_scores(candidates_targets, vocab_logits, attributes)
infer_res['retrieval'] = candidates_scores
return infer_res
def beam_search(self, curr_seq, curr_score, dec_args, best_dict, device):
#pdb.set_trace()
if curr_seq[-1] == self.end_id or len(curr_seq) > _MAX_INFER_LEN:
assert len(curr_seq)-1 != 0, 'Division by 0 for generated sequence {}'.format(curr_seq)
#discard the start_id only. The probability of END token has to be taken into account instead.
norm_score = curr_score/(len(curr_seq)-1)
if norm_score > best_dict['score']:
best_dict['score'], best_dict['seq'] = curr_score, curr_seq[1:].copy() #delete the start_token
if len(curr_seq) > _MAX_INFER_LEN:
self.never_ending += 1
return best_dict
vocab_logits = self.decoder(input_batch=torch.tensor(curr_seq).unsqueeze(0).to(device), **dec_args).squeeze(0)
#take only the prediction for the last word
vocab_logits = vocab_logits[-1]
beam_ids = torch.argsort(vocab_logits, descending=True, dim=-1)[:self.beam_size].tolist()
lprobs = F.log_softmax(vocab_logits, dim=-1)
for curr_id in beam_ids:
curr_lprob = lprobs[curr_id].item()
best_dict = self.beam_search(curr_seq=curr_seq+[curr_id],
curr_score=curr_score+curr_lprob,
dec_args=dec_args,
best_dict=best_dict,
device=device)
return best_dict
def compute_candidates_scores(self, candidates_targets, vocab_logits, attributes):
"""The score of each candidate is the sum of the log-likelihood of each word, normalized by its length.
        The score is a negative value; without the normalization by length, longer sequences would be penalized.
"""
scores = torch.zeros(candidates_targets.shape[:2])
for batch_idx, (pool_ids, pool_logits) in enumerate(zip(candidates_targets, vocab_logits)):
pool_lprobs = F.log_softmax(pool_logits, dim=-1)
for sentence_idx, (candidate_ids, candidate_lprobs) in enumerate(zip(pool_ids, pool_lprobs)):
curr_lprob = []
for candidate_word, words_probs in zip(candidate_ids, candidate_lprobs):
#until padding
if candidate_word.item() == self.pad_id:
break
curr_lprob.append(words_probs[candidate_word.item()].item())
scores[batch_idx, sentence_idx] = sum(curr_lprob)/len(curr_lprob)
#semantic score
candidate_string = self.tokenizer.decode([self.genid2bertid[id.item()] for id in candidate_ids])
semantic_score = [attr.lower() in candidate_string.lower() and len(attr) > 1 for attr in attributes]
if len(attributes):
scores[batch_idx, sentence_idx] += sum(semantic_score)/len(attributes)
return scores
def collate_fn(self, batch):
"""
        This method prepares the batch for the transformer: tensor stacking + creation of the shifted generative targets
        Args:
            batch (tuple): tuple of element returned by the Dataset.__getitem__()
        Returns:
            dial_ids (list): list of dialogue ids
            turns (list): list of dialogue turn numbers
            batch_dict (dict): padded id/mask/token-type tensors for utterances, responses, focus item and, during retrieval inference, candidates
            generative_targets (torch.LongTensor): response ids shifted by one position and mapped to the generative vocabulary (train mode only)
"""
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
utterances = torch.stack([item[2] for item in batch])
utterances_mask = torch.stack([item[3] for item in batch])
utterances_token_type = torch.stack([item[4] for item in batch])
responses = torch.stack([item[5] for item in batch])
responses_mask = torch.stack([item[6] for item in batch])
responses_token_type = torch.stack([item[7] for item in batch])
#history = [item[4] for item in batch]
#actions = torch.stack([item[5] for item in batch])
attributes = [item[8] for item in batch]
focus = torch.stack([item[9] for item in batch])
focus_mask = torch.stack([item[10] for item in batch])
focus_token_type = torch.stack([item[11] for item in batch])
if self.mode == 'train':
#creates target by shifting and converting id to output vocabulary
generative_targets = torch.cat((responses[:, 1:].clone().detach(), torch.zeros((responses.shape[0], 1), dtype=torch.long)), dim=-1)
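            # shift sketch (illustrative ids): responses [CLS, w1, w2, SEP, PAD] become targets
            # [w1, w2, SEP, PAD, 0]; each id is then remapped below from the BERT vocabulary to the
            # smaller generative vocabulary, with out-of-vocabulary ids falling back to the UNK entry.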
for batch_idx, _ in enumerate(generative_targets):
for id_idx, curr_id in enumerate(generative_targets[batch_idx]):
if curr_id.item() == self.pad_id:
continue
if curr_id.item() not in self.bert2genid:
#dev test contains oov tokens
generative_targets[batch_idx][id_idx] = self.bert2genid[self.unk_id]
else:
generative_targets[batch_idx][id_idx] = self.bert2genid[curr_id.item()]
if self.mode == 'inference' and self.retrieval_eval:
candidates = torch.stack([item[12] for item in batch])
candidates_mask = torch.stack([item[13] for item in batch])
candidates_token_type = torch.stack([item[14] for item in batch])
candidates_targets = torch.cat((candidates[:, :, 1:].clone().detach(), torch.zeros((responses.shape[0], 100, 1), dtype=torch.long)), dim=-1)
for batch_idx, _ in enumerate(candidates_targets):
for pool_idx, curr_pool in enumerate(candidates_targets[batch_idx]):
for id_idx, curr_id in enumerate(candidates_targets[batch_idx][pool_idx]):
if curr_id.item() == self.pad_id:
continue
if curr_id.item() not in self.bert2genid:
candidates_targets[batch_idx][pool_idx][id_idx] = self.bert2genid[self.unk_id]
else:
candidates_targets[batch_idx][pool_idx][id_idx] = self.bert2genid[curr_id.item()]
assert utterances.shape[0] == len(dial_ids), 'Batch sizes do not match'
assert utterances.shape[0] == len(turns), 'Batch sizes do not match'
#assert len(utterances) == len(history), 'Batch sizes do not match'
#assert len(utterances) == len(actions), 'Batch sizes do not match'
#assert len(utterances) == len(attributes), 'Batch sizes do not match'
assert utterances.shape[0] == len(focus), 'Batch sizes do not match'
if self.mode == 'train':
assert responses.shape == generative_targets.shape, 'Batch sizes do not match'
if self.mode == 'inference' and self.retrieval_eval:
assert len(utterances) == candidates.shape[0], 'Batch sizes do not match'
batch_dict = {}
batch_dict['utterances'] = utterances
batch_dict['utterances_mask'] = utterances_mask
batch_dict['utterances_token_type'] = utterances_token_type
batch_dict['responses'] = responses
batch_dict['responses_mask'] = responses_mask
batch_dict['responses_token_type'] = responses_token_type
batch_dict['focus'] = focus
batch_dict['focus_mask'] = focus_mask
batch_dict['focus_token_type'] = focus_token_type
if self.retrieval_eval:
batch_dict['candidates'] = candidates
batch_dict['candidates_mask'] = candidates_mask
batch_dict['candidates_token_type'] = candidates_token_type
batch_dict['candidates_targets'] = candidates_targets
assert len(attributes) == 1, 'Batch size greater than 1 for attributes'
batch_dict['attributes'] = attributes[0]
ret_tuple = (dial_ids, turns, batch_dict)
if self.mode == 'train':
ret_tuple += (generative_targets,)
return ret_tuple
def __str__(self):
return super().__str__()
| 18,570 | 51.312676 | 155 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/__init__.py
|
from .blindstateless import BlindStatelessLSTM
from .matransformer import MultiAttentiveTransformer
| 99 | 49 | 52 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/models/blindstateless.py
|
import pdb
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .embednets import WordEmbeddingNetwork
class BlindStatelessLSTM(nn.Module):
"""Implementation of a blind and stateless LSTM for action prediction. It approximates the probability distribution:
P(a_t | U_t)
Where a_t is the action and U_t the user utterance.
Args:
torch (WordEmbeddingBasedNetwork): inherits from WordEmbeddingBasedNetwork
Attributes:
self.corrections (dict): Mapping from dataset word to its corrections (the corrections is included in the vocabulary)
"""
_HIDDEN_SIZE = 600
def __init__(self, word_embeddings_path, word2id, pad_token, unk_token,
seed, OOV_corrections=False, freeze_embeddings=False):
"""
Glove download: https://nlp.stanford.edu/projects/glove/
Args:
embedding_path ([type]): [description]
"""
torch.manual_seed(seed)
super(BlindStatelessLSTM, self).__init__()
self.hidden_size = self._HIDDEN_SIZE
self.word_embeddings_layer = WordEmbeddingNetwork(word_embeddings_path=word_embeddings_path,
word2id=word2id,
pad_token=pad_token,
unk_token=unk_token,
OOV_corrections=OOV_corrections,
freeze=freeze_embeddings)
self.lstm = nn.LSTM(self.word_embeddings_layer.embedding_dim, self.hidden_size, batch_first=True)
self.dropout = nn.Dropout(p=0.5)
self.matching_layer = nn.Linear(in_features=2*self.hidden_size, out_features=1)
def forward(self, utterances, candidates_pool, seq_lengths=None, device='cpu'):
"""Forward pass for BlindStatelessLSTM
Args:
            utterances (torch.LongTensor): Tensor containing the batch of user utterances (shape=BxMAX_SEQ_LEN)
            seq_lengths (torch.LongTensor, optional): Effective lengths (no pad) of each sequence in the batch. If given, pack_padded_sequence is used.
The shape is B. Defaults to None.
"""
embedded_seq_tensor = self.word_embeddings_layer(utterances)
if seq_lengths is not None:
# pack padded sequence
packed_input = pack_padded_sequence(embedded_seq_tensor, seq_lengths.cpu().numpy(), batch_first=True)
# h_t has shape NUM_DIRxBxHIDDEN_SIZE
out, (h_t, c_t) = self.lstm(packed_input)
"""unpack not needed. We don't use the output
if seq_lengths is not None:
# unpack padded sequence
output, input_sizes = pad_packed_sequence(out1, batch_first=True)
"""
utterance_hiddens = self.dropout(h_t.squeeze(0))
# tensors_pool has shape BATCH_SIZEx100xHIDDEN_SIZE
tensors_pool = self.encode_pool(candidates_pool, device)
# build pairs (utterance, candidate[i]) for computing similarity
assert utterance_hiddens.shape[0] == tensors_pool.shape[0], 'Problem with first dimension'
matching_pairs = []
for utterance, candidate in zip(utterance_hiddens, tensors_pool):
matching_pairs.append(torch.cat((utterance.expand(candidate.shape[0], -1), candidate), dim=-1))
matching_pairs = torch.stack(matching_pairs)
# matching_pairs has shape Bx100x2*HIDDEN_SIZE
matching_logits = []
for pair in matching_pairs:
matching_logits.append(self.matching_layer(pair))
matching_logits = torch.stack(matching_logits).squeeze(-1)
matching_scores = torch.sigmoid(matching_logits)
return matching_logits, matching_scores
def encode_pool(self, candidates_pool, device):
tensors_pool = []
for pool in candidates_pool:
curr_hiddens = []
for candidate in pool:
embeddings = self.word_embeddings_layer(candidate.to(device))
_, (h_t, _) = self.lstm(embeddings.unsqueeze(0))
curr_hiddens.append(h_t.squeeze(0).squeeze(0))
tensors_pool.append(torch.stack(curr_hiddens))
tensors_pool = torch.stack(tensors_pool)
return tensors_pool
def collate_fn(self, batch):
"""This method prepares the batch for the LSTM: padding + preparation for pack_padded_sequence
Args:
batch (tuple): tuple of element returned by the Dataset.__getitem__()
Returns:
            dial_ids (list): list of dialogue ids, sorted by descending sequence length
            turns (list): list of dialogue turn numbers, sorted consistently with dial_ids
            batch_dict (dict): contains 'utterances' (padded BxMAX_SEQ_LEN tensor of user transcripts) and 'seq_lengths' (tensor with shape B of effective lengths)
            responses_pool (list): candidate responses for each sample, sorted consistently with dial_ids
"""
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
transcripts = [torch.tensor(item[2]) for item in batch]
responses_pool = [item[7] for item in batch]
assert len(transcripts) == len(dial_ids), 'Batch sizes do not match'
assert len(transcripts) == len(turns), 'Batch sizes do not match'
assert len(transcripts) == len(responses_pool), 'Batch sizes do not match'
# reorder the sequences from the longest one to the shortest one.
# keep the correspondance with the target
transcripts_lengths = torch.tensor(list(map(len, transcripts)), dtype=torch.long)
transcripts_tensor = torch.zeros((len(transcripts), transcripts_lengths.max()), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(transcripts, transcripts_lengths)):
transcripts_tensor[idx, :seqlen] = seq.clone().detach()
# sort instances by sequence length in descending order
transcripts_lengths, perm_idx = transcripts_lengths.sort(0, descending=True)
transcripts_tensor = transcripts_tensor[perm_idx]
sorted_dial_ids = []
sorted_dial_turns = []
sorted_responses = []
for idx in perm_idx:
sorted_dial_ids.append(dial_ids[idx])
sorted_dial_turns.append(turns[idx])
sorted_responses.append(responses_pool[idx])
batch_dict = {}
batch_dict['utterances'] = transcripts_tensor
batch_dict['seq_lengths'] = transcripts_lengths
# seq_lengths is used to create a pack_padded_sequence
return sorted_dial_ids, sorted_dial_turns, batch_dict, sorted_responses
def __str__(self):
return super().__str__()
| 7,099 | 42.82716 | 156 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/utilities/simmc_utilities.py
|
import os
import sys
import matplotlib.pyplot as plt
class Logger(object):
def __init__(self, log_path):
self.terminal = sys.stdout
self.log = open(log_path, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def plotting_loss(x_values, x_label, y_label, plot_title, functions, save_path=None, legend=True):
"""plot functions
Args:
save_path (str): path where to save the plot
x_values (numpy.array): values on the x axis
x_label (str): label for the x axis
y_label (str): label for the y axis
plot_title (str): title for the plot
functions (list): list of tuples (list(values), color, label) where color and label are strings
legend (bool): to print the legend for the plot. (Default: True)
"""
# plot train vs validation
for f in functions:
plt.plot(x_values, f[0], color=f[1], label=f[2])
plt.title(plot_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
if legend:
plt.legend(loc='best')
if save_path is not None:
plt.savefig(save_path)
plt.clf()
else:
plt.show()
def print_annotation_dialogue(dialogue, actions):
    """Print the specified dialogue with its action annotations
    Args:
        dialogue (list): list of dialogue turns
        actions (list): list of actions with shape [ {'turn_idx': <idx>, 'action': <action>, 'action_supervision': {'attributes': [...]}}, ...]
    """
assert len(dialogue) == len(actions), 'Actions and turns do not match'
for turn, act in zip(dialogue, actions):
print('+U{}: {}\n+A{}: {}'.format(turn['turn_idx'], turn['transcript'], turn['turn_idx'], turn['system_transcript']))
print('------- Annotations: turn {}--------'.format(turn['turn_idx']))
print('+action:{}'.format(act['action']))
if act['action_supervision'] is not None:
print('+attributes:{}'.format(act['action_supervision']['attributes']))
"""
print('+belief_state:{}\n+transcript_annotated{}\n+system_transcript_annotated{}\n+turn_label{}\n+state_graph_0:{}\n+state_graph_1:{}\n+state_graph_2:{}'
.format(turn['belief_state'], turn['transcript_annotated'], turn['system_transcript_annotated'], turn['turn_label'],
turn['state_graph_0'], turn['state_graph_1'], turn['state_graph_2']))
"""
print('-------------------------------\n\n')
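# Minimal usage sketch (added for illustration): the `functions` argument of plotting_loss
# is a list of (values, color, label) tuples; the loss values below are made-up placeholders.
if __name__ == '__main__':
    epochs = list(range(1, 6))
    train_loss = [1.0, 0.7, 0.5, 0.4, 0.35]
    dev_loss = [1.1, 0.8, 0.65, 0.6, 0.58]
    plotting_loss(x_values=epochs, x_label='epochs', y_label='loss',
                  plot_title='toy loss trend',
                  functions=[(train_loss, 'blue', 'train'), (dev_loss, 'red', 'validation')],
                  save_path=None, legend=True)  # save_path=None ==> plt.show()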
| 2,737 | 37.027778 | 161 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/utilities/__init__.py
|
from .simmc_utilities import Logger, plotting_loss
from .dataparallelv2 import DataParallelV2
| 93 | 46 | 50 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_response_generation/utilities/dataparallelv2.py
|
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel import DataParallel
import torch
import math
# This code was copied from torch.nn.parallel and adapted for DataParallel to chunk lists instead of duplicating them
# (this is really all this code is here for)
def scatter(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return Scatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
#on the last gpu the torch scatter always put the remaining samples to fit the batch
# (e.g., batch=256, n_gpus=3 ==> chunks=[86, 86, 84])
size = math.ceil(len(obj)/len(target_gpus))
chunk = [obj[i * size:(i + 1) * size] for i in range(len(target_gpus)-1)]
diff = len(obj) - size*(len(target_gpus)-1)
chunk.append(obj[-diff:])
return chunk
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class DataParallelV2(DataParallel):
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
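# Minimal chunking sketch (added for illustration): with plain Python lists no CUDA device is
# touched, so the list-splitting behaviour of scatter() can be checked on CPU. A batch of 7
# items over 3 "devices" is split into chunks of size 3, 3 and 1.
if __name__ == '__main__':
    fake_batch = list(range(7))
    per_device_args = scatter((fake_batch,), target_gpus=[0, 1, 2])
    print(per_device_args)  # [([0, 1, 2],), ([3, 4, 5],), ([6],)]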
| 2,498 | 41.355932 | 117 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/preprocessing.py
|
import argparse
import datetime
import math
import os
import pdb
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
import sys
sys.path.append('.')
from config import TrainConfig
from tools.simmc_dataset import SIMMCDatasetForActionPrediction
class Collate():
def __init__(self, word2id, item2id, unk_token):
self.word2id = word2id
self.item2id = item2id
self.unk_token = unk_token
def collate_fn(self, batch):
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
history = [item[3] for item in batch]
visual_context = [item[4] for item in batch]
actions = torch.tensor([item[5] for item in batch])
attributes = torch.tensor([item[6] for item in batch])
# words to ids for the current utterance
utterance_seq_ids = []
for item in batch:
curr_seq = []
for word in item[2].split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
curr_seq.append(word_id)
utterance_seq_ids.append(curr_seq)
# words to ids for the history
history_seq_ids = []
for turn, item in zip(turns, history):
assert len(item) == turn, 'Number of turns does not match history length'
curr_turn_ids = []
for t in range(turn):
concat_sentences = item[t][0] + ' ' + item[t][1] #? separator token
curr_seq = []
for word in concat_sentences.split():
word_id = self.word2id[word] if word in self.word2id else self.word2id[self.unk_token]
curr_seq.append(word_id)
curr_turn_ids.append(torch.tensor(curr_seq))
history_seq_ids.append(curr_turn_ids)
# item to id for the visual context
visual_ids = {'focus': [], 'history': []}
for v in visual_context:
curr_focus = self.item2id[v['focus']]
curr_history = []
for vv in v['history']:
v_id = self.item2id[vv]
curr_history.append(torch.tensor(v_id))
visual_ids['focus'].append(torch.tensor(curr_focus))
if len(curr_history):
curr_history = torch.stack(curr_history)
visual_ids['history'].append(curr_history)
visual_ids['focus'] = torch.stack(visual_ids['focus'])
assert len(utterance_seq_ids) == 1, 'Only unitary batch sizes allowed'
assert len(utterance_seq_ids) == len(dial_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(turns), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(history_seq_ids), 'Batch sizes do not match'
assert len(utterance_seq_ids) == actions.shape[0], 'Batch sizes do not match'
assert len(utterance_seq_ids) == attributes.shape[0], 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(visual_ids['focus']), 'Batch sizes do not match'
assert len(utterance_seq_ids) == len(visual_ids['history']), 'Batch sizes do not match'
batch_dict = {}
batch_dict['utterances'] = utterance_seq_ids
batch_dict['history'] = history_seq_ids
batch_dict['visual_context'] = visual_ids
return dial_ids, turns, batch_dict, actions, attributes
def save_data_on_file(iterator, save_path):
dial_id_list = []
turn_list = []
utterance_list = []
history_list = []
actions_list = []
attributes_list = []
visual_context_list = {'focus': [], 'history': []}
for dial_ids, turns, batch, actions, attributes in iterator:
dial_id_list.append(dial_ids[0])
turn_list.append(turns[0])
utterance_list.append(batch['utterances'][0])
history_list.append(batch['history'][0])
actions_list.append(actions[0])
attributes_list.append(attributes[0])
visual_context_list['focus'].append(batch['visual_context']['focus'][0])
visual_context_list['history'].append(batch['visual_context']['history'][0])
torch.save(
{
'dial_ids': dial_id_list,
'turns': turn_list,
'utterances': utterance_list,
'histories': history_list,
'actions': torch.stack(actions_list),
'attributes': torch.stack(attributes_list),
'visual_contexts': visual_context_list,
'num_actions': len(SIMMCDatasetForActionPrediction._LABEL2ACT),
'num_attributes': len(SIMMCDatasetForActionPrediction._ATTRS),
'actions_support': iterator.dataset.act_support,
'attributes_support': iterator.dataset.attr_support
},
save_path
)
def preprocess(train_dataset, dev_dataset, test_dataset, args):
# prepare model's vocabulary
train_vocabulary = train_dataset.get_vocabulary()
dev_vocabulary = dev_dataset.get_vocabulary()
test_vocabulary = test_dataset.get_vocabulary()
vocabulary = train_vocabulary.union(dev_vocabulary)
vocabulary = vocabulary.union(test_vocabulary)
word2id = {}
word2id[TrainConfig._PAD_TOKEN] = 0
word2id[TrainConfig._UNK_TOKEN] = 1
for idx, word in enumerate(vocabulary):
word2id[word] = idx+2
np.save(os.path.join('/'.join(args.train_folder.split('/')[:-1]), 'vocabulary.npy'), word2id) #todo uncomment
print('VOCABULARY SIZE: {}'.format(len(vocabulary)))
raw_data = np.load(args.metadata_embeddings, allow_pickle=True)
raw_data = dict(raw_data.item())
item2id = {}
for idx, item in enumerate(raw_data['item_ids']):
item2id[item] = idx
collate = Collate(word2id=word2id, item2id=item2id, unk_token=TrainConfig._UNK_TOKEN)
# prepare DataLoader
params = {'batch_size': 1,
'shuffle': False,
'num_workers': 0}
trainloader = DataLoader(train_dataset, **params, collate_fn=collate.collate_fn)
devloader = DataLoader(dev_dataset, **params, collate_fn=collate.collate_fn)
testloader = DataLoader(test_dataset, **params, collate_fn=collate.collate_fn)
start_t = time.time()
save_path='{}/{}_action_prediction_data.dat'
save_data_on_file(iterator=trainloader, save_path=save_path.format(args.actions_folder, 'train'))
save_data_on_file(iterator=devloader, save_path=save_path.format(args.actions_folder, 'dev'))
save_data_on_file(iterator=testloader, save_path=save_path.format(args.actions_folder, 'devtest'))
end_t = time.time()
h_count = (end_t-start_t) /60 /60
m_count = ((end_t-start_t)/60) % 60
s_count = (end_t-start_t) % 60
print('preprocessing time: {}h:{}m:{}s'.format(round(h_count), round(m_count), round(s_count)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--simmc_folder",
type=str,
required=True,
help="Path to simmc fashion dataset folder")
parser.add_argument(
"--actions_folder",
type=str,
required=True,
help="Path to simmc fashion actions folder")
parser.add_argument(
"--embeddings",
type=str,
required=True,
help="Path to embeddings file")
parser.add_argument(
"--metadata_embeddings",
type=str,
required=True,
help="Path to metadata embeddings file")
parser.add_argument(
"--metadata",
type=str,
required=True,
help="Path to metadata JSON file")
args = parser.parse_args()
dataset_path = '{}/fashion_{}_dials.json'
actions_path = '{}/fashion_{}_dials_api_calls.json'
train_dataset = SIMMCDatasetForActionPrediction(data_path=dataset_path.format(args.simmc_folder, 'train'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'train'))
dev_dataset = SIMMCDatasetForActionPrediction(data_path=dataset_path.format(args.simmc_folder, 'dev'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'dev'))
test_dataset = SIMMCDatasetForActionPrediction(data_path=dataset_path.format(args.simmc_folder, 'devtest'),
metadata_path=args.metadata,
actions_path=actions_path.format(args.actions_folder, 'devtest'))
preprocess(train_dataset, dev_dataset, test_dataset, args)
| 8,706 | 38.220721 | 117 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/config.py
|
class TrainConfig():
_SEED = 240797
_LEARNING_RATE = 1e-3
    _WEIGHT_DECAY = 0  # 1e-3
_PAD_TOKEN = '[PAD]'
_UNK_TOKEN = '[UNK]'
_CHECKPOINT_FOLDER = 'mm_action_prediction/checkpoints'
| 205 | 19.6 | 59 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/eval.py
|
import argparse
import json
import os
import pdb
import sys
import torch
from torch.utils.data import DataLoader
sys.path.append('.')
from config import TrainConfig
from dataset import FastDataset
from models import BlindStatefulLSTM, BlindStatelessLSTM, MMStatefulLSTM
from tools.simmc_dataset import SIMMCDatasetForActionPrediction
"""expected form for model output
[
{
"dialog_id": ...,
"predictions": [
{
"action": <predicted_action>,
"action_log_prob": {
<action_token>: <action_log_prob>,
...
},
"attributes": {
<attribute_label>: <attribute_val>,
...
}
}
]
}
]
Where <attribute_label> is "focus" or "attributes" (only "attributes" for fashion dataset).
"""
id2act = SIMMCDatasetForActionPrediction._LABEL2ACT
id2attrs = SIMMCDatasetForActionPrediction._ATTRS
def instantiate_model(args, num_actions, num_attrs, word2id):
if args.model == 'blindstateless':
return BlindStatelessLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False,
freeze_embeddings=True)
elif args.model == 'blindstateful':
return BlindStatefulLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False)
elif args.model == 'mmstateful':
return MMStatefulLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
item_embeddings_path=args.metadata_embeddings,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False)
else:
raise Exception('Model not present!')
def create_eval_dict(dataset):
dataset.create_id2turns()
eval_dict = {}
for dial_id, num_turns in dataset.id2turns.items():
eval_dict[dial_id] = {'dialog_id': dial_id, 'predictions': [None] * num_turns}
return eval_dict
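# Example of the structure produced by create_eval_dict (illustrative ids): a dialogue with
# id 42 and 3 turns maps to {42: {'dialog_id': 42, 'predictions': [None, None, None]}}; each
# None slot is later filled with the per-turn prediction dict described in the docstring above.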
def eval(model, test_dataset, args, save_folder, device):
model.eval()
model.to(device)
print('MODEL: {}'.format(model))
# prepare DataLoader
params = {'batch_size': 1,
'shuffle': False,
'num_workers': 0}
testloader = DataLoader(test_dataset, **params, collate_fn=model.collate_fn)
eval_dict = create_eval_dict(test_dataset)
with torch.no_grad():
for curr_step, (dial_ids, turns, batch, actions, attributes) in enumerate(testloader):
assert len(dial_ids) == 1, 'Only unitary batch size is allowed during testing'
dial_id = dial_ids[0]
turn = turns[0]
batch['utterances'] = batch['utterances'].to(device)
actions = actions.to(device)
attributes = attributes.to(device)
actions_out, attributes_out, actions_probs, attributes_probs = model(**batch, device=device)
#get predicted action and arguments
actions_predictions = torch.argmax(actions_probs, dim=-1)
attributes_predictions = []
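            # multi-label attributes: keep every position whose sigmoid score reaches the 0.5 threshold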
for batch_idx, t in enumerate(attributes_probs):
attributes_predictions.append([])
for pos, val in enumerate(t):
if val >= .5:
attributes_predictions[batch_idx].append(pos)
actions_predictions = actions_predictions[0].item()
attributes_predictions = attributes_predictions[0]
predicted_action = SIMMCDatasetForActionPrediction._LABEL2ACT[actions_predictions]
action_log_prob = {}
for idx, prob in enumerate(actions_probs[0]):
action_log_prob[SIMMCDatasetForActionPrediction._LABEL2ACT[idx]] = torch.log(prob).item()
attributes = {}
#for arg in args_predictions:
attributes['attributes'] = [SIMMCDatasetForActionPrediction._ATTRS[attr] for attr in attributes_predictions]
eval_dict[dial_id]['predictions'][turn] = {'action': predicted_action,
'action_log_prob': action_log_prob,
'attributes': attributes}
eval_list = []
for key in eval_dict:
eval_list.append(eval_dict[key])
save_file = os.path.join(save_folder, 'eval_out.json')
try:
with open(save_file, 'w+') as fp:
json.dump(eval_list, fp)
print('results saved in {}'.format(save_file))
    except Exception as e:
        print('Error in writing the resulting JSON: {}'.format(e))
if __name__ == '__main__':
#TODO make "infer": dataset with unknown labels (modify the dataset class)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=['blindstateless', 'blindstateful', 'mmstateful'],
required=True,
help="Type of the model (options: 'blindstateless', 'blindstateful', 'mmstateful')")
parser.add_argument(
"--model_path",
default=None,
type=str,
required=True,
help="Path to the weights of the model")
parser.add_argument(
"--vocabulary",
default=None,
type=str,
required=True,
help="Path to the vocabulary pickle file")
parser.add_argument(
"--data",
default=None,
type=str,
required=True,
help="Path to training dataset json file")
parser.add_argument(
"--embeddings",
default=None,
type=str,
required=True,
help="Path to embedding file")
parser.add_argument(
"--metadata_embeddings",
type=str,
required=True,
help="Path to metadata embeddings file")
parser.add_argument(
"--cuda",
default=None,
required=False,
type=int,
help="id of device to use")
args = parser.parse_args()
test_dataset = FastDataset(dat_path=args.data)
device = torch.device('cuda:{}'.format(args.cuda) if torch.cuda.is_available() and args.cuda is not None else "cpu")
eval_dict = create_eval_dict(test_dataset)
print('EVAL DATASET: {}'.format(test_dataset))
# prepare model
word2id = torch.load(args.vocabulary)
model = instantiate_model(args,
num_actions=len(SIMMCDatasetForActionPrediction._LABEL2ACT),
num_attrs=len(SIMMCDatasetForActionPrediction._ATTRS),
word2id=word2id)
model.load_state_dict(torch.load(args.model_path))
model_folder = '/'.join(args.model_path.split('/')[:-1])
print('model loaded from {}'.format(model_folder))
eval(model, test_dataset, args, save_folder=model_folder, device=device)
| 7,874 | 35.971831 | 120 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/train.py
|
import argparse
import datetime
import math
import os
import pdb
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from config import TrainConfig
from models import BlindStatefulLSTM, BlindStatelessLSTM, MMStatefulLSTM
from utilities import Logger, plotting_loss
from dataset import FastDataset
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="0,5" # specify which GPU(s) to be used
def instantiate_model(args, num_actions, num_attrs, word2id):
if args.model == 'blindstateless':
return BlindStatelessLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False,
freeze_embeddings=True)
elif args.model == 'blindstateful':
return BlindStatefulLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False)
elif args.model == 'mmstateful':
return MMStatefulLSTM(word_embeddings_path=args.embeddings,
word2id=word2id,
item_embeddings_path=args.metadata_embeddings,
num_actions=num_actions,
num_attrs=num_attrs,
pad_token=TrainConfig._PAD_TOKEN,
unk_token=TrainConfig._UNK_TOKEN,
seed=TrainConfig._SEED,
OOV_corrections=False)
else:
raise Exception('Model not present!')
def plotting(epochs, losses_trend, checkpoint_dir):
epoch_list = np.arange(1, epochs+1)
losses = [(losses_trend['train']['global'], 'blue', 'train'),
(losses_trend['dev']['global'], 'red', 'validation')]
loss_path = os.path.join(checkpoint_dir, 'global_loss_plot')
plotting_loss(x_values=epoch_list, save_path=loss_path, functions=losses, plot_title='Global loss trend', x_label='epochs', y_label='loss')
losses = [(losses_trend['train']['actions'], 'green', 'train'),
(losses_trend['dev']['actions'], 'purple', 'validation')]
loss_path = os.path.join(checkpoint_dir, 'actions_loss_plot')
plotting_loss(x_values=epoch_list, save_path=loss_path, functions=losses, plot_title='Actions loss trend', x_label='epochs', y_label='loss')
losses = [(losses_trend['train']['attributes'], 'orange', 'train'),
(losses_trend['dev']['attributes'], 'black', 'validation')]
loss_path = os.path.join(checkpoint_dir, 'attributes_loss_plot')
plotting_loss(x_values=epoch_list, save_path=loss_path, functions=losses, plot_title='Arguments loss trend', x_label='epochs', y_label='loss')
def forward_step(model, batch, actions, attributes, actions_criterion, attributes_criterion, device):
batch['utterances'] = batch['utterances'].to(device)
actions_targets = actions.to(device)
attributes_targets = attributes.to(device)
"""
seq_lengths = seq_lengths.to(device)
"""
actions_logits, attributes_logits, actions_probs, attributes_probs = model(**batch, device=device)
actions_loss = actions_criterion(actions_logits, actions_targets)
attributes_targets = attributes_targets.type_as(actions_logits)
attributes_loss = attributes_criterion(attributes_logits, attributes_targets)
""" Not used
actions_predictions = torch.argmax(actions_probs, dim=-1)
attributes_predictions = []
for batch_idx, t in enumerate(attributes_probs):
attributes_predictions.append([])
for pos, val in enumerate(t):
if val >= .5:
attributes_predictions[batch_idx].append(pos)
"""
actions_predictions = None
attributes_predictions = None
return actions_loss, attributes_loss, actions_predictions, attributes_predictions
def train(train_dataset, dev_dataset, args, device):
# prepare checkpoint folder
curr_date = datetime.datetime.now().isoformat().split('.')[0]
checkpoint_dir = os.path.join(TrainConfig._CHECKPOINT_FOLDER, curr_date)
os.makedirs(checkpoint_dir, exist_ok=True)
# prepare logger to redirect both on file and stdout
sys.stdout = Logger(os.path.join(checkpoint_dir, 'train.log')) #todo uncomment before training
print('device used: {}'.format(str(device)))
print('batch used: {}'.format(args.batch_size))
print('lr used: {}'.format(TrainConfig._LEARNING_RATE))
print('weight decay: {}'.format(TrainConfig._WEIGHT_DECAY))
print('TRAINING DATASET: {}'.format(train_dataset))
print('VALIDATION DATASET: {}'.format(dev_dataset))
# prepare model's vocabulary
with open(args.vocabulary, 'rb') as fp:
vocabulary = np.load(fp, allow_pickle=True)
vocabulary = dict(vocabulary.item())
torch.save(vocabulary, os.path.join(checkpoint_dir, 'vocabulary.pkl'))
print('VOCABULARY SIZE: {}'.format(len(vocabulary)))
assert train_dataset.num_actions == dev_dataset.num_actions, 'Number of actions mismatch between train and dev dataset'
    assert train_dataset.num_attributes == dev_dataset.num_attributes, 'Number of attributes mismatch between train and dev dataset'
# prepare model
model = instantiate_model(args,
num_actions=train_dataset.num_actions,
num_attrs=train_dataset.num_attributes,
word2id=vocabulary)
model.to(device)
print('MODEL NAME: {}'.format(args.model))
print('NETWORK: {}'.format(model))
# prepare DataLoader
params = {'batch_size': args.batch_size,
'shuffle': True, #todo set to True
'num_workers': 0}
trainloader = DataLoader(train_dataset, **params, collate_fn=model.collate_fn)
devloader = DataLoader(dev_dataset, **params, collate_fn=model.collate_fn)
#prepare loss weights
act_per_class, act_tot_support = train_dataset.act_support['per_class_frequency'], train_dataset.act_support['tot_samples']
attr_per_class, attr_tot_support = train_dataset.attr_support['per_class_frequency'], train_dataset.attr_support['tot_samples']
#weights computed as negative_samples/positive_samples
ce_weights = torch.tensor([(act_tot_support-class_support)/class_support for class_support in act_per_class])
bce_weights = torch.tensor([(attr_tot_support-class_support)/class_support for class_support in attr_per_class])
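    # Worked example (illustrative numbers): an action class with 200 positive samples out of
    # 1000 total gets weight (1000 - 200) / 200 = 4.0, so rarer classes contribute
    # proportionally more to the loss.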
#prepare losses and optimizer
actions_criterion = torch.nn.CrossEntropyLoss().to(device)
attributes_criterion = torch.nn.BCEWithLogitsLoss(pos_weight=bce_weights).to(device) #pos_weight=torch.tensor(10.)
optimizer = torch.optim.Adam(params=model.parameters(), lr=TrainConfig._LEARNING_RATE, weight_decay=TrainConfig._WEIGHT_DECAY)
scheduler1 = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = list(range(10, args.epochs, 10)), gamma = 0.8)
scheduler2 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=.5, patience=4, threshold=1e-2, cooldown=4, verbose=True)
#prepare containers for statistics
losses_trend = {'train': {'global':[], 'actions': [], 'attributes': []},
'dev': {'global':[], 'actions': [], 'attributes': []}}
best_loss = math.inf
start_t = time.time()
for epoch in range(args.epochs):
model.train()
curr_epoch_losses = {'global': [], 'actions': [], 'attributes': []}
for curr_step, (dial_ids, turns, batch, actions, attributes) in enumerate(trainloader):
actions_loss, attributes_loss, _, _ = forward_step(model,
batch=batch,
actions=actions,
attributes=attributes,
actions_criterion=actions_criterion,
attributes_criterion=attributes_criterion,
device=device)
#backward
optimizer.zero_grad()
loss = (actions_loss + attributes_loss)/2
loss.backward()
optimizer.step()
curr_epoch_losses['global'].append(loss.item())
curr_epoch_losses['actions'].append(actions_loss.item())
curr_epoch_losses['attributes'].append(attributes_loss.item())
losses_trend['train']['global'].append(np.mean(curr_epoch_losses['global']))
losses_trend['train']['actions'].append(np.mean(curr_epoch_losses['actions']))
losses_trend['train']['attributes'].append(np.mean(curr_epoch_losses['attributes']))
model.eval()
curr_epoch_losses = {'global': [], 'actions': [], 'attributes': []}
with torch.no_grad():
for curr_step, (dial_ids, turns, batch, actions, attributes) in enumerate(devloader):
actions_loss, attributes_loss, _, _ = forward_step(model,
batch=batch,
actions=actions,
attributes=attributes,
actions_criterion=actions_criterion,
attributes_criterion=attributes_criterion,
device=device)
loss = (actions_loss + attributes_loss)/2
curr_epoch_losses['global'].append(loss.item())
curr_epoch_losses['actions'].append(actions_loss.item())
curr_epoch_losses['attributes'].append(attributes_loss.item())
losses_trend['dev']['global'].append(np.mean(curr_epoch_losses['global']))
losses_trend['dev']['actions'].append(np.mean(curr_epoch_losses['actions']))
losses_trend['dev']['attributes'].append(np.mean(curr_epoch_losses['attributes']))
# save checkpoint if best model
if losses_trend['dev']['global'][-1] < best_loss:
best_loss = losses_trend['dev']['global'][-1]
torch.save(model.cpu().state_dict(),\
os.path.join(checkpoint_dir, 'state_dict.pt'))
model.to(device)
print('EPOCH #{} :: train_loss = {:.4f} ; dev_loss = {:.4f} [act_loss={:.4f}, attr_loss={:.4f}]; (lr={})'
.format(epoch+1, losses_trend['train']['global'][-1],
losses_trend['dev']['global'][-1],
losses_trend['dev']['actions'][-1],
losses_trend['dev']['attributes'][-1],
optimizer.param_groups[0]['lr']))
scheduler1.step()
scheduler2.step(losses_trend['dev']['global'][-1])
end_t = time.time()
h_count = (end_t-start_t) /60 /60
m_count = ((end_t-start_t)/60) % 60
s_count = (end_t-start_t) % 60
print('training time: {}h:{}m:{}s'.format(round(h_count), round(m_count), round(s_count)))
plotting(epochs=args.epochs, losses_trend=losses_trend, checkpoint_dir=checkpoint_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
choices=['blindstateless', 'blindstateful', 'mmstateful'],
required=True,
help="Type of the model (options: 'blindstateless', 'blindstateful', 'mmstateful')")
parser.add_argument(
"--data",
type=str,
required=True,
help="Path to preprocessed training data file .dat")
parser.add_argument(
"--eval",
type=str,
required=True,
help="Path to preprocessed eval data file .dat")
parser.add_argument(
"--vocabulary",
type=str,
required=True,
help="Path to vocabulary file")
parser.add_argument(
"--embeddings",
type=str,
required=True,
help="Path to embeddings file")
parser.add_argument(
"--metadata_embeddings",
type=str,
required=True,
help="Path to metadata embeddings file")
parser.add_argument(
"--batch_size",
required=True,
type=int,
help="Batch size")
parser.add_argument(
"--epochs",
required=True,
type=int,
help="Number of epochs")
parser.add_argument(
"--cuda",
default=None,
required=False,
type=int,
help="id of device to use")
args = parser.parse_args()
train_dataset = FastDataset(dat_path=args.data)
dev_dataset = FastDataset(dat_path=args.eval)
device = torch.device('cuda:{}'.format(args.cuda) if torch.cuda.is_available() and args.cuda is not None else "cpu")
train(train_dataset, dev_dataset, args, device)
| 13,757 | 44.556291 | 146 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/dataset/processed_dataset.py
|
import numpy as np
import torch
from torch.utils.data import Dataset
import pdb
class FastDataset(Dataset):
"""Dataset with preprocessed data for response generation subtask
self.data.keys() = dict_keys(['dial_ids', 'turns', 'utterances', 'histories', 'actions',
'attributes', 'visual_contexts', 'seq_lengths', 'candidates'])
"""
def __init__(self, dat_path):
super(FastDataset, self).__init__()
self.data = torch.load(dat_path)
self.dataset_name = 'SIMMC'
self.task = 'action_prediction'
self.num_actions = self.data['num_actions']
self.num_attributes = self.data['num_attributes']
self.act_support = self.data['actions_support']
self.attr_support = self.data['attributes_support']
assert len(self.data['dial_ids']) == len(self.data['turns']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['utterances']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['histories']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['actions']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['attributes']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['visual_contexts']['focus']), 'Inconsistent dataset'
assert len(self.data['dial_ids']) == len(self.data['visual_contexts']['history']), 'Inconsistent dataset'
self.check_history_consistency()
def check_history_consistency(self):
for turns_num, history in zip(self.data['turns'], self.data['histories']):
            assert turns_num == len(history), 'History length does not match number of turns'
def create_id2turns(self):
"""used to create the eval dict during evaluation phase
"""
self.id2turns = {}
for dial_id in self.data['dial_ids']:
if dial_id in self.id2turns:
self.id2turns[dial_id] += 1
else:
self.id2turns[dial_id] = 1
def __getitem__(self, index):
return self.data['dial_ids'][index], self.data['turns'][index], self.data['utterances'][index],\
self.data['histories'][index], self.data['actions'][index], self.data['attributes'][index],\
self.data['visual_contexts']['focus'][index], self.data['visual_contexts']['history'][index],
def __len__(self):
return len(self.data['utterances'])
def __str__(self):
return '{}_subtask({})'.format(self.dataset_name, self.task)
| 2,625 | 39.4 | 113 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/dataset/__init__.py
|
from .processed_dataset import FastDataset
| 42 | 42 | 42 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/models/embednets.py
|
import copy
import pdb
import numpy as np
import torch
import torch.nn as nn
from spellchecker import SpellChecker
class ItemEmbeddingNetwork(nn.Module):
"""Base class for word embedding layer initialization and weights loading
Args:
nn (torch.nn.Module): inherits from torch.nn.Module
"""
def __init__(self, item_embeddings_path, freeze=False):
super(ItemEmbeddingNetwork, self).__init__()
raw_data = np.load(item_embeddings_path, allow_pickle=True)
raw_data = dict(raw_data.item())
self.item2id = {}
for idx, item in enumerate(raw_data['item_ids']):
self.item2id[item] = idx
self.embedding_dim = raw_data['embedding_size']
embeddings = np.stack(raw_data['embeddings'])
embedding_weights = torch.tensor(embeddings)
num_embeddings = embedding_weights.shape[0]
assert embedding_weights.shape[-1] == self.embedding_dim, 'Real embedding dimension does not match the declared one'
self.embedding_layer = nn.Embedding(num_embeddings, self.embedding_dim)
self.embedding_layer.load_state_dict({'weight': embedding_weights})
if freeze:
for p in self.embedding_layer.parameters():
p.requires_grad = False
def forward(self, input):
return self.embedding_layer(input)
class WordEmbeddingNetwork(nn.Module):
"""Base class for word embedding layer initialization and weights loading
Args:
nn (torch.nn.Module): inherits from torch.nn.Module
"""
def __init__(self, word_embeddings_path, word2id, pad_token, unk_token, OOV_corrections=False, freeze=False):
super(WordEmbeddingNetwork, self).__init__()
self.pad_token = pad_token
self.unk_token = unk_token
self.corrected_flag = OOV_corrections
self.word2id = word2id
self.embedding_file = word_embeddings_path.split('/')[-1]
self.load_embeddings_from_file(word_embeddings_path)
embedding_weights = self.get_embeddings_weights(OOV_corrections)
num_embeddings, self.embedding_dim = embedding_weights.shape
self.embedding_layer = nn.Embedding(num_embeddings, self.embedding_dim)
self.embedding_layer.load_state_dict({'weight': embedding_weights})
if freeze:
for p in self.embedding_layer.parameters():
p.requires_grad = False
def forward(self, input):
return self.embedding_layer(input)
def load_embeddings_from_file(self, embeddings_path):
self.glove = {}
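        # Expected GloVe text format (one embedding per line): "<word> <v_1> ... <v_d>", e.g.
        # (illustrative values) "the 0.418 0.24968 -0.41242 ...". The first token is the word,
        # the remaining tokens are parsed as its d-dimensional float vector.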
with open(embeddings_path) as fp:
for l in fp:
line_tokens = l.split()
word = line_tokens[0]
if word in self.glove:
raise Exception('Repeated words in {} embeddings file'.format(embeddings_path))
vector = np.asarray(line_tokens[1:], "float32")
self.glove[word] = vector
self.embedding_size = vector.size
def get_embeddings_weights(self, OOV_corrections):
#if OOV_corrections:
# dataset_vocabulary = self.correct_spelling(dataset_vocabulary)
matrix_len = len(self.word2id)
weights_matrix = np.zeros((matrix_len, self.embedding_size))
# set pad and unknow ids
pad_id = self.word2id[self.pad_token]
unk_id = self.word2id[self.unk_token]
weights_matrix[pad_id] = np.zeros(shape=(self.embedding_size, ))
weights_matrix[unk_id] = np.random.normal(scale=0.6, size=(self.embedding_size, ))
for idx, word in enumerate(self.word2id):
if word in self.glove:
weights_matrix[idx] = self.glove[word]
return torch.tensor(weights_matrix, dtype=torch.float32)
def correct_spelling(self, dataset_vocabulary):
#todo fix: now dataset_vocabulary is a map, not a set (get the .keys())
oov = []
self.corrections = {}
checker = SpellChecker()
vocab_copy = copy.deepcopy(dataset_vocabulary)
for word in vocab_copy:
if word not in self.glove:
oov.append(word)
corrected_w = checker.correction(word)
if corrected_w in self.glove:
                    # the word with typos is mapped to the same id as the corresponding corrected word
try:
self.word2id[word] = self.word2id[corrected_w] #TODO fix: word2id is still empty at this point
except:
pdb.set_trace()
self.corrections[word] = corrected_w
dataset_vocabulary.remove(word)
#print(oov)
#print(corrections.values())
return dataset_vocabulary
| 4,777 | 35.753846 | 124 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/models/__init__.py
|
from .blindstateless import BlindStatelessLSTM
from .blindstateful import BlindStatefulLSTM
from .mmstateful import MMStatefulLSTM
| 130 | 42.666667 | 46 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/models/blindstateless.py
|
import pdb
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .embednets import WordEmbeddingNetwork
class BlindStatelessLSTM(nn.Module):
"""Implementation of a blind and stateless LSTM for action prediction. It approximates the probability distribution:
P(a_t | U_t)
Where a_t is the action and U_t the user utterance.
Args:
torch (WordEmbeddingBasedNetwork): inherits from WordEmbeddingBasedNetwork
Attributes:
self.corrections (dict): Mapping from dataset word to its corrections (the corrections is included in the vocabulary)
"""
_HIDDEN_SIZE = 600
def __init__(self, word_embeddings_path, word2id, num_actions, num_attrs,
pad_token, unk_token, seed, OOV_corrections=False, freeze_embeddings=False):
"""
Glove download: https://nlp.stanford.edu/projects/glove/
Args:
embedding_path ([type]): [description]
"""
torch.manual_seed(seed)
super(BlindStatelessLSTM, self).__init__()
self.hidden_size = self._HIDDEN_SIZE
self.word_embeddings_layer = WordEmbeddingNetwork(word_embeddings_path=word_embeddings_path,
word2id=word2id,
pad_token=pad_token,
unk_token=unk_token,
OOV_corrections=OOV_corrections,
freeze=freeze_embeddings)
self.lstm = nn.LSTM(self.word_embeddings_layer.embedding_dim, self.hidden_size, batch_first=True)
self.dropout = nn.Dropout(p=0.5)
self.actions_linear = nn.Linear(in_features=self.hidden_size, out_features=num_actions)
self.attrs_linear = nn.Linear(in_features=self.hidden_size, out_features=num_attrs)
def forward(self, utterances, seq_lengths=None, device='cpu'):
"""Forward pass for BlindStatelessLSTM
Args:
            utterances (torch.LongTensor): tensor containing the batch of padded transcripts (shape=BxMAX_SEQ_LEN)
            seq_lengths (torch.LongTensor, optional): Effective lengths (no pad) of each sequence in the batch. If given, pack_padded_sequence is used.
The shape is B. Defaults to None.
"""
embedded_seq_tensor = self.word_embeddings_layer(utterances)
if seq_lengths is not None:
# pack padded sequence
packed_input = pack_padded_sequence(embedded_seq_tensor, seq_lengths.cpu().numpy(), batch_first=True)
out1, (h_t, c_t) = self.lstm(packed_input)
"""unpack not needed. We don't use the output
if seq_lengths is not None:
# unpack padded sequence
output, input_sizes = pad_packed_sequence(out1, batch_first=True)
"""
h_t = self.dropout(h_t)
# h_t has shape NUM_DIRxBxHIDDEN_SIZE
actions_logits = self.actions_linear(h_t[0])
attrs_logits = self.attrs_linear(h_t[0])
        # actions_logits shape: B x NUM_ACTIONS, attrs_logits shape: B x NUM_ATTRS
        # (softmax -> mutually exclusive actions; sigmoid -> independent multi-label attributes)
actions_probs = F.softmax(actions_logits, dim=-1)
attrs_probs = torch.sigmoid(attrs_logits)
return actions_logits, attrs_logits, actions_probs, attrs_probs
def collate_fn(self, batch):
"""This method prepares the batch for the LSTM: padding + preparation for pack_padded_sequence
Args:
batch (tuple): tuple of element returned by the Dataset.__getitem__()
Returns:
dial_ids (list): list of dialogue ids
turns (list): list of dialogue turn numbers
            seq_tensor (torch.LongTensor): tensor with shape BxMAX_SEQ_LEN containing padded user transcripts sorted by descending effective length
            seq_lengths (torch.LongTensor): tensor with shape B containing the effective length of the corresponding transcript
            actions (torch.LongTensor): tensor with shape B containing target actions
            attributes (torch.LongTensor): tensor with shape Bx33 containing attribute one-hot vectors, one for each sample.
"""
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
transcripts = [torch.tensor(item[2]) for item in batch]
actions = torch.tensor([item[4] for item in batch])
attributes = torch.stack([item[5] for item in batch])
assert len(transcripts) == len(dial_ids), 'Batch sizes do not match'
assert len(transcripts) == len(turns), 'Batch sizes do not match'
assert len(transcripts) == actions.shape[0], 'Batch sizes do not match'
assert len(transcripts) == attributes.shape[0], 'Batch sizes do not match'
# reorder the sequences from the longest one to the shortest one.
        # keep the correspondence with the target
transcripts_lengths = torch.tensor(list(map(len, transcripts)), dtype=torch.long)
transcripts_tensor = torch.zeros((len(transcripts), transcripts_lengths.max()), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(transcripts, transcripts_lengths)):
transcripts_tensor[idx, :seqlen] = seq.clone().detach()
# sort instances by sequence length in descending order
transcripts_lengths, perm_idx = transcripts_lengths.sort(0, descending=True)
transcripts_tensor = transcripts_tensor[perm_idx]
actions = actions[perm_idx]
attributes = attributes[perm_idx]
sorted_dial_ids = []
sorted_dial_turns = []
for idx in perm_idx:
sorted_dial_ids.append(dial_ids[idx])
sorted_dial_turns.append(turns[idx])
batch_dict = {}
batch_dict['utterances'] = transcripts_tensor
batch_dict['seq_lengths'] = transcripts_lengths
# seq_lengths is used to create a pack_padded_sequence
return sorted_dial_ids, sorted_dial_turns, batch_dict, actions, attributes
def __str__(self):
return super().__str__()
| 6,218 | 42.795775 | 156 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/models/mmstateful.py
|
import pdb
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .embednets import WordEmbeddingNetwork, ItemEmbeddingNetwork
import numpy as np
def get_positional_embeddings(n_position, emb_dim):
"""Create positional embeddings (from "Attention Is All You Need", Vaswani et al. 2017)
Args:
n_position (int): number of elements in the sequence
emb_dim (int): size of embeddings
Returns:
toch.FloatTensor: a positional embedding with shape [N_POSITION x EMB_DIM]
"""
# keep dim 0 for padding token position encoding zero vector
position_enc = np.array([
[pos / np.power(10000, 2 * (k // 2) / emb_dim) for k in range(emb_dim)]
if pos != 0 else np.zeros(emb_dim) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return torch.from_numpy(position_enc).type(torch.FloatTensor)
class MMStatefulLSTM(nn.Module):
"""
Args:
nn ([type]): [description]
"""
_HIDDEN_SIZE = 300
def __init__(self, word_embeddings_path, word2id, item_embeddings_path,
num_actions, num_attrs, pad_token, unk_token,
seed, OOV_corrections):
torch.manual_seed(seed)
super(MMStatefulLSTM, self).__init__()
self.num_actions = num_actions
self.num_attrs = num_attrs
self.memory_hidden_size = self._HIDDEN_SIZE
# NETWORK
self.item_embeddings_layer = ItemEmbeddingNetwork(item_embeddings_path)
self.word_embeddings_layer = WordEmbeddingNetwork(word_embeddings_path=word_embeddings_path,
word2id=word2id,
pad_token=pad_token,
unk_token=unk_token)
self.utterance_encoder = nn.LSTM(self.word_embeddings_layer.embedding_dim,
self.memory_hidden_size,
batch_first=True,
bidirectional=True)
self.utterance_dropout = nn.Dropout(p=.5)
self.item_dim_reduction = nn.Linear(in_features=self.item_embeddings_layer.embedding_dim,
out_features=2*self.memory_hidden_size)
self.utterance_memory_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 4*self.memory_hidden_size),
nn.Tanh(),
nn.Dropout(p=.5),
nn.Linear(4*self.memory_hidden_size, 1),
nn.Dropout(p=.5)) #todo introduce layerNorm
self.linear_act_post_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 2*self.memory_hidden_size),
nn.Dropout(p=.5),
nn.ReLU())
self.linear_args_post_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 2*self.memory_hidden_size),
nn.Dropout(p=.5),
nn.ReLU())
self.multiturn_actions_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_actions)
self.multiturn_args_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_attrs)
self.singleturn_actions_outlayer = nn.Linear(in_features=4*self.memory_hidden_size, out_features=self.num_actions)
self.singleturn_args_outlayer = nn.Linear(in_features=4*self.memory_hidden_size, out_features=self.num_attrs)
def forward(self, utterances, history, visual_context, seq_lengths=None, device='cpu'):
# u_t shape [BATCH_SIZE x 2MEMORY_HIDDEN_SIZE]
u_t = self.encode_utterance(utterances, seq_lengths)
focus, focus_history = self.encode_visual(visual_context, device)
# u_t shape [BATCH_SIZE x 2MEMORY_HIDDEN_SIZE]
focus_t = self.item_dim_reduction(focus)
# separate single from multi-turn
single_turns = []
single_turns_v_focus = []
single_turns_pos = set()
multi_turns = []
multi_turns_v_focus = []
multi_turns_history = []
multi_turns_v_history = []
for dial_idx, history_item in enumerate(history):
if len(history_item) == 0:
single_turns_pos.add(dial_idx)
single_turns.append(u_t[dial_idx])
single_turns_v_focus.append(focus_t[dial_idx])
else:
multi_turns.append(u_t[dial_idx])
multi_turns_history.append(history[dial_idx])
multi_turns_v_focus.append(focus[dial_idx])
multi_turns_v_history.append(focus_history[dial_idx])
if len(single_turns):
# concat u_t with correspondent v_t
single_u_t = torch.stack(single_turns)
single_v_t = torch.stack(single_turns_v_focus)
#pos = list(single_turns_pos)
single_u_v_concat = torch.cat((single_u_t, single_v_t), dim=-1)
# compute output for single turn dialogues
act_out1 = self.singleturn_actions_outlayer(single_u_v_concat)
args_out1 = self.singleturn_args_outlayer(single_u_v_concat)
if len(multi_turns):
multi_u_t = torch.stack(multi_turns)
multi_v_t = torch.stack(multi_turns_v_focus)
multi_v_t = self.item_dim_reduction(multi_v_t)
# memory bank is a list of BATCH_SIZE tensors, each of them having shape N_TURNSx2MEMORY_HIDDEN_SIZE
lang_memory = self.encode_history(multi_turns_history, device)
assert len(multi_turns) == len(lang_memory), 'Wrong memory size'
#visual_memory = self.encode_visual_history(multi_turns_v_history, device) #todo visual memory
#assert len(multi_turns) == len(visual_memory), 'Wrong memory size'
# c_t shape [MULTI_TURNS_SET_SIZE x MEMORY_HIDDEN_SIZE]
c_t = self.attention_over_memory(multi_u_t, lang_memory)
mm_c_t = c_t * multi_v_t
#? Hadamard product between c_t and u_t? It is simply "tensor1 * tensor2"
ut_ct_concat = torch.cat((multi_u_t, mm_c_t), dim=-1)
c_t_tilde1 = self.linear_act_post_attn(ut_ct_concat)
ut_ct1_concat = torch.cat((multi_u_t, c_t_tilde1), dim=-1)
c_t_tilde2 = self.linear_args_post_attn(ut_ct1_concat)
act_out2 = self.multiturn_actions_outlayer(c_t_tilde1)
args_out2 = self.multiturn_args_outlayer(c_t_tilde2)
# recompose the output
act_out = []
args_out = []
pos1 = 0
pos2 = 0
for idx in range(utterances.shape[0]):
if idx in single_turns_pos:
act_out.append(act_out1[pos1])
args_out.append(args_out1[pos1])
pos1 += 1
else:
act_out.append(act_out2[pos2])
args_out.append(args_out2[pos2])
pos2 += 1
act_out = torch.stack(act_out)
args_out = torch.stack(args_out)
act_probs = F.softmax(act_out, dim=-1)
args_probs = torch.sigmoid(args_out)
return act_out, args_out, act_probs, args_probs
def encode_utterance(self, batch, seq_lengths):
embedded_seq_tensor = self.word_embeddings_layer(batch)
if seq_lengths is not None:
# pack padded sequence
packed_input = pack_padded_sequence(embedded_seq_tensor, seq_lengths.cpu().numpy(), batch_first=True)
out1, (h_t, c_t) = self.utterance_encoder(packed_input)
bidirectional_h_t = torch.cat((h_t[0], h_t[-1]), dim=-1)
bidirectional_h_t = self.utterance_dropout(bidirectional_h_t)
"""unpack not needed. We don't use the output
if seq_lengths is not None:
# unpack padded sequence
output, input_sizes = pad_packed_sequence(out1, batch_first=True)
"""
return bidirectional_h_t
def encode_visual(self, visual_context, device):
focus = self.item_embeddings_layer(visual_context['focus'].to(device))
history = []
for history_item in visual_context['history']:
if not len(history_item):
history.append([])
else:
history.append(self.item_embeddings_layer(history_item.to(device)))
return focus, history
def encode_history(self, history, device):
#todo turn embedding based on previous turns (hierarchical recurrent encoder - HRE)
encoded_batch_history = []
for dial in history:
hiddens = []
positional_embeddings = get_positional_embeddings(len(dial), 2*self.memory_hidden_size).to(device)
assert positional_embeddings.shape[0] == len(dial)
for turn, pos_emb in zip(dial, positional_embeddings):
emb = self.word_embeddings_layer(turn.unsqueeze(0).to(device))
# h_t.shape = [num_directions x 1 x HIDDEN_SIZE]
out, (h_t, c_t) = self.utterance_encoder(emb)
bidirectional_h_t = torch.cat((h_t[0], h_t[-1]), dim=-1)
pos_bidirectional_h_t = bidirectional_h_t+pos_emb
hiddens.append(pos_bidirectional_h_t.squeeze(0))
assert len(hiddens) > 0, 'Impossible to encode history for single turn instance'
encoded_batch_history.append(torch.stack(hiddens))
return encoded_batch_history
def encode_visual_history(self, history, device):
encoded_batch_history = []
for dial in history:
hiddens = []
for turn in dial:
m_t = self.item_dim_reduction(turn.unsqueeze(0).to(device))
hiddens.append(m_t.squeeze(0))
assert len(hiddens) > 0, 'Impossible to encode history for single turn instance'
encoded_batch_history.append(torch.stack(hiddens))
return encoded_batch_history
def attention_over_memory(self, u_t, memory_bank):
# input for attention layer consists of pairs (utterance_j, memory_j_i), for each j, i
attn_input_list = []
for dial_idx, dial_mem in enumerate(memory_bank):
num_turns = dial_mem.shape[0]
#utterance_mem_concat shape N_TURNS x (utterance_size + memory_size)
utterance_mem_concat = torch.cat((u_t[dial_idx].expand(num_turns, -1), dial_mem), dim=-1)
attn_input_list.append(utterance_mem_concat)
scores = []
for idx, input_tensor in enumerate(attn_input_list):
curr_out = self.utterance_memory_attn(input_tensor)
scores.append(curr_out)
attn_weights = []
for score in scores:
attn = F.softmax(score, dim=0)
attn_weights.append(attn)
assert len(attn_weights) == len(memory_bank), 'Memory size and attention weights do not match'
weighted_sum_list = []
for attn, mem in zip(attn_weights, memory_bank):
weighted_mem = attn * mem
weighted_sum = torch.sum(weighted_mem, dim=0)
weighted_sum_list.append(weighted_sum)
weighted_sum = torch.stack(weighted_sum_list)
return weighted_sum
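    # Shape sketch for attention_over_memory (added note): with u_t of shape
    # [B x 2*MEMORY_HIDDEN_SIZE] and a memory entry of shape [N_TURNS x 2*MEMORY_HIDDEN_SIZE],
    # each attention input is [N_TURNS x 4*MEMORY_HIDDEN_SIZE], the scores are [N_TURNS x 1],
    # and the softmax-weighted sum collapses each dialogue memory into a single
    # [2*MEMORY_HIDDEN_SIZE] context vector, stacked into [B x 2*MEMORY_HIDDEN_SIZE].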
def collate_fn(self, batch):
"""This method prepares the batch for the LSTM: padding + preparation for pack_padded_sequence
Args:
batch (tuple): tuple of element returned by the Dataset.__getitem__()
Returns:
dial_ids (list): list of dialogue ids
turns (list): list of dialogue turn numbers
            seq_tensor (torch.LongTensor): tensor with shape BxMAX_SEQ_LEN containing padded user transcripts sorted by descending effective length
            seq_lengths (torch.LongTensor): tensor with shape B containing the effective length of the corresponding transcript
            actions (torch.LongTensor): tensor with shape B containing target actions
            attributes (torch.LongTensor): tensor with shape Bx33 containing attribute one-hot vectors, one for each sample.
"""
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
transcripts = [torch.tensor(item[2]) for item in batch]
history = [item[3] for item in batch]
actions = torch.tensor([item[4] for item in batch])
attributes = torch.stack([item[5] for item in batch])
visual_context = {'focus': [], 'history': []}
visual_context['focus'] = [item[6] for item in batch]
visual_context['history'] = [item[7] for item in batch]
assert len(transcripts) == len(dial_ids), 'Batch sizes do not match'
assert len(transcripts) == len(turns), 'Batch sizes do not match'
assert len(transcripts) == len(history), 'Batch sizes do not match'
assert len(transcripts) == actions.shape[0], 'Batch sizes do not match'
assert len(transcripts) == attributes.shape[0], 'Batch sizes do not match'
assert len(transcripts) == len(visual_context['focus']), 'Batch sizes do not match'
assert len(transcripts) == len(visual_context['history']), 'Batch sizes do not match'
# reorder the sequences from the longest one to the shortest one.
        # keep the correspondence with the target
transcripts_lengths = torch.tensor(list(map(len, transcripts)), dtype=torch.long)
transcripts_tensor = torch.zeros((len(transcripts), transcripts_lengths.max()), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(transcripts, transcripts_lengths)):
transcripts_tensor[idx, :seqlen] = seq.clone().detach()
# sort instances by sequence length in descending order
transcripts_lengths, perm_idx = transcripts_lengths.sort(0, descending=True)
transcripts_tensor = transcripts_tensor[perm_idx]
actions = actions[perm_idx]
attributes = attributes[perm_idx]
sorted_dial_ids = []
sorted_dial_turns = []
sorted_dial_history = []
sorted_visual_context = {'focus': [], 'history':[]}
for idx in perm_idx:
sorted_dial_ids.append(dial_ids[idx])
sorted_dial_turns.append(turns[idx])
sorted_dial_history.append(history[idx])
sorted_visual_context['focus'].append(visual_context['focus'][idx])
sorted_visual_context['history'].append(visual_context['history'][idx])
sorted_visual_context['focus'] = torch.stack(sorted_visual_context['focus'])
batch_dict = {}
batch_dict['utterances'] = transcripts_tensor
batch_dict['history'] = sorted_dial_history
batch_dict['visual_context'] = sorted_visual_context
batch_dict['seq_lengths'] = transcripts_lengths
return sorted_dial_ids, sorted_dial_turns, batch_dict, actions, attributes
def __str__(self):
return super().__str__()
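# Illustrative self-check (added sketch, not part of the original model code): the table built
# by get_positional_embeddings() keeps position 0 as the all-zero padding vector and has one
# row per position.
if __name__ == '__main__':
    pe = get_positional_embeddings(n_position=5, emb_dim=8)
    print(pe.shape)                  # torch.Size([5, 8])
    print(pe[0].abs().sum().item())  # 0.0 -> the padding position is the zero vector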
| 15,354 | 45.814024 | 154 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/models/blindstateful.py
|
import pdb
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .embednets import WordEmbeddingNetwork
class BlindStatefulLSTM(nn.Module):
_HIDDEN_SIZE = 300
def __init__(self, word_embeddings_path, word2id, num_actions, num_attrs, pad_token, unk_token, seed, OOV_corrections):
torch.manual_seed(seed)
super(BlindStatefulLSTM, self).__init__()
self.num_actions = num_actions
self.num_attrs = num_attrs
self.memory_hidden_size = self._HIDDEN_SIZE
self.word_embeddings_layer = WordEmbeddingNetwork(word_embeddings_path=word_embeddings_path,
word2id=word2id,
pad_token=pad_token,
unk_token=unk_token)
self.utterance_encoder = nn.LSTM(self.word_embeddings_layer.embedding_dim,
self.memory_hidden_size,
batch_first=True,
bidirectional=True)
self.utterance_dropout = nn.Dropout(p=0.75)
self.utterance_normlayer = nn.LayerNorm([2*self.memory_hidden_size])
#todo recurrent attention?
#self.cross_history_attn = nn.Linear()
#! this is position agnostic (should not be good)
self.utterance_memory_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 4*self.memory_hidden_size),
nn.Tanh(),
nn.Dropout(p=0.75),
nn.Linear(4*self.memory_hidden_size, 1),
nn.Dropout(p=0.75)) #todo introduce layerNorm
self.linear_act_post_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 2*self.memory_hidden_size),
nn.Dropout(p=0.75),
nn.ReLU())
self.linear_attrs_post_attn = nn.Sequential(nn.Linear(4*self.memory_hidden_size, 2*self.memory_hidden_size),
nn.Dropout(p=0.75),
nn.ReLU(),)
self.multiturn_actions_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_actions)
self.multiturn_attrs_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_attrs)
self.singleturn_actions_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_actions)
self.singleturn_attrs_outlayer = nn.Linear(in_features=2*self.memory_hidden_size, out_features=self.num_attrs)
def forward(self, utterances, history, seq_lengths=None, device='cpu'):
# u_t shape [BATCH_SIZE x 2MEMORY_HIDDEN_SIZE]
u_t = self.encode_utterance(utterances, seq_lengths)
# separate single from multi-turn
single_turns = []
single_turns_pos = set()
multi_turns = []
multi_turns_history = []
for dial_idx, history_item in enumerate(history):
if len(history_item) == 0:
single_turns_pos.add(dial_idx)
single_turns.append(u_t[dial_idx])
else:
multi_turns.append(u_t[dial_idx])
multi_turns_history.append(history[dial_idx])
if len(single_turns):
single_turns = torch.stack(single_turns)
# compute output for single turn dialogues
act_out1 = self.singleturn_actions_outlayer(single_turns)
attrs_out1 = self.singleturn_attrs_outlayer(single_turns)
if len(multi_turns):
multi_turns = torch.stack(multi_turns)
# memory bank is a list of BATCH_SIZE tensors, each of them having shape N_TURNSx2MEMORY_HIDDEN_SIZE
memory_bank = self.encode_history(multi_turns_history, device)
assert len(multi_turns) == len(memory_bank), 'Wrong memory size'
# c_t shape [MULTI_TURNS_SET_SIZE x MEMORY_HIDDEN_SIZE]
attentive_c_t = self.attention_over_memory(multi_turns, memory_bank)
#? Hadamard product between c_t and u_t? It is simply "tensor1 * tensor2"
ut_ct_concat = torch.cat((multi_turns, attentive_c_t), dim=-1)
c_t_tilde1 = self.linear_act_post_attn(ut_ct_concat)
ut_ct1_concat = torch.cat((multi_turns, c_t_tilde1), dim=-1)
c_t_tilde2 = self.linear_attrs_post_attn(ut_ct1_concat)
act_out2 = self.multiturn_actions_outlayer(c_t_tilde1)
attrs_out2 = self.multiturn_attrs_outlayer(c_t_tilde2)
# recompose the output
act_out = []
attrs_out = []
pos1 = 0
pos2 = 0
for idx in range(utterances.shape[0]):
if idx in single_turns_pos:
act_out.append(act_out1[pos1])
attrs_out.append(attrs_out1[pos1])
pos1 += 1
else:
act_out.append(act_out2[pos2])
attrs_out.append(attrs_out2[pos2])
pos2 += 1
act_out = torch.stack(act_out)
attrs_out = torch.stack(attrs_out)
act_probs = F.softmax(act_out, dim=-1)
attrs_probs = torch.sigmoid(attrs_out)
return act_out, attrs_out, act_probs, attrs_probs
def encode_history(self, history, device):
#todo turn embedding based on previous turns (hierarchical recurrent encoder - HRE)
encoded_batch_history = []
for dial in history:
hiddens = []
for turn in dial:
emb = self.word_embeddings_layer(turn.unsqueeze(0).to(device))
# h_t.shape = [num_directions x 1 x HIDDEN_SIZE]
out, (h_t, c_t) = self.utterance_encoder(emb)
bidirectional_h_t = torch.cat((h_t[0], h_t[-1]), dim=-1)
bidirectional_h_t = self.utterance_dropout(bidirectional_h_t)
bidirectional_h_t = self.utterance_normlayer(bidirectional_h_t)
hiddens.append(bidirectional_h_t.squeeze(0))
assert len(hiddens) > 0, 'Impossible to encode history for single turn instance'
encoded_batch_history.append(torch.stack(hiddens))
return encoded_batch_history
def encode_utterance(self, batch, seq_lengths):
embedded_seq_tensor = self.word_embeddings_layer(batch)
if seq_lengths is not None:
# pack padded sequence
packed_input = pack_padded_sequence(embedded_seq_tensor, seq_lengths.cpu().numpy(), batch_first=True)
out1, (h_t, c_t) = self.utterance_encoder(packed_input)
bidirectional_h_t = torch.cat((h_t[0], h_t[-1]), dim=-1)
bidirectional_h_t = self.utterance_dropout(bidirectional_h_t)
bidirectional_h_t = self.utterance_normlayer(bidirectional_h_t)
"""unpack not needed. We don't use the output
if seq_lengths is not None:
# unpack padded sequence
output, input_sizes = pad_packed_sequence(out1, batch_first=True)
"""
return bidirectional_h_t
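# Note (added): pack_padded_sequence (with its default enforce_sorted behaviour)
# expects the batch sorted by descending length, which is exactly what collate_fn
# below produces. A hedged usage sketch (variable names illustrative):
#   _, _, batch_dict, _, _ = model.collate_fn(samples)
#   u_t = model.encode_utterance(batch_dict['utterances'].to(device), batch_dict['seq_lengths'])
#   # u_t: (batch_size, 2 * memory_hidden_size)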
def attention_over_memory(self, u_t, memory_bank):
# input for attention layer consists of pairs (utterance_j, memory_j_i), for each j, i
attn_input_list = []
for dial_idx, dial_mem in enumerate(memory_bank):
num_turns = dial_mem.shape[0]
#utterance_mem_concat shape N_TURNS x (utterance_size + memory_size)
utterance_mem_concat = torch.cat((u_t[dial_idx].expand(num_turns, -1), dial_mem), dim=-1)
attn_input_list.append(utterance_mem_concat)
scores = []
for idx, input_tensor in enumerate(attn_input_list):
curr_out = self.utterance_memory_attn(input_tensor)
scores.append(curr_out)
attn_weights = []
for score in scores:
attn = F.softmax(score, dim=0)
attn_weights.append(attn)
assert len(attn_weights) == len(memory_bank), 'Memory size and attention weights do not match'
weighted_sum_list = []
for attn, mem in zip(attn_weights, memory_bank):
weighted_mem = attn * mem
weighted_sum = torch.sum(weighted_mem, dim=0)
weighted_sum_list.append(weighted_sum)
weighted_sum = torch.stack(weighted_sum_list)
return weighted_sum
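# Note (added): shape sketch for the attention above, for B multi-turn dialogues
# with T_j history turns each:
#   u_t[j]         : (2*memory_hidden_size,)
#   memory_bank[j] : (T_j, 2*memory_hidden_size)
#   scores[j]      : (T_j, 1) -> softmax over the T_j turns of dialogue j
#   weighted_sum   : (B, 2*memory_hidden_size)
# i.e. each utterance attends only over its own dialogue history.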
def collate_fn(self, batch):
"""This method prepares the batch for the LSTM: padding + preparation for pack_padded_sequence
Args:
batch (tuple): tuple of element returned by the Dataset.__getitem__()
Returns:
dial_ids (list): list of dialogue ids
turns (list): list of dialogue turn numbers
seq_tensor (torch.LongTensor): tensor of shape B x MAX_SEQ_LEN containing the padded sequences of user transcripts, sorted by descending effective length
seq_lengths: tensor of shape B containing the effective length of the corresponding transcript sequence
actions (torch.LongTensor): tensor of shape B containing the target actions
attributes (torch.LongTensor): tensor of shape B x 33 containing attributes one-hot vectors, one for each sample.
"""
dial_ids = [item[0] for item in batch]
turns = [item[1] for item in batch]
transcripts = [torch.tensor(item[2]) for item in batch]
history = [item[3] for item in batch]
actions = torch.tensor([item[4] for item in batch])
attributes = torch.stack([item[5] for item in batch])
assert len(transcripts) == len(dial_ids), 'Batch sizes do not match'
assert len(transcripts) == len(turns), 'Batch sizes do not match'
assert len(transcripts) == len(history), 'Batch sizes do not match'
assert len(transcripts) == actions.shape[0], 'Batch sizes do not match'
assert len(transcripts) == attributes.shape[0], 'Batch sizes do not match'
# reorder the sequences from the longest one to the shortest one.
# keep the correspondence with the targets
transcripts_lengths = torch.tensor(list(map(len, transcripts)), dtype=torch.long)
transcripts_tensor = torch.zeros((len(transcripts), transcripts_lengths.max()), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(transcripts, transcripts_lengths)):
transcripts_tensor[idx, :seqlen] = seq.clone().detach()
# sort instances by sequence length in descending order
transcripts_lengths, perm_idx = transcripts_lengths.sort(0, descending=True)
transcripts_tensor = transcripts_tensor[perm_idx]
actions = actions[perm_idx]
attributes = attributes[perm_idx]
sorted_dial_ids = []
sorted_dial_turns = []
sorted_dial_history = []
for idx in perm_idx:
sorted_dial_ids.append(dial_ids[idx])
sorted_dial_turns.append(turns[idx])
sorted_dial_history.append(history[idx])
batch_dict = {}
batch_dict['utterances'] = transcripts_tensor
batch_dict['history'] = sorted_dial_history
batch_dict['seq_lengths'] = transcripts_lengths
return sorted_dial_ids, sorted_dial_turns, batch_dict, actions, attributes
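# Note (added): a minimal usage sketch, assuming a Dataset whose __getitem__
# returns (dial_id, turn, token_ids, history, action, attrs) as consumed above:
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=model.collate_fn)
#   for dial_ids, turns, batch_dict, actions, attributes in loader:
#       out = model(batch_dict['utterances'].to(device), batch_dict['history'],
#                   batch_dict['seq_lengths'], device=device)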
def __str__(self):
return super().__str__()
| 11,586 | 45.163347 | 154 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/utilities/simmc_utilities.py
|
import os
import sys
import matplotlib.pyplot as plt
class Logger(object):
def __init__(self, log_path):
self.terminal = sys.stdout
self.log = open(log_path, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def plotting_loss(save_path, x_values, x_label, y_label, plot_title, functions, legend=True):
"""plot functions
Args:
save_path (str): path where to save the plot
x_values (numpy.array): values on the x axis
x_label (str): label for the x axis
y_label (str): label for the y axis
plot_title (str): title for the plot
functions (list): list of tuples (list(values), color, label) where color and label are strings
legend (bool): to print the legend for the plot. (Default: True)
"""
# plot train vs validation
for f in functions:
plt.plot(x_values, f[0], color=f[1], label=f[2])
plt.title(plot_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
if legend:
plt.legend(loc='best')
plt.savefig(save_path)
plt.clf()
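# Note (added): example call, assuming per-epoch loss lists of equal length:
#   plotting_loss('losses.png', x_values=range(1, len(train_losses) + 1),
#                 x_label='epoch', y_label='loss', plot_title='train vs. validation loss',
#                 functions=[(train_losses, 'blue', 'train'),
#                            (valid_losses, 'orange', 'validation')])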
def print_annotation_dialogue(dialogue, actions):
"""Print the specified dialogue with belief state and state graph annotations
Args:
dialogue (list): list of dialogue turns
actions (list): list of actions with shape [ {'turn_idx': <idx>, 'action': <action>, 'action_supervision': {'attributes': [...]}}, ...]
"""
"""
for turn, act in zip(dialogue, actions):
print('+U{}: {} -> {}({})'.format(turn['turn_idx']).item(), turn['transcript'])
#TODO end this function
"""
assert len(dialogue) == len(actions), 'Actions and turns do not match'
for turn, act in zip(dialogue, actions):
print('+U{}: {}\n+A{}: {}'.format(turn['turn_idx'], turn['transcript'], turn['turn_idx'], turn['system_transcript']))
print('------- Annotations: turn {}--------'.format(turn['turn_idx']))
print('+action:{}'.format(act['action']))
if act['action_supervision'] is not None:
print('+attributes:{}'.format(act['action_supervision']['attributes']))
"""
print('+belief_state:{}\n+transcript_annotated{}\n+system_transcript_annotated{}\n+turn_label{}\n+state_graph_0:{}\n+state_graph_1:{}\n+state_graph_2:{}'
.format(turn['belief_state'], turn['transcript_annotated'], turn['system_transcript_annotated'], turn['turn_label'],
turn['state_graph_0'], turn['state_graph_1'], turn['state_graph_2']))
"""
print('-------------------------------\n\n')
def print_sample_dialogue(dialogue, annotations=True):
"""Print an annotated sample of the specified dialogue
Args:
dialogue (list): list of dialogue turns
"""
for turn in dialogue:
print('+U{}: {}\n+A{}: {}'.format(turn['turn_idx'].item(), turn['transcript'], turn['turn_idx'].item(), turn['system_transcript']))
if annotations:
print('------- Annotations: turn{}--------'.format(turn['turn_idx'].item()))
print('+belief_state:{}\n+transcript_annotated{}\n+system_transcript_annotated{}\n+turn_label{}\n+state_graph_0:{}\n+state_graph_1:{}\n+state_graph_2:{}'
.format(turn['belief_state'], turn['transcript_annotated'], turn['system_transcript_annotated'], turn['turn_label'],
turn['state_graph_0'], turn['state_graph_1'], turn['state_graph_2']))
print('-------------------------------\n\n')
| 3,765 | 40.384615 | 165 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/utilities/__init__.py
|
from .simmc_utilities import *
| 30 | 30 | 30 |
py
|
dstc9-SIMMC
|
dstc9-SIMMC-master/mm_action_prediction/utilities/action_evaluation.py
|
"""Script evaluates action prediction along with attributes.
Author(s): Satwik Kottur
"""
from absl import app, flags
import collections
import json
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string(
"action_json_path", "data/furniture_api_calls.json", "Ground truth API calls"
)
flags.DEFINE_string(
"model_output_path", None, "Action API predictions by the model"
)
IGNORE_ATTRIBUTES = [
"minPrice",
"maxPrice",
"furniture_id",
"material",
"decorStyle",
"intendedRoom",
"raw_matches",
"focus" # fashion
]
def evaluate_action_prediction(gt_actions, model_actions):
"""Evaluates action prediction using the raw data and model predictions.
Args:
gt_actions: Ground truth actions + action attributes
model_actions: Actions + attributes predicted by the model
"""
gt_actions_pool = {ii["dialog_id"]: ii for ii in gt_actions}
matches = {"action": [], "attributes": [], "perplexity": []}
confusion_dict = collections.defaultdict(list)
for model_datum in model_actions:
dialog_id = model_datum["dialog_id"]
for round_id, round_datum in enumerate(model_datum["predictions"]):
gt_datum = gt_actions_pool[dialog_id]["actions"][round_id]
action_match = gt_datum["action"] == round_datum["action"]
# Record matches and confusion.
matches["action"].append(action_match)
matches["perplexity"].append(
round_datum["action_log_prob"][gt_datum["action"]]
)
confusion_dict[gt_datum["action"]].append(round_datum["action"])
# Get supervision for action attributes.
supervision = gt_datum["action_supervision"]
if supervision is not None and "args" in supervision:
supervision = supervision["args"]
if supervision is None:
continue
# Case 1: Action mismatch -- record False for all attributes.
if not action_match:
for key in supervision.keys():
if key in IGNORE_ATTRIBUTES:
continue
matches["attributes"].append(False)
# Case 2: Action matches -- use model predictions for attributes.
else:
for key in supervision.keys():
if key in IGNORE_ATTRIBUTES:
continue
gt_key_vals = supervision[key]
model_key_vals = round_datum["attributes"][key]
if not len(gt_key_vals):
continue
# For fashion, this is a list -- multi label prediction.
if isinstance(gt_key_vals, list):
assert isinstance(model_key_vals, list), (
"Model should also predict a list for attributes"
)
recall = np.mean(
[(ii in model_key_vals) for ii in gt_key_vals]
)
if len(model_key_vals):
precision = np.mean(
[(ii in gt_key_vals) for ii in model_key_vals]
)
else:
precision = 0.
f1_score = (2 * recall * precision) / (recall + precision + 1e-5)
matches["attributes"].append(f1_score)
else:
# For furniture, this is a string -- single label prediction.
matches["attributes"].append(gt_key_vals == model_key_vals)
# Compute the confusion matrix.
all_actions = sorted(
set(confusion_dict.keys()).union(
{jj for ii in confusion_dict.values() for jj in ii}
)
)
matrix = np.zeros((len(all_actions), len(all_actions)))
for index, action in enumerate(all_actions):
labels, counts = np.unique(confusion_dict[action], return_counts=True)
for label, count in zip(labels, counts):
matrix[all_actions.index(label), index] += count
return {
"action_accuracy": np.mean(matches["action"]),
"action_perplexity": np.exp(-1 * np.mean(matches["perplexity"])),
"attribute_accuracy": np.mean(matches["attributes"]),
"confusion_matrix": matrix
}
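# Note (added): sketch of the `model_actions` format implied by the field accesses
# above (action names and attribute keys are purely illustrative):
#   [{"dialog_id": 123,
#     "predictions": [{"action": "SearchFurniture",
#                      "action_log_prob": {"SearchFurniture": -0.1, "None": -2.3},
#                      "attributes": {"furnitureType": ["sofa"]}}]}]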
def main(_):
print("Reading: {}".format(FLAGS.action_json_path))
with open(FLAGS.action_json_path, "r") as file_id:
gt_actions = json.load(file_id)
print("Reading: {}".format(FLAGS.model_output_path))
with open(FLAGS.model_output_path, "r") as file_id:
model_actions = json.load(file_id)
action_metrics = evaluate_action_prediction(gt_actions, model_actions)
print(action_metrics)
if __name__ == "__main__":
app.run(main)
| 4,901 | 36.419847 | 89 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/translate.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import os
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.model import NMTModel
from beaver.utils import parseopt, get_device, calculate_bleu
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_translate_args()
device = get_device()
def translate(dataset, fields, model):
already, hypothesis_1, hypothesis_2 = 0, [], []
for batch in dataset:
predictions_1, predictions_2 = beam_search(opt, model, batch.source, fields)
hypothesis_1 += [fields["summary_cn"].decode(p) for p in predictions_1]
hypothesis_2 += [fields["summary_en"].decode(p) for p in predictions_2]
already += len(predictions_1)
logging.info("Finished: %7d/%7d" % (already, dataset.num_examples))
origin = sorted(zip(hypothesis_1, hypothesis_2, dataset.seed), key=lambda t: t[2])
hypothesis_1 = [h for h, _, _ in origin]
hypothesis_2 = [h for _, h, _ in origin]
with open(opt.output[0], "w", encoding="UTF-8") as out_file:
out_file.write("\n".join(hypothesis_1))
out_file.write("\n")
with open(opt.output[1], "w", encoding="UTF-8") as out_file:
out_file.write("\n".join(hypothesis_2))
out_file.write("\n")
logging.info("All finished. ")
def main():
logging.info("Build dataset...")
dataset = build_dataset(opt, [opt.input, opt.input, opt.input], opt.vocab, device, train=False)
fields = dataset.fields
pad_ids = {"source": fields["source"].pad_id,
"summary_cn": fields["summary_cn"].pad_id,
"summary_en": fields["summary_en"].pad_id}
vocab_sizes = {"source": len(fields["source"].vocab),
"summary_cn": len(fields["summary_cn"].vocab),
"summary_en": len(fields["summary_en"].vocab)}
# load checkpoint from model_path
logging.info("Load checkpoint from %s." % opt.model_path)
checkpoint = torch.load(opt.model_path, map_location=lambda storage, loc: storage)
logging.info("Build model...")
model = NMTModel.load_model(checkpoint["opt"], pad_ids, vocab_sizes, checkpoint["model"]).to(device).eval()
logging.info("Start translation...")
with torch.set_grad_enabled(False):
translate(dataset, fields, model)
if __name__ == '__main__':
main()
| 2,389 | 33.142857 | 111 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/train.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.utils.metric import calculate_rouge
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion_cn, criterion_en, valid_dataset, step):
model.eval()
total_loss = total_cn_loss = total_en_loss = 0.0
total_n = 0
cn_hypothesis, cn_references = [], []
en_hypothesis, en_references = [], []
for batch in valid_dataset:
cn_scores, en_scores = model(batch.source, batch.summary_cn, batch.summary_en)
cn_loss = criterion_cn(cn_scores, batch.summary_cn)
en_loss = criterion_en(en_scores, batch.summary_en)
loss = cn_loss + en_loss
total_loss += loss.data
total_cn_loss += cn_loss.data
total_en_loss += en_loss.data
total_n += 1
_, cn_predictions = cn_scores.topk(k=1, dim=-1)
cn_hypothesis += [valid_dataset.fields["summary_cn"].decode(p) for p in cn_predictions]
cn_references += [valid_dataset.fields["summary_cn"].decode(t) for t in batch.summary_cn]
_, en_predictions = en_scores.topk(k=1, dim=-1)
en_hypothesis += [valid_dataset.fields["summary_en"].decode(p) for p in en_predictions]
en_references += [valid_dataset.fields["summary_en"].decode(t) for t in batch.summary_en]
bleu_cn = calculate_bleu(cn_hypothesis, cn_references)
bleu_en = calculate_bleu(en_hypothesis, en_references)
rouge1_cn, rouge2_cn = calculate_rouge(cn_hypothesis, cn_references)
rouge1_en, rouge2_en = calculate_rouge(en_hypothesis, en_references)
mean_loss = total_loss / total_n
mean_en_loss = total_en_loss / total_n
mean_cn_loss = total_cn_loss / total_n
logging.info("loss: %.2f\t loss-cn: %.2f \t loss-en %.2f \t bleu-cn: %3.2f\t bleu-en: %3.2f \t rouge1-cn: %3.2f \t rouge1-en: %3.2f \t rouge2-cn: %3.2f \t rouge2-en: %3.2f"
% (mean_loss, mean_cn_loss, mean_en_loss, bleu_cn, bleu_en, rouge1_cn, rouge1_en, rouge2_cn, rouge2_en))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, mean_loss, mean_cn_loss, mean_en_loss, bleu_cn, bleu_en, rouge1_cn, rouge1_en, rouge2_cn, rouge2_en)
def train(model, criterion_cn, criterion_en, optimizer, train_dataset, valid_dataset):
total_loss = total_cn_loss = total_en_loss = 0.0
model.zero_grad()
for i, batch in enumerate(train_dataset):
cn_scores, en_scores = model(batch.source, batch.summary_cn, batch.summary_en)
cn_loss = criterion_cn(cn_scores, batch.summary_cn)
en_loss = criterion_en(en_scores, batch.summary_en)
loss = cn_loss + en_loss
loss.backward()
total_loss += loss.data
total_cn_loss += cn_loss.data
total_en_loss += en_loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
mean_loss = total_loss / opt.report_every / opt.grad_accum
mean_en_loss = total_en_loss / opt.report_every / opt.grad_accum
mean_cn_loss = total_cn_loss / opt.report_every / opt.grad_accum
logging.info("step: %7d\t loss: %.4f \t loss-cn: %.4f \t loss-en: %.4f"
% (optimizer.n_step, mean_loss, mean_cn_loss, mean_en_loss))
total_loss = total_cn_loss = total_en_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion_cn, criterion_en, valid_dataset, optimizer.n_step)
model.train()
del loss
def main():
logging.info("Build dataset...")
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"source": fields["source"].pad_id,
"summary_cn": fields["summary_cn"].pad_id,
"summary_en": fields["summary_en"].pad_id}
vocab_sizes = {"source": len(fields["source"].vocab),
"summary_cn": len(fields["summary_cn"].vocab),
"summary_en": len(fields["summary_en"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion_cn = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["summary_cn"], pad_ids["summary_cn"]).to(device)
criterion_en = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["summary_en"], pad_ids["summary_en"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion_cn, criterion_en, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
| 5,407 | 42.264 | 176 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/tools/model_average.py
|
# -*- coding: utf-8 -*-
import os
import torch
import sys
def main():
if len(sys.argv) != 3:
print("python model_average.py model_path n")
exit()
model_path = sys.argv[1]
n = int(sys.argv[2]) # last n model to be averaged
fs = [os.path.join(model_path, f) for f in os.listdir(model_path) if f.startswith("checkpoint")]
fs = sorted(fs, reverse=True)[:n]  # keep only the last n checkpoint files
n = len(fs) # actual file count
cks = [torch.load(f, map_location=lambda storage, loc: storage) for f in fs]
first_model = cks[0]["model"] # average all weights into first model and save it
for k, _ in first_model.items():
for ck in cks[1:]:
first_model[k] = (first_model[k] + ck["model"][k])
first_model[k] = first_model[k] / n
torch.save(cks[0], os.path.join(model_path, "averaged-%s-%s" % (fs[-1].split("-")[-1], fs[0].split("-")[-1])))
if __name__ == '__main__':
main()
| 941 | 29.387097 | 114 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/tools/build_vocab.py
|
# -*- coding: utf-8 -*-
import sys
import collections
log = sys.stderr.write
def main():
size = int(sys.argv[1])
counter = collections.Counter()
for line in sys.stdin:
counter.update(line.strip().split())
items = counter.most_common()
for word, _ in items[:size]:
print(word)
total = sum([c for _, c in items])
appear = sum([c for _, c in items[:size]])
log("total words: %d\n" % total)
log("words in vocab: %d\n" % appear)
log("vocab coverage: %.2f%%\n" % (1.0 * appear / total * 100))
log("total unique words: %d\n" % len(items))
if __name__ == '__main__':
main()
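# Note (added): assumed invocation (file names illustrative) -- reads tokenized
# text from stdin, writes the top-N words to stdout and coverage stats to stderr:
#   python build_vocab.py 30000 < train.tok.txt > vocab.txt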
| 635 | 23.461538 | 66 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/__init__.py
|
# -*- coding: utf-8 -*-
| 24 | 11.5 | 23 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/loss/optimizers.py
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch.optim as optim
class WarmAdam(object):
def __init__(self, params, lr, hidden_size, warm_up, n_step):
self.original_lr = lr
self.n_step = n_step
self.hidden_size = hidden_size
self.warm_up_step = warm_up
self.optimizer = optim.Adam(params, betas=[0.9, 0.998], eps=1e-9)
def step(self):
self.n_step += 1
warm_up = min(self.n_step ** (-0.5), self.n_step * self.warm_up_step ** (-1.5))
lr = self.original_lr * (self.hidden_size ** (-0.5) * warm_up)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.optimizer.step()
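# Note (added): this is the inverse-square-root warm-up schedule of the Transformer;
# with the default original_lr = 1.0 the effective rate is
#   lr(step) = hidden_size**-0.5 * min(step**-0.5, step * warm_up**-1.5)
# e.g. hidden_size=512, warm_up=8000 peaks at roughly 4.9e-4 around step 8000.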
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index):
self.padding_idx = ignore_index
self.label_smoothing = label_smoothing
self.vocab_size = tgt_vocab_size
super(LabelSmoothingLoss, self).__init__()
def forward(self, output, target):
target = target[:, 1:].contiguous().view(-1)
output = output.view(-1, self.vocab_size)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -output.gather(dim=-1, index=target.view(-1, 1))[non_pad_mask].sum()
smooth_loss = -output.sum(dim=-1, keepdim=True)[non_pad_mask].sum()
eps_i = self.label_smoothing / self.vocab_size
loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
return loss / non_pad_mask.float().sum()
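# Note (added): usage sketch matching train.py in this repo; `output` holds
# log-probabilities for the shifted target, and the leading <bos> of `target`
# is dropped above before the smoothed NLL is computed:
#   criterion = LabelSmoothingLoss(opt.label_smoothing, vocab_size, pad_id)
#   loss = criterion(cn_scores, batch.summary_cn)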
| 1,529 | 36.317073 | 87 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/loss/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.loss.optimizers import WarmAdam, LabelSmoothingLoss
| 90 | 17.2 | 63 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/utils/saver.py
|
import json
import torch
import os
import datetime
class Saver(object):
def __init__(self, opt):
self.ckpt_names = []
self.model_path = opt.model_path + datetime.datetime.now().strftime("-%y%m%d-%H%M%S")
self.max_to_keep = opt.max_to_keep
os.mkdir(self.model_path)
with open(os.path.join(self.model_path, "params.json"), "w", encoding="UTF-8") as log:
log.write(json.dumps(vars(opt), indent=4) + "\n")
def save(self, save_dict, step, loss, loss_cn, loss_en, bleu_cn, bleu_en, rouge1_cn, rouge1_en, rouge2_cn, rouge2_en):
filename = "checkpoint-step-%06d" % step
full_filename = os.path.join(self.model_path, filename)
self.ckpt_names.append(full_filename)
torch.save(save_dict, full_filename)
with open(os.path.join(self.model_path, "log"), "a", encoding="UTF-8") as log:
log.write("%s\t" % datetime.datetime.now())
log.write("step: %6d\t" % step)
log.write("loss: %.2f\t" % loss)
log.write("loss-cn: %.2f\t" % loss_cn)
log.write("loss-en: %.2f\t" % loss_en)
log.write("bleu-cn: %3.2f\t" % bleu_cn)
log.write("bleu-en: %3.2f\t" % bleu_en)
log.write("rouge1-cn: %3.2f\t" % rouge1_cn)
log.write("rouge1-en: %3.2f\t" % rouge1_en)
log.write("rouge2-cn: %3.2f\t" % rouge2_cn)
log.write("rouge2-en: %3.2f\t" % rouge2_en)
log.write("\n")
if 0 < self.max_to_keep < len(self.ckpt_names):
earliest_ckpt = self.ckpt_names.pop(0)
os.remove(earliest_ckpt)
| 1,626 | 38.682927 | 122 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/utils/parseopt.py
|
# -*- coding: utf-8 -*-
import argparse
import json
def common_opts(parser):
parser.add_argument("-vocab", type=str, nargs="*", help="Vocab file")
parser.add_argument("-batch_size", type=int, default=8192, help="Batch size")
parser.add_argument("-beam_size", type=int, default=4, help="Beam size")
parser.add_argument("-max_length", type=int, default=200, help="Maximum prediction length")
parser.add_argument("-length_penalty", type=float, default=0.6, help="Length penalty")
parser.add_argument("-model_path", default="train", help="Path to model checkpoint file")
parser.add_argument("-tf", action="store_true", help="Use teacher forcing for decoding")
parser.add_argument("-share_cn_embedding", action="store_false", help="Share the vocabulary/embeddings between the source text and the Chinese summary")
parser.add_argument("-min_length", type=int, default=1, help="Minimum prediction length")
def data_opts(parser):
parser.add_argument("-train", type=str, nargs=3, help="Training data")
parser.add_argument("-valid", type=str, nargs=3, help="Validation data")
def train_opts(parser):
parser.add_argument("-grad_accum", type=int, default=1, help="Accumulate gradients")
parser.add_argument("-max_to_keep", type=int, default=5, help="How many checkpoints to keep")
parser.add_argument("-report_every", type=int, default=1000, help="Report every n steps")
parser.add_argument("-save_every", type=int, default=2000, help="Valid and save model for every n steps")
parser.add_argument("-train_from", type=str, default=None, help="Train from checkpoint")
def model_opts(parser):
parser.add_argument("-layers", type=int, default=6, help="Number of layers")
parser.add_argument("-heads", type=int, default=8, help="Number of heads")
parser.add_argument("-hidden_size", type=int, default=512, help="Size of hidden states")
parser.add_argument("-ff_size", type=int, default=2048, help="Feed forward hidden size")
parser.add_argument("-lr", type=float, default=1.0, help="Learning rate")
parser.add_argument("-warm_up", type=int, default=8000, help="Warm up step")
parser.add_argument("-label_smoothing", type=float, default=0.1, help="Label smoothing rate")
parser.add_argument("-dropout", type=float, default=0.1, help="Dropout rate")
def translate_opts(parser):
parser.add_argument("-input", type=str, help="Translation data")
parser.add_argument("-truth", type=str, default=None, help="Truth target, used to calculate BLEU")
parser.add_argument("-output", nargs=2, default=["output1.txt", "output2.txt"], help="Path to output the predictions")
parser.add_argument("-bleu", action="store_true", help="Report BLEU")
def parse_train_args():
parser = argparse.ArgumentParser()
data_opts(parser)
train_opts(parser)
model_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_translate_args():
parser = argparse.ArgumentParser()
translate_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_args(parser):
parser.add_argument("-config", type=str, help="Config file")
opt = parser.parse_args()
if opt.config:
config = json.load(open(opt.config), object_hook=lambda d: {k: v for k, v in d.items() if k != "comment"})
parser.set_defaults(**config)
return parser.parse_args()
else:
return opt
| 3,366 | 42.166667 | 122 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/utils/rouge.py
|
# -*- coding: utf-8 -*-
def get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram = text_length - n
for i in range(max_index_ngram + 1):
ngram_set.add(tuple(text[i:i+n]))
return ngram_set
def rouge_n(evaluated_sentences, reference_sentences, n=2):  # defaults to rouge_2
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
return 0
evaluated_ngrams = get_ngrams(n, evaluated_sentences)
reference_ngrams = get_ngrams(n, reference_sentences)
reference_ngrams_count = len(reference_ngrams)
if reference_ngrams_count == 0:
return 0
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_ngrams_count = len(overlapping_ngrams)
return overlapping_ngrams_count / reference_ngrams_count
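# Note (added): a tiny worked example; rouge_n here is n-gram recall w.r.t. the
# reference (overlapping n-grams / reference n-grams), computed on token lists:
#   rouge_n("the cat sat".split(), "the cat slept".split(), n=1)  # -> 2/3
#   rouge_n("the cat sat".split(), "the cat slept".split(), n=2)  # -> 1/2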
def rouge_1(evaluated_sentences, reference_sentences):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
return rouge_n(evaluated_sentences, reference_sentences, n=1)
def rouge_2(evaluated_sentences, reference_sentences):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
return rouge_n(evaluated_sentences, reference_sentences, n=2)
def F_1(evaluated_sentences, reference_sentences, beta=1):
evaluated_sentences = evaluated_sentences.split()
reference_sentences = reference_sentences.split()
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
return 0
evaluated_ngrams = get_ngrams(beta, evaluated_sentences) # equal to retrieved set
reference_ngrams = get_ngrams(beta, reference_sentences) # equal to relevant set
evaluated_ngrams_num = len(evaluated_ngrams)
reference_ngrams_num = len(reference_ngrams)
if reference_ngrams_num == 0 or evaluated_ngrams_num == 0:
return 0
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_ngrams_num = len(overlapping_ngrams)
if overlapping_ngrams_num == 0:
return 0
return 2*overlapping_ngrams_num / (reference_ngrams_num + evaluated_ngrams_num)
| 2,152 | 35.491525 | 86 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/utils/__init__.py
|
# -*- coding: utf-8 -*-
import torch.cuda
from beaver.utils.metric import calculate_bleu, file_bleu
from beaver.utils.saver import Saver
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def printing_opt(opt):
return "\n".join(["%15s | %s" % (e[0], e[1]) for e in sorted(vars(opt).items(), key=lambda x: x[0])])
| 405 | 21.555556 | 105 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/utils/metric.py
|
import os
import re
import subprocess
import tempfile
from beaver.utils.rouge import F_1
def calculate_bleu(hypotheses, references, lowercase=False):
hypothesis_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
hypothesis_file.write("\n".join(hypotheses) + "\n")
hypothesis_file.close()
reference_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
reference_file.write("\n".join(references) + "\n")
reference_file.close()
return file_bleu(hypothesis_file.name, reference_file.name, lowercase)
def calculate_rouge(hypotheses, references):
rg1list = []
rg2list = []
for hypo, ref in zip(hypotheses, references):
rouge1 = F_1(hypo, ref, beta=1)
rouge2 = F_1(hypo, ref, beta=2)
rg1list.append(rouge1)
rg2list.append(rouge2)
rg1 = sum(rg1list) / len(rg1list)
rg2 = sum(rg2list) / len(rg2list)
return rg1 * 100, rg2 * 100
def file_bleu(hypothesis, reference, lowercase=False):
# ../../../tools/multi-bleu.perl, so take 3 levels up.
beaver_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
multi_bleu_path = os.path.join(beaver_path, "tools", "multi-bleu.perl")
with open(hypothesis, "r") as read_pred, open(os.devnull, "w") as black_hole:
bleu_cmd = ["perl", multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=black_hole).decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
except subprocess.CalledProcessError:
bleu_score = -1.0
return float(bleu_score)
| 1,745 | 35.375 | 108 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/data/field.py
|
# -*- coding: utf-8 -*-
from typing import List
import torch
EOS_TOKEN = "<eos>"
BOS_TOKEN = "<bos>"
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
class Field(object):
def __init__(self, bos: bool, eos: bool, pad: bool, unk: bool):
self.bos_token = BOS_TOKEN if bos else None
self.eos_token = EOS_TOKEN if eos else None
self.unk_token = UNK_TOKEN if unk else None
self.pad_token = PAD_TOKEN if pad else None
self.vocab = None
def load_vocab(self, words: List[str], specials: List[str]):
self.vocab = Vocab(words, specials)
def process(self, batch, device):
max_len = max(len(x) for x in batch)
padded, length = [], []
for x in batch:
bos = [self.bos_token] if self.bos_token else []
eos = [self.eos_token] if self.eos_token else []
pad = [self.pad_token] * (max_len - len(x))
padded.append(bos + x + eos + pad)
length.append(len(x) + len(bos) + len(eos))
padded = torch.tensor([self.encode(ex) for ex in padded])
return padded.long().to(device)
def encode(self, tokens):
ids = []
for tok in tokens:
if tok in self.vocab.stoi:
ids.append(self.vocab.stoi[tok])
else:
ids.append(self.unk_id)
return ids
def decode(self, ids):
tokens = []
for tok in ids:
tok = self.vocab.itos[tok]
if tok == self.eos_token:
break
if tok == self.bos_token:
continue
tokens.append(tok)
return " ".join(tokens).replace("@@ ", "").replace("@@", "")
@property
def special(self):
return [tok for tok in [self.unk_token, self.pad_token, self.bos_token, self.eos_token] if tok is not None]
@property
def pad_id(self):
return self.vocab.stoi[self.pad_token]
@property
def eos_id(self):
return self.vocab.stoi[self.eos_token]
@property
def bos_id(self):
return self.vocab.stoi[self.bos_token]
@property
def unk_id(self):
return self.vocab.stoi[self.unk_token]
class Vocab(object):
def __init__(self, words: List[str], specials: List[str]):
self.itos = specials + words
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
def __len__(self):
return len(self.itos)
| 2,418 | 25.582418 | 115 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/data/utils.py
|
# -*- coding: utf-8 -*-
from beaver.data.dataset import SumTransDataset
from beaver.data.field import Field
def build_dataset(opt, data_path, vocab_path, device, train=True):
source_path = data_path[0]
summary_cn_path = data_path[1]
summary_en_path = data_path[2]
source_field = Field(unk=True, pad=True, bos=False, eos=False)
summary_cn_field = Field(unk=True, pad=True, bos=True, eos=True)
summary_en_field = Field(unk=True, pad=True, bos=True, eos=True)
cn_vocab, en_vocab = vocab_path
source_special = source_field.special
summary_cn_special = summary_cn_field.special
summary_en_special = summary_en_field.special
if opt.share_cn_embedding:
summary_cn_special = source_special = sorted(set(source_special + summary_cn_special))
with open(cn_vocab, encoding="UTF-8") as f:
cn_words = [line.strip() for line in f]
with open(en_vocab, encoding="UTF-8") as f:
en_words = [line.strip() for line in f]
source_field.load_vocab(cn_words, source_special)
summary_cn_field.load_vocab(cn_words, summary_cn_special)
summary_en_field.load_vocab(en_words, summary_en_special)
return SumTransDataset(source_path, summary_cn_path, summary_en_path, opt.batch_size, device, train,
{'source': source_field, 'summary_cn': summary_cn_field, 'summary_en': summary_en_field})
| 1,387 | 37.555556 | 116 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/data/dataset.py
|
# -*- coding: utf-8 -*-
import random
from collections import namedtuple
from typing import Dict
import torch
from beaver.data.field import Field
Batch = namedtuple("Batch", ['source', 'summary_cn', 'summary_en', 'batch_size'])
Example = namedtuple("Example", ['source', 'summary_cn', 'summary_en'])
class SumTransDataset(object):
def __init__(self,
source_path: str,
summary_cn_path: str,
summary_en_path: str,
batch_size: int,
device: torch.device,
train: bool,
fields: Dict[str, Field]):
self.batch_size = batch_size
self.train = train
self.device = device
self.fields = fields
self.sort_key = lambda ex: (len(ex.source), len(ex.summary_cn), len(ex.summary_en))
examples = []
for src_line, cn_line, en_line in zip(read_file(source_path),
read_file(summary_cn_path),
read_file(summary_en_path)):
examples.append(Example(src_line, cn_line, en_line))
examples, self.seed = self.sort(examples)
self.num_examples = len(examples)
self.batches = list(batch(examples, self.batch_size))
def __iter__(self):
while True:
if self.train:
random.shuffle(self.batches)
for minibatch in self.batches:
source = self.fields["source"].process([x.source for x in minibatch], self.device)
summary_cn = self.fields["summary_cn"].process([x.summary_cn for x in minibatch], self.device)
summary_en = self.fields["summary_en"].process([x.summary_en for x in minibatch], self.device)
yield Batch(source=source, summary_cn=summary_cn, summary_en=summary_en, batch_size=len(minibatch))
if not self.train:
break
def sort(self, examples):
seed = sorted(range(len(examples)), key=lambda idx: self.sort_key(examples[idx]))
return sorted(examples, key=self.sort_key), seed
def read_file(path):
with open(path, encoding="utf-8") as f:
for line in f:
yield line.strip().split()
def batch(data, batch_size):
minibatch, cur_source_len, cur_target_len = [], 0, 0
for ex in data:
minibatch.append(ex)
cur_source_len = max(cur_source_len, len(ex.source))
cur_target_len = max(cur_target_len, len(ex.summary_en), len(ex.summary_cn))
if (cur_target_len + cur_source_len) * len(minibatch) > batch_size:
yield minibatch[:-1]
minibatch, cur_source_len, cur_target_len = [ex], len(ex.source), max(len(ex.summary_cn), len(ex.summary_en))
if minibatch:
yield minibatch
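# Note (added): batching is token-based rather than sentence-based: a minibatch is
# emitted as soon as (max_source_len + max_target_len) * n_examples would exceed
# `batch_size` (default 8192 tokens), e.g. max lengths 40 + 60 give about 81
# examples per minibatch.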
| 2,812 | 35.532468 | 121 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/data/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.data.utils import build_dataset
| 69 | 16.5 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/infer/beam.py
|
# -*- coding: utf-8 -*-
import torch
class Beam(object):
def __init__(self, beam_size, pad, bos, eos, device, lp):
self.size = beam_size
self.alpha = lp
self.scores = torch.full([beam_size], -1e20).float().to(device)
self.scores[0] = 0.
self.hypotheses = torch.full([1, beam_size], fill_value=pad).long().to(device)
self.hypotheses[0][0] = bos
self.eos = eos
self.finished = []
@property
def current_state(self):
return self.hypotheses[-1]
def advance(self, scores, origin, tokens):
self.scores = scores
self.hypotheses = torch.index_select(self.hypotheses, 1, origin)
self.hypotheses = torch.cat([self.hypotheses, tokens.unsqueeze(0)])
for idx, tok in enumerate(self.hypotheses[-1]):
if tok == self.eos:
self.finished.append((self.scores[idx].clone(), self.hypotheses[1:, idx]))
self.scores[idx] = -1e20
@property
def done(self):
max_score = max([self.length_penalty(score, self.hypotheses.size(0)) for score in self.scores])
max_finish = max([self.length_penalty(t[0], t[1].size(0)) for t in self.finished]) if self.finished else -1e20
return bool(max_score < max_finish)
@property
def best_hypothesis(self):
finished = sorted(self.finished, key=lambda t: self.length_penalty(t[0], t[1].size(0)), reverse=True)
if not finished:
return self.hypotheses[1:, 0]
return finished[0][1]
def length_penalty(self, score, length):
return score * (6 ** self.alpha) / ((5 + length) ** self.alpha)
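# Note (added): this is the GNMT-style length normalization
# lp(Y) = ((5 + |Y|) / 6) ** alpha, applied as score / lp(Y) and written here as
# score * 6**alpha / (5 + |Y|)**alpha; with alpha = 0.6 a 20-token hypothesis is
# divided by roughly (25/6)**0.6 ~= 2.35.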
| 1,652 | 32.06 | 118 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/infer/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.infer.translator import beam_search
| 74 | 14 | 47 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/infer/translator.py
|
# -*- coding: utf-8 -*-
import torch
from beaver.infer.beam import Beam
def beam_search(opt, model, src, fields):
batch_size = src.size(0)
beam_size = opt.beam_size
encoder = model.encoder
src = src.repeat(1, beam_size).view(batch_size * beam_size, -1)
src_pad = src.eq(fields["source"].pad_id)
src_out = encoder(src, src_pad)
p1 = beam_search_1(opt, batch_size, src, fields["summary_cn"], src_out, src_pad, model.cn_decoder, model.cn_generator)
p2 = beam_search_1(opt, batch_size, src, fields["summary_en"], src_out, src_pad, model.en_decoder, model.en_generator)
return p1, p2
def beam_search_1(opt, batch_size, src, field, src_out, src_pad, decoder, generator):
beam_size = opt.beam_size
device = src.device
num_words = generator.vocab_size
beams = [Beam(opt.beam_size, field.pad_id, field.bos_id, field.eos_id,
device, opt.length_penalty) for _ in range(batch_size)]
beam_expander = (torch.arange(batch_size) * beam_size).view(-1, 1).to(device)
previous = None
for i in range(opt.max_length):
if all((b.done for b in beams)):
break
# [batch_size x beam_size, 1]
current_token = torch.cat([b.current_state for b in beams]).unsqueeze(-1)
tgt_pad = current_token.eq(field.pad_id)
out, previous = decoder(current_token, src_out, src_pad, tgt_pad, previous, i)
previous_score = torch.stack([b.scores for b in beams]).unsqueeze(-1)
out = generator(out).view(batch_size, beam_size, -1)
if i < opt.min_length:
out[:, :, field.eos_id] = -1e15
# find topk candidates
scores, indexes = (out + previous_score).view(batch_size, -1).topk(beam_size)
# find origins and token
origins = (indexes.view(-1) // num_words).view(batch_size, beam_size)
tokens = (indexes.view(-1) % num_words).view(batch_size, beam_size)
for j, b in enumerate(beams):
b.advance(scores[j], origins[j], tokens[j])
origins = (origins + beam_expander).view(-1)
previous = torch.index_select(previous, 0, origins)
return [b.best_hypothesis for b in beams]
| 2,185 | 33.698413 | 122 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/model/embeddings.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
def positional_encoding(dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe
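# Note (added): returns a (max_len, dim) tensor of sinusoidal encodings,
# PE[pos, 2i] = sin(pos / 10000**(2i/dim)) and PE[pos, 2i+1] = cos(...), e.g.
#   pe = positional_encoding(512)  # shape (5000, 512); pe[0] = [0, 1, 0, 1, ...]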
class Embedding(nn.Module):
def __init__(self, embedding_dim, vocab_size, padding_idx, dropout):
self.word_padding_idx = padding_idx
self.embedding_dim = embedding_dim
pe = positional_encoding(embedding_dim)
super(Embedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.embedding.weight, mean=0.0, std=self.embedding_dim ** -0.5)
@property
def padding_idx(self):
return self.word_padding_idx
def forward(self, x, timestep=0):
embedding = self.embedding(x) * math.sqrt(self.embedding_dim) + self.pe[timestep:timestep + x.size(1)]
return self.dropout(embedding)
| 1,313 | 31.04878 | 110 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/model/transformer.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
class FeedForward(nn.Module):
def __init__(self, hidden_size, inner_size, dropout):
super(FeedForward, self).__init__()
self.linear_in = nn.Linear(hidden_size, inner_size, bias=False)
self.linear_out = nn.Linear(inner_size, hidden_size, bias=False)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_in.weight)
nn.init.xavier_uniform_(self.linear_out.weight)
def forward(self, x):
y = self.linear_in(x)
y = self.relu(y)
y = self.dropout(y)
y = self.linear_out(y)
return y
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(EncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(2)])
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])
def forward(self, x, mask):
# self attention
y = self.self_attn(self.norm[0](x), mask=mask)
x = x + self.dropout[0](y)
# feed forward
y = self.feed_forward(self.norm[1](x))
x = x + self.dropout[1](y)
return x
class Encoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Encoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([EncoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.norm = nn.LayerNorm(hidden_size)
def forward(self, src, src_pad):
src_mask = src_pad.unsqueeze(1)
output = self.embedding(src)
for i in range(self.num_layers):
output = self.layers[i](output, src_mask)
return self.norm(output)
class DecoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(DecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.src_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size, eps=1e-6) for _ in range(3)])
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(3)])
def forward(self, x, enc_out, src_mask, tgt_mask, previous=None):
all_input = x if previous is None else torch.cat((previous, x), dim=1)
# self attention
y = self.self_attn(self.norm[0](x), self.norm[0](all_input), mask=tgt_mask)
x = x + self.dropout[0](y)
# encoder decoder attention
y = self.src_attn(self.norm[1](x), enc_out, mask=src_mask)
x = x + self.dropout[1](y)
# feed forward
y = self.feed_forward(self.norm[2](x))
x = x + self.dropout[2](y)
return x, all_input
class Decoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Decoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([DecoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.register_buffer("upper_triangle", torch.triu(torch.ones(1000, 1000), diagonal=1).byte())
self.register_buffer("zero_mask", torch.zeros(1).byte())
self.norm = nn.LayerNorm(hidden_size, eps=1e-6)
def forward(self, tgt, enc_out, src_pad, tgt_pad, previous=None, timestep=0):
output = self.embedding(tgt, timestep)
tgt_len = tgt.size(1)
src_mask = src_pad.unsqueeze(1)
tgt_mask = tgt_pad.unsqueeze(1)
upper_triangle = self.upper_triangle[:tgt_len, :tgt_len]
# tgt mask: 0 if not upper and not pad
tgt_mask = torch.gt(tgt_mask + upper_triangle, 0)
saved_inputs = []
for i in range(self.num_layers):
prev_layer = None if previous is None else previous[:, i]
tgt_mask = tgt_mask if previous is None else self.zero_mask
output, all_input = self.layers[i](output, enc_out, src_mask, tgt_mask, prev_layer)
saved_inputs.append(all_input)
return self.norm(output), torch.stack(saved_inputs, dim=1)
class MultiHeadedAttention(nn.Module):
def __init__(self, head_count, model_dim, dropout):
self.dim_per_head = model_dim // head_count
self.head_count = head_count
super(MultiHeadedAttention, self).__init__()
self.linear_q = nn.Linear(model_dim, model_dim, bias=False)
self.linear_k = nn.Linear(model_dim, model_dim, bias=False)
self.linear_v = nn.Linear(model_dim, model_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.final_linear = nn.Linear(model_dim, model_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_q.weight)
nn.init.xavier_uniform_(self.linear_k.weight)
nn.init.xavier_uniform_(self.linear_v.weight)
nn.init.xavier_uniform_(self.final_linear.weight)
def forward(self, query, memory=None, mask=None):
memory = query if memory is None else memory
def split_head(x):
# B x L x D => B x h x L x d
return x.view(x.size(0), -1, self.head_count, self.dim_per_head).transpose(1, 2)
def combine_head(x):
# B x h x L x d => B x L x D
return x.transpose(1, 2).contiguous().view(x.size(0), -1, self.head_count * self.dim_per_head)
# 1) Project q, k, v.
q = split_head(self.linear_q(query))
k = split_head(self.linear_k(memory))
v = split_head(self.linear_v(memory))
# 2) Calculate and scale scores.
q = q / math.sqrt(self.dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3))
mask = mask.unsqueeze(1).expand_as(scores)
scores.masked_fill_(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
weights = self.dropout(self.softmax(scores))
context = combine_head(torch.matmul(weights, v))
return self.final_linear(context)
| 6,591 | 35.622222 | 120 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/model/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.model.nmt_model import NMTModel
| 70 | 13.2 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-2task/beaver/model/nmt_model.py
|
# -*- coding: utf-8 -*-
from typing import Dict
import torch
import torch.nn as nn
from beaver.model.embeddings import Embedding
from beaver.model.transformer import Decoder, Encoder
class Generator(nn.Module):
def __init__(self, hidden_size: int, tgt_vocab_size: int):
self.vocab_size = tgt_vocab_size
super(Generator, self).__init__()
self.linear_hidden = nn.Linear(hidden_size, tgt_vocab_size)
self.lsm = nn.LogSoftmax(dim=-1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_hidden.weight)
def forward(self, dec_out):
score = self.linear_hidden(dec_out)
lsm_score = self.lsm(score)
return lsm_score
class NMTModel(nn.Module):
def __init__(self, encoder: Encoder,
cn_decoder: Decoder,
en_decoder: Decoder,
cn_generator: Generator,
en_generator: Generator):
super(NMTModel, self).__init__()
self.encoder = encoder
self.cn_decoder = cn_decoder
self.en_decoder = en_decoder
self.cn_generator = cn_generator
self.en_generator = en_generator
def forward(self, source, summary_cn, summary_en):
summary_cn = summary_cn[:, :-1] # shift left
summary_en = summary_en[:, :-1] # shift left
source_pad = source.eq(self.encoder.embedding.word_padding_idx)
summary_cn_pad = summary_cn.eq(self.cn_decoder.embedding.word_padding_idx)
summary_en_pad = summary_en.eq(self.en_decoder.embedding.word_padding_idx)
enc_out = self.encoder(source, source_pad)
cn_decoder_outputs, _ = self.cn_decoder(summary_cn, enc_out, source_pad, summary_cn_pad)
en_decoder_outputs, _ = self.en_decoder(summary_en, enc_out, source_pad, summary_en_pad)
cn_scores = self.cn_generator(cn_decoder_outputs)
en_scores = self.en_generator(en_decoder_outputs)
return cn_scores, en_scores
@classmethod
def load_model(cls, model_opt,
pad_ids: Dict[str, int],
vocab_sizes: Dict[str, int],
checkpoint=None):
source_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["source"],
vocab_size=vocab_sizes["source"])
summary_en_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["summary_en"],
vocab_size=vocab_sizes["summary_en"])
if model_opt.share_cn_embedding:
summary_cn_embedding = source_embedding
else:
summary_cn_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["summary_cn"],
vocab_size=vocab_sizes["summary_cn"])
encoder = Encoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
source_embedding)
cn_decoder = Decoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
summary_cn_embedding)
en_decoder = Decoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
summary_en_embedding)
cn_generator = Generator(model_opt.hidden_size, vocab_sizes["summary_cn"])
en_generator = Generator(model_opt.hidden_size, vocab_sizes["summary_en"])
model = cls(encoder, cn_decoder, en_decoder, cn_generator, en_generator)
if checkpoint is None and model_opt.train_from:
checkpoint = torch.load(model_opt.train_from, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["model"])
elif checkpoint is not None:
model.load_state_dict(checkpoint)
return model
| 4,603 | 39.743363 | 100 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/translate.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import os
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.model import NMTModel
from beaver.utils import parseopt, get_device, calculate_bleu
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_translate_args()
device = get_device()
def translate(dataset, fields, model):
already, hypothesis, references = 0, [], []
for batch in dataset:
if opt.tf:
scores = model(batch.src, batch.tgt)
_, predictions = scores.topk(k=1, dim=-1)
else:
predictions = beam_search(opt, model, batch.src, fields)
hypothesis += [fields["tgt"].decode(p) for p in predictions]
already += len(predictions)
logging.info("Translated: %7d/%7d" % (already, dataset.num_examples))
references += [fields["tgt"].decode(t) for t in batch.tgt]
if opt.bleu:
bleu = calculate_bleu(hypothesis, references)
logging.info("BLEU: %3.2f" % bleu)
origin = sorted(zip(hypothesis, dataset.seed), key=lambda t: t[1])
hypothesis = [h for h, _ in origin]
with open(opt.output, "w", encoding="UTF-8") as out_file:
out_file.write("\n".join(hypothesis))
out_file.write("\n")
logging.info("Translation finished. ")
def main():
logging.info("Build dataset...")
dataset = build_dataset(opt, [opt.input, opt.truth or opt.input], opt.vocab, device, train=False)
fields = dataset.fields
pad_ids = {"src": fields["src"].pad_id, "tgt": fields["tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab), "tgt": len(fields["tgt"].vocab)}
# load checkpoint from model_path
logging.info("Load checkpoint from %s." % opt.model_path)
checkpoint = torch.load(opt.model_path, map_location=lambda storage, loc: storage)
logging.info("Build model...")
model = NMTModel.load_model(checkpoint["opt"], pad_ids, vocab_sizes, checkpoint["model"]).to(device).eval()
logging.info("Start translation...")
with torch.set_grad_enabled(False):
translate(dataset, fields, model)
if __name__ == '__main__':
main()
| 2,194 | 30.811594 | 111 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/train.py
|
# -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion, valid_dataset, step):
model.eval()
total_loss, total = 0.0, 0
hypothesis, references = [], []
for batch in valid_dataset:
scores = model(batch.src, batch.tgt)
loss = criterion(scores, batch.tgt)
total_loss += loss.data
total += 1
if opt.tf:
_, predictions = scores.topk(k=1, dim=-1)
else:
predictions = beam_search(opt, model, batch.src, valid_dataset.fields)
hypothesis += [valid_dataset.fields["tgt"].decode(p) for p in predictions]
references += [valid_dataset.fields["tgt"].decode(t) for t in batch.tgt]
bleu = calculate_bleu(hypothesis, references)
logging.info("Valid loss: %.2f\tValid BLEU: %3.2f" % (total_loss / total, bleu))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, bleu, total_loss / total)
def train(model, criterion, optimizer, train_dataset, valid_dataset):
total_loss = 0.0
model.zero_grad()
for i, batch in enumerate(train_dataset):
scores = model(batch.src, batch.tgt)
loss = criterion(scores, batch.tgt)
loss.backward()
total_loss += loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
mean_loss = total_loss / opt.report_every / opt.grad_accum
logging.info("step: %7d\t loss: %7f" % (optimizer.n_step, mean_loss))
total_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion, valid_dataset, optimizer.n_step)
model.train()
del loss
def main():
logging.info("Build dataset...")
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"src": fields["src"].pad_id, "tgt": fields["tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab), "tgt": len(fields["tgt"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["tgt"], pad_ids["tgt"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
| 3,303 | 32.714286 | 102 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/tools/model_average.py
|
# -*- coding: utf-8 -*-
import os
import torch
import sys
def main():
if len(sys.argv) != 3:
print("python model_average.py model_path n")
exit()
model_path = sys.argv[1]
n = int(sys.argv[2]) # last n model to be averaged
fs = [os.path.join(model_path, f) for f in os.listdir(model_path) if f.startswith("checkpoint")]
fs = sorted(fs, reverse=True)[:n]  # keep only the last n checkpoint files
n = len(fs) # actual file count
cks = [torch.load(f, map_location=lambda storage, loc: storage) for f in fs]
first_model = cks[0]["model"] # average all weights into first model and save it
for k, _ in first_model.items():
for ck in cks[1:]:
first_model[k] = (first_model[k] + ck["model"][k])
first_model[k] = first_model[k] / n
torch.save(cks[0], os.path.join(model_path, "averaged-%s-%s" % (fs[-1].split("-")[-1], fs[0].split("-")[-1])))
if __name__ == '__main__':
main()
| 941 | 29.387097 | 114 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/tools/build_vocab.py
|
# -*- coding: utf-8 -*-
import sys
import collections
log = sys.stderr.write
def main():
size = int(sys.argv[1])
counter = collections.Counter()
for line in sys.stdin:
counter.update(line.strip().split())
items = counter.most_common()
for word, _ in items[:size]:
print(word)
total = sum([c for _, c in items])
appear = sum([c for _, c in items[:size]])
log("total words: %d\n" % total)
log("words in vocab: %d\n" % appear)
log("vocab coverage: %.2f%%\n" % (1.0 * appear / total * 100))
log("total unique words: %d\n" % len(items))
if __name__ == '__main__':
main()
| 635 | 23.461538 | 66 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/__init__.py
|
# -*- coding: utf-8 -*-
| 24 | 11.5 | 23 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/loss/optimizers.py
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch.optim as optim
class WarmAdam(object):
def __init__(self, params, lr, hidden_size, warm_up, n_step):
self.original_lr = lr
self.n_step = n_step
self.hidden_size = hidden_size
self.warm_up_step = warm_up
self.optimizer = optim.Adam(params, betas=[0.9, 0.998], eps=1e-9)
def step(self):
self.n_step += 1
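        # Noam schedule: linear warm-up for warm_up_step steps, then inverse-sqrt decay,
        # scaled by hidden_size ** -0.5 (as in "Attention Is All You Need").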
warm_up = min(self.n_step ** (-0.5), self.n_step * self.warm_up_step ** (-1.5))
lr = self.original_lr * (self.hidden_size ** (-0.5) * warm_up)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.optimizer.step()
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index):
self.padding_idx = ignore_index
self.label_smoothing = label_smoothing
self.vocab_size = tgt_vocab_size
super(LabelSmoothingLoss, self).__init__()
def forward(self, output, target):
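        # output: log-probabilities [batch, tgt_len - 1, vocab]; target: gold ids [batch, tgt_len].
        # Drop the leading BOS from the target and flatten both before computing the smoothed NLL.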
target = target[:, 1:].contiguous().view(-1)
output = output.view(-1, self.vocab_size)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -output.gather(dim=-1, index=target.view(-1, 1))[non_pad_mask].sum()
smooth_loss = -output.sum(dim=-1, keepdim=True)[non_pad_mask].sum()
eps_i = self.label_smoothing / self.vocab_size
loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
return loss / non_pad_mask.float().sum()
| 1,529 | 36.317073 | 87 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/loss/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.loss.optimizers import WarmAdam, LabelSmoothingLoss
| 90 | 17.2 | 63 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/utils/saver.py
|
import json
import torch
import os
import datetime
class Saver(object):
def __init__(self, opt):
self.ckpt_names = []
self.model_path = opt.model_path + datetime.datetime.now().strftime("-%y%m%d-%H%M%S")
self.max_to_keep = opt.max_to_keep
os.mkdir(self.model_path)
with open(os.path.join(self.model_path, "params.json"), "w", encoding="UTF-8") as log:
log.write(json.dumps(vars(opt), indent=4) + "\n")
def save(self, save_dict, step, bleu, loss):
filename = "checkpoint-step-%06d" % step
full_filename = os.path.join(self.model_path, filename)
self.ckpt_names.append(full_filename)
torch.save(save_dict, full_filename)
with open(os.path.join(self.model_path, "log"), "a", encoding="UTF-8") as log:
log.write("%s\t step: %6d\t loss: %.2f\t bleu: %.2f\n" % (datetime.datetime.now(), step, loss, bleu))
if 0 < self.max_to_keep < len(self.ckpt_names):
earliest_ckpt = self.ckpt_names.pop(0)
os.remove(earliest_ckpt)
| 1,063 | 34.466667 | 113 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/utils/parseopt.py
|
# -*- coding: utf-8 -*-
import argparse
import json
def common_opts(parser):
parser.add_argument("-vocab", type=str, nargs="*", help="Vocab file")
parser.add_argument("-batch_size", type=int, default=8192, help="Batch size")
parser.add_argument("-beam_size", type=int, default=4, help="Beam size")
parser.add_argument("-max_length", type=int, default=200, help="Maximum prediction length")
parser.add_argument("-min_length", type=int, default=1, help="Minimum prediction length")
parser.add_argument("-length_penalty", type=float, default=0.6, help="Length penalty")
parser.add_argument("-model_path", default="train", help="Path to model checkpoint file")
parser.add_argument("-tf", action="store_true", help="Use teacher forcing for decoding")
def data_opts(parser):
parser.add_argument("-train", type=str, nargs=2, help="Training data")
parser.add_argument("-valid", type=str, nargs=2, help="Validation data")
def train_opts(parser):
parser.add_argument("-grad_accum", type=int, default=1, help="Accumulate gradients")
parser.add_argument("-max_to_keep", type=int, default=5, help="How many checkpoints to keep")
parser.add_argument("-report_every", type=int, default=1000, help="Report every n steps")
parser.add_argument("-save_every", type=int, default=2000, help="Valid and save model for every n steps")
parser.add_argument("-train_from", type=str, default=None, help="Train from checkpoint")
def model_opts(parser):
parser.add_argument("-layers", type=int, default=6, help="Number of layers")
parser.add_argument("-heads", type=int, default=8, help="Number of heads")
parser.add_argument("-hidden_size", type=int, default=512, help="Size of hidden states")
parser.add_argument("-ff_size", type=int, default=2048, help="Feed forward hidden size")
parser.add_argument("-lr", type=float, default=1.0, help="Learning rate")
parser.add_argument("-warm_up", type=int, default=8000, help="Warm up step")
parser.add_argument("-label_smoothing", type=float, default=0.1, help="Label smoothing rate")
parser.add_argument("-dropout", type=float, default=0.1, help="Dropout rate")
def translate_opts(parser):
parser.add_argument("-input", type=str, help="Translation data")
parser.add_argument("-truth", type=str, default=None, help="Truth target, used to calculate BLEU")
parser.add_argument("-output", default="output.txt", help="Path to output the predictions")
parser.add_argument("-bleu", action="store_true", help="Report BLEU")
def parse_train_args():
parser = argparse.ArgumentParser()
data_opts(parser)
train_opts(parser)
model_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_translate_args():
parser = argparse.ArgumentParser()
translate_opts(parser)
common_opts(parser)
return parse_args(parser)
def parse_args(parser):
parser.add_argument("-config", type=str, help="Config file")
opt = parser.parse_args()
if opt.config:
config = json.load(open(opt.config), object_hook=lambda d: {k: v for k, v in d.items() if k != "comment"})
parser.set_defaults(**config)
return parser.parse_args()
else:
return opt
| 3,249 | 41.763158 | 114 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/utils/__init__.py
|
# -*- coding: utf-8 -*-
import torch.cuda
from beaver.utils.metric import calculate_bleu, file_bleu
from beaver.utils.saver import Saver
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def printing_opt(opt):
return "\n".join(["%15s | %s" % (e[0], e[1]) for e in sorted(vars(opt).items(), key=lambda x: x[0])])
| 405 | 21.555556 | 105 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/utils/metric.py
|
import os
import re
import subprocess
import tempfile
def calculate_bleu(hypotheses, references, lowercase=False):
hypothesis_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
hypothesis_file.write("\n".join(hypotheses) + "\n")
hypothesis_file.close()
reference_file = tempfile.NamedTemporaryFile(mode="w", encoding="UTF-8", delete=False)
reference_file.write("\n".join(references) + "\n")
reference_file.close()
return file_bleu(hypothesis_file.name, reference_file.name, lowercase)
def file_bleu(hypothesis, reference, lowercase=False):
    # multi-bleu.perl lives in the repository's tools/ directory, i.e. three dirname() calls up from this file.
beaver_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
multi_bleu_path = os.path.join(beaver_path, "tools", "multi-bleu.perl")
with open(hypothesis, "r") as read_pred, open(os.devnull, "w") as black_hole:
bleu_cmd = ["perl", multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference]
try:
bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=black_hole).decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
except subprocess.CalledProcessError:
bleu_score = -1.0
return float(bleu_score)
| 1,328 | 39.272727 | 108 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/data/field.py
|
# -*- coding: utf-8 -*-
from typing import List
import torch
EOS_TOKEN = "<eos>"
BOS_TOKEN = "<bos>"
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
class Field(object):
def __init__(self, bos: bool, eos: bool, pad: bool, unk: bool):
self.bos_token = BOS_TOKEN if bos else None
self.eos_token = EOS_TOKEN if eos else None
self.unk_token = UNK_TOKEN if unk else None
self.pad_token = PAD_TOKEN if pad else None
self.vocab = None
def load_vocab(self, words: List[str], specials: List[str]):
self.vocab = Vocab(words, specials)
def process(self, batch, device):
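        # Pad every example to the longest in the batch, adding BOS/EOS where configured,
        # then encode tokens to an id tensor on the target device.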
max_len = max(len(x) for x in batch)
padded, length = [], []
for x in batch:
bos = [self.bos_token] if self.bos_token else []
eos = [self.eos_token] if self.eos_token else []
pad = [self.pad_token] * (max_len - len(x))
padded.append(bos + x + eos + pad)
length.append(len(x) + len(bos) + len(eos))
padded = torch.tensor([self.encode(ex) for ex in padded])
return padded.long().to(device)
def encode(self, tokens):
ids = []
for tok in tokens:
if tok in self.vocab.stoi:
ids.append(self.vocab.stoi[tok])
else:
ids.append(self.unk_id)
return ids
def decode(self, ids):
tokens = []
for tok in ids:
tok = self.vocab.itos[tok]
if tok == self.eos_token:
break
if tok == self.bos_token:
continue
tokens.append(tok)
        # Strip BPE markers ("@@") and split "-" into separate tokens, following the T2T convention.
return " ".join(tokens).replace("@@ ", "").replace("@@", "").replace("-", " - ")
@property
def special(self):
return [tok for tok in [self.unk_token, self.pad_token, self.bos_token, self.eos_token] if tok is not None]
@property
def pad_id(self):
return self.vocab.stoi[self.pad_token]
@property
def eos_id(self):
return self.vocab.stoi[self.eos_token]
@property
def bos_id(self):
return self.vocab.stoi[self.bos_token]
@property
def unk_id(self):
return self.vocab.stoi[self.unk_token]
class Vocab(object):
def __init__(self, words: List[str], specials: List[str]):
self.itos = specials + words
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
def __len__(self):
return len(self.itos)
| 2,466 | 25.815217 | 115 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/data/utils.py
|
# -*- coding: utf-8 -*-
from beaver.data.dataset import TranslationDataset
from beaver.data.field import Field
def build_dataset(opt, data_path, vocab_path, device, train=True):
src = data_path[0]
tgt = data_path[1]
src_field = Field(unk=True, pad=True, bos=False, eos=False)
tgt_field = Field(unk=True, pad=True, bos=True, eos=True)
if len(vocab_path) == 1:
# use shared vocab
src_vocab = tgt_vocab = vocab_path[0]
src_special = tgt_special = sorted(set(src_field.special + tgt_field.special))
else:
src_vocab, tgt_vocab = vocab_path
src_special = src_field.special
tgt_special = tgt_field.special
with open(src_vocab, encoding="UTF-8") as f:
src_words = [line.strip() for line in f]
with open(tgt_vocab, encoding="UTF-8") as f:
tgt_words = [line.strip() for line in f]
src_field.load_vocab(src_words, src_special)
tgt_field.load_vocab(tgt_words, tgt_special)
return TranslationDataset(src, tgt, opt.batch_size, device, train, {'src': src_field, 'tgt': tgt_field})
| 1,083 | 31.848485 | 108 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/data/dataset.py
|
# -*- coding: utf-8 -*-
import random
from collections import namedtuple
from typing import Dict
import torch
from beaver.data.field import Field
Batch = namedtuple("Batch", ['src', 'tgt', 'batch_size'])
Example = namedtuple("Example", ['src', 'tgt'])
class TranslationDataset(object):
def __init__(self,
src_path: str,
tgt_path: str,
batch_size: int,
device: torch.device,
train: bool,
fields: Dict[str, Field]):
self.batch_size = batch_size
self.train = train
self.device = device
self.fields = fields
self.sort_key = lambda ex: (len(ex.src), len(ex.tgt))
examples = []
for src_line, tgt_line in zip(read_file(src_path), read_file(tgt_path)):
examples.append(Example(src_line, tgt_line))
examples, self.seed = self.sort(examples)
self.num_examples = len(examples)
self.batches = list(batch(examples, self.batch_size))
def __iter__(self):
while True:
if self.train:
random.shuffle(self.batches)
for minibatch in self.batches:
src = self.fields["src"].process([x.src for x in minibatch], self.device)
tgt = self.fields["tgt"].process([x.tgt for x in minibatch], self.device)
yield Batch(src=src, tgt=tgt, batch_size=len(minibatch))
if not self.train:
break
def sort(self, examples):
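        # Sort examples by (src, tgt) length; `seed` records the sorting permutation.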
seed = sorted(range(len(examples)), key=lambda idx: self.sort_key(examples[idx]))
return sorted(examples, key=self.sort_key), seed
def read_file(path):
with open(path, encoding="utf-8") as f:
for line in f:
yield line.strip().split()
def batch(data, batch_size):
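    # Token-level batching: flush the current minibatch once
    # (length of the longest example) * (number of examples) exceeds batch_size.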
minibatch, cur_len = [], 0
for ex in data:
minibatch.append(ex)
cur_len = max(cur_len, len(ex.src), len(ex.tgt))
if cur_len * len(minibatch) > batch_size:
yield minibatch[:-1]
minibatch, cur_len = [ex], max(len(ex.src), len(ex.tgt))
if minibatch:
yield minibatch
| 2,164 | 29.069444 | 89 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/data/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.data.utils import build_dataset
| 69 | 16.5 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/infer/beam.py
|
# -*- coding: utf-8 -*-
import torch
class Beam(object):
def __init__(self, beam_size, pad, bos, eos, device, lp):
self.size = beam_size
self.alpha = lp
self.scores = torch.full([beam_size], -1e20).float().to(device)
self.scores[0] = 0.
self.hypotheses = torch.full([1, beam_size], fill_value=pad).long().to(device)
self.hypotheses[0][0] = bos
self.eos = eos
self.finished = []
@property
def current_state(self):
return self.hypotheses[-1]
def advance(self, scores, origin, tokens):
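        # scores: updated beam scores; origin: indices of the parent hypotheses; tokens: newly selected token ids.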
self.scores = scores
self.hypotheses = torch.index_select(self.hypotheses, 1, origin)
self.hypotheses = torch.cat([self.hypotheses, tokens.unsqueeze(0)])
for idx, tok in enumerate(self.hypotheses[-1]):
if tok == self.eos:
self.finished.append((self.scores[idx].clone(), self.hypotheses[1:, idx]))
self.scores[idx] = -1e20
@property
def done(self):
max_score = max([self.length_penalty(score, self.hypotheses.size(0)) for score in self.scores])
max_finish = max([self.length_penalty(t[0], t[1].size(0)) for t in self.finished]) if self.finished else -1e20
return bool(max_score < max_finish)
@property
def best_hypothesis(self):
finished = sorted(self.finished, key=lambda t: self.length_penalty(t[0], t[1].size(0)), reverse=True)
if not finished:
return self.hypotheses[1:, 0]
return finished[0][1]
def length_penalty(self, score, length):
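        # GNMT-style length penalty: equivalent to dividing the score by ((5 + length) / 6) ** alpha.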
return score * (6 ** self.alpha) / ((5 + length) ** self.alpha)
| 1,652 | 32.06 | 118 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/infer/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.infer.translator import beam_search
| 74 | 14 | 47 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/infer/translator.py
|
# -*- coding: utf-8 -*-
import torch
from beaver.infer.beam import Beam
def beam_search(opt, model, src, fields):
batch_size = src.size(0)
beam_size = opt.beam_size
device = src.device
num_words = model.generator.vocab_size
encoder = model.encoder
decoder = model.decoder
generator = model.generator
beams = [Beam(opt.beam_size, fields["tgt"].pad_id, fields["tgt"].bos_id, fields["tgt"].eos_id,
device, opt.length_penalty) for _ in range(batch_size)]
src = src.repeat(1, beam_size).view(batch_size*beam_size, -1)
src_pad = src.eq(fields["src"].pad_id)
src_out = encoder(src, src_pad)
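    # Offsets that map per-sentence beam indices back into the flattened (batch_size * beam_size) dimension.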
beam_expander = (torch.arange(batch_size) * beam_size).view(-1, 1).to(device)
previous = None
for i in range(opt.max_length):
if all((b.done for b in beams)):
break
# [batch_size x beam_size, 1]
current_token = torch.cat([b.current_state for b in beams]).unsqueeze(-1)
tgt_pad = current_token.eq(fields["tgt"].pad_id)
out, previous = decoder(current_token, src_out, src_pad, tgt_pad, previous, i)
previous_score = torch.stack([b.scores for b in beams]).unsqueeze(-1)
out = generator(out).view(batch_size, beam_size, -1)
if i < opt.min_length:
out[:, :, fields["tgt"].eos_id] = -1e15
# find topk candidates
scores, indexes = (out + previous_score).view(batch_size, -1).topk(beam_size)
        # recover the parent beam (origin) and the newly generated token for each candidate
origins = (indexes.view(-1) // num_words).view(batch_size, beam_size)
tokens = (indexes.view(-1) % num_words).view(batch_size, beam_size)
for j, b in enumerate(beams):
b.advance(scores[j], origins[j], tokens[j])
origins = (origins + beam_expander).view(-1)
previous = torch.index_select(previous, 0, origins)
return [b.best_hypothesis for b in beams]
| 1,903 | 32.403509 | 98 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/model/embeddings.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
def positional_encoding(dim, max_len=5000):
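    # Sinusoidal position encodings ("Attention Is All You Need"): sine on even dimensions, cosine on odd ones.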
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe
class Embedding(nn.Module):
def __init__(self, embedding_dim, vocab_size, padding_idx, dropout):
self.word_padding_idx = padding_idx
self.embedding_dim = embedding_dim
pe = positional_encoding(embedding_dim)
super(Embedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.embedding.weight, mean=0.0, std=self.embedding_dim ** -0.5)
@property
def padding_idx(self):
return self.word_padding_idx
def forward(self, x, timestep=0):
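        # Scale embeddings by sqrt(embedding_dim) and add positional encodings starting at `timestep`
        # (non-zero during incremental decoding).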
embedding = self.embedding(x) * math.sqrt(self.embedding_dim) + self.pe[timestep:timestep + x.size(1)]
return self.dropout(embedding)
| 1,313 | 31.04878 | 110 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/model/transformer.py
|
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
class FeedForward(nn.Module):
def __init__(self, hidden_size, inner_size, dropout):
super(FeedForward, self).__init__()
self.linear_in = nn.Linear(hidden_size, inner_size, bias=False)
self.linear_out = nn.Linear(inner_size, hidden_size, bias=False)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_in.weight)
nn.init.xavier_uniform_(self.linear_out.weight)
def forward(self, x):
y = self.linear_in(x)
y = self.relu(y)
y = self.dropout(y)
y = self.linear_out(y)
return y
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(EncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(2)])
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])
def forward(self, x, mask):
# self attention
y = self.self_attn(self.norm[0](x), mask=mask)
x = x + self.dropout[0](y)
# feed forward
y = self.feed_forward(self.norm[1](x))
x = x + self.dropout[1](y)
return x
class Encoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Encoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([EncoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.norm = nn.LayerNorm(hidden_size)
def forward(self, src, src_pad):
src_mask = src_pad.unsqueeze(1)
output = self.embedding(src)
for i in range(self.num_layers):
output = self.layers[i](output, src_mask)
return self.norm(output)
class DecoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, head_count, ff_size):
super(DecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.src_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
self.norm = nn.ModuleList([nn.LayerNorm(hidden_size, eps=1e-6) for _ in range(3)])
self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(3)])
def forward(self, x, enc_out, src_mask, tgt_mask, previous=None):
all_input = x if previous is None else torch.cat((previous, x), dim=1)
# self attention
y = self.self_attn(self.norm[0](x), self.norm[0](all_input), mask=tgt_mask)
x = x + self.dropout[0](y)
# encoder decoder attention
y = self.src_attn(self.norm[1](x), enc_out, mask=src_mask)
x = x + self.dropout[1](y)
# feed forward
y = self.feed_forward(self.norm[2](x))
x = x + self.dropout[2](y)
return x, all_input
class Decoder(nn.Module):
def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
self.num_layers = num_layers
super(Decoder, self).__init__()
self.embedding = embedding
self.layers = nn.ModuleList([DecoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
self.register_buffer("upper_triangle", torch.triu(torch.ones(1000, 1000), diagonal=1).byte())
self.register_buffer("zero_mask", torch.zeros(1).byte())
self.norm = nn.LayerNorm(hidden_size, eps=1e-6)
def forward(self, tgt, enc_out, src_pad, tgt_pad, previous=None, timestep=0):
output = self.embedding(tgt, timestep)
tgt_len = tgt.size(1)
src_mask = src_pad.unsqueeze(1)
tgt_mask = tgt_pad.unsqueeze(1)
upper_triangle = self.upper_triangle[:tgt_len, :tgt_len]
        # target mask: a position is masked if it is padding or lies above the diagonal (a future token)
tgt_mask = torch.gt(tgt_mask + upper_triangle, 0)
saved_inputs = []
for i in range(self.num_layers):
prev_layer = None if previous is None else previous[:, i]
tgt_mask = tgt_mask if previous is None else self.zero_mask
output, all_input = self.layers[i](output, enc_out, src_mask, tgt_mask, prev_layer)
saved_inputs.append(all_input)
return self.norm(output), torch.stack(saved_inputs, dim=1)
class MultiHeadedAttention(nn.Module):
def __init__(self, head_count, model_dim, dropout):
self.dim_per_head = model_dim // head_count
self.head_count = head_count
super(MultiHeadedAttention, self).__init__()
self.linear_q = nn.Linear(model_dim, model_dim, bias=False)
self.linear_k = nn.Linear(model_dim, model_dim, bias=False)
self.linear_v = nn.Linear(model_dim, model_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.final_linear = nn.Linear(model_dim, model_dim)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_q.weight)
nn.init.xavier_uniform_(self.linear_k.weight)
nn.init.xavier_uniform_(self.linear_v.weight)
nn.init.xavier_uniform_(self.final_linear.weight)
def forward(self, query, memory=None, mask=None):
memory = query if memory is None else memory
def split_head(x):
# B x L x D => B x h x L x d
return x.view(x.size(0), -1, self.head_count, self.dim_per_head).transpose(1, 2)
def combine_head(x):
# B x h x L x d => B x L x D
return x.transpose(1, 2).contiguous().view(x.size(0), -1, self.head_count * self.dim_per_head)
# 1) Project q, k, v.
q = split_head(self.linear_q(query))
k = split_head(self.linear_k(memory))
v = split_head(self.linear_v(memory))
# 2) Calculate and scale scores.
q = q / math.sqrt(self.dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3))
mask = mask.unsqueeze(1).expand_as(scores)
scores.masked_fill_(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
weights = self.dropout(self.softmax(scores))
context = combine_head(torch.matmul(weights, v))
return self.final_linear(context)
| 6,591 | 35.622222 | 120 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/model/__init__.py
|
# -*- coding: utf-8 -*-
from beaver.model.nmt_model import NMTModel
| 70 | 13.2 | 43 |
py
|
NCLS-Corpora
|
NCLS-Corpora-master/code/beaver-base/beaver/model/nmt_model.py
|
# -*- coding: utf-8 -*-
from typing import Dict
import torch
import torch.nn as nn
from beaver.model.embeddings import Embedding
from beaver.model.transformer import Decoder, Encoder
class Generator(nn.Module):
def __init__(self, hidden_size: int, tgt_vocab_size: int):
self.vocab_size = tgt_vocab_size
super(Generator, self).__init__()
self.linear_hidden = nn.Linear(hidden_size, tgt_vocab_size)
self.lsm = nn.LogSoftmax(dim=-1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.linear_hidden.weight)
def forward(self, dec_out):
score = self.linear_hidden(dec_out)
lsm_score = self.lsm(score)
return lsm_score
class NMTModel(nn.Module):
def __init__(self, encoder: Encoder, decoder: Decoder, generator: Generator):
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
def forward(self, src, tgt):
tgt = tgt[:, :-1] # shift left
src_pad = src.eq(self.encoder.embedding.word_padding_idx)
tgt_pad = tgt.eq(self.decoder.embedding.word_padding_idx)
enc_out = self.encoder(src, src_pad)
decoder_outputs, _ = self.decoder(tgt, enc_out, src_pad, tgt_pad)
scores = self.generator(decoder_outputs)
return scores
@classmethod
def load_model(cls, model_opt,
pad_ids: Dict[str, int],
vocab_sizes: Dict[str, int],
checkpoint=None):
src_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["src"],
vocab_size=vocab_sizes["src"])
if len(model_opt.vocab) == 2:
tgt_embedding = Embedding(embedding_dim=model_opt.hidden_size,
dropout=model_opt.dropout,
padding_idx=pad_ids["tgt"],
vocab_size=vocab_sizes["tgt"])
else:
# use shared word embedding for source and target
tgt_embedding = src_embedding
encoder = Encoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
src_embedding)
decoder = Decoder(model_opt.layers,
model_opt.heads,
model_opt.hidden_size,
model_opt.dropout,
model_opt.ff_size,
tgt_embedding)
generator = Generator(model_opt.hidden_size, vocab_sizes["tgt"])
model = cls(encoder, decoder, generator)
if model_opt.train_from and checkpoint is None:
checkpoint = torch.load(model_opt.train_from, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["model"])
elif checkpoint is not None:
model.load_state_dict(checkpoint)
return model
| 3,231 | 34.516484 | 100 |
py
|