repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
Paddle
|
Paddle-master/paddle/gserver/tests/img_conv_exconv.py
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=10)
data = data_layer(name="input", size=8 * 16 * 16)
conv = img_conv_layer(
input=data,
filter_size=1,
filter_size_y=1,
num_channels=8,
num_filters=16,
stride=1,
bias_attr=True,
act=LinearActivation(),
groups=2,
layer_type="exconv")
outputs(conv)
| 957 | 28.9375 | 73 |
py
|
Paddle
|
Paddle-master/paddle/gserver/tests/pyDataProvider.py
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy
import struct
import traceback
def header_creator():
ret = ""
ret += struct.pack('i', 3) # slot num
ret += struct.pack('i', 1) # sequence flag
ret += struct.pack('i', 0) # slot0 dense type
ret += struct.pack('i', 3) # slot0 dim
ret += struct.pack('i', 1) # slot1 sparse non value type
ret += struct.pack('i', 7) # slot1 dim
ret += struct.pack('i', 3) # slot2 index type
ret += struct.pack('i', 2) # slot2 dim
return ret
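# Illustrative check (not part of the original provider): the header built
# above is eight natively packed ints (4 bytes each on common platforms), so
# unpacking it is a quick way to confirm the layout the comments describe.
def _check_header_layout():
    header = header_creator()
    slot_num, seq_flag = struct.unpack('ii', header[:8])
    slot_type_dims = struct.unpack('6i', header[8:])
    assert slot_num == 3 and seq_flag == 1
    assert slot_type_dims == (0, 3, 1, 7, 3, 2)  # (type, dim) per slot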
def dense_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot0 sample num
for i in range(sample_num): # slot0 value
ret += struct.pack('f', 1.0)
ret += struct.pack('f', 2.0)
ret += struct.pack('f', 3.0)
return ret
def sparse_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot1 sample num
for i in range(sample_num): # slot1 index
ret += struct.pack('i', i * 2)
ret += struct.pack('i', sample_num * 2) #slot1 length
for i in range(sample_num): # slot1 value
ret += struct.pack('i', 1)
ret += struct.pack('i', 2)
return ret
def index_value_creator(sample_num):
ret = ""
ret += struct.pack('i', sample_num) # slot2 sample num
for i in range(sample_num): # slot2 value
ret += struct.pack('i', 0)
return ret
def sequenceStartPositions_creator():
ret = ""
ret += struct.pack('i', 2) # slot0 sequence num
ret += struct.pack('i', 0) # slot0 sequence value1
ret += struct.pack('i', 1) # slot0 sequence value2
ret += struct.pack('i', 1) # slot1 sequence num
ret += struct.pack('i', 0) # slot1 sequence value1
ret += struct.pack('i', 2) # slot2 sequence num
ret += struct.pack('i', 0) # slot2 sequence value1
ret += struct.pack('i', 1) # slot2 sequence value2
return ret
def subSequenceStartPositions_creator():
ret = ""
ret += struct.pack('i', 3) # slot0 subsequence num
ret += struct.pack('i', 0) # slot0 subsequence value1
ret += struct.pack('i', 1) # slot0 subsequence value2
ret += struct.pack('i', 2) # slot0 subsequence value3
ret += struct.pack('i', 2) # slot1 subsequence num
ret += struct.pack('i', 0) # slot1 subsequence value1
ret += struct.pack('i', 1) # slot1 subsequence value2
ret += struct.pack('i', 3) # slot2 subsequence num
ret += struct.pack('i', 0) # slot2 subsequence value1
ret += struct.pack('i', 1) # slot2 subsequence value2
ret += struct.pack('i', 2) # slot2 subsequence value3
return ret
class SimpleDataProvider:
def __init__(self, *file_list):
self.file_list = file_list
def shuffle(self):
pass
def reset(self):
pass
def getHeader(self):
return header_creator()
def getNextBatch(self, batch_size):
ret = ""
ret += struct.pack('i', 2) # batch size
ret += dense_value_creator(2) # slot0
ret += sparse_value_creator(2) # slot1
ret += index_value_creator(2) # slot2
ret += sequenceStartPositions_creator()
return ret
class SimpleNestDataProvider:
def __init__(self, *file_list):
self.file_list = file_list
def shuffle(self):
pass
def reset(self):
pass
def getHeader(self):
return header_creator()
def getNextBatch(self, batch_size):
ret = ""
ret += struct.pack('i', 2) # batch size
ret += dense_value_creator(4) # slot0
ret += sparse_value_creator(4) # slot1
ret += index_value_creator(4) # slot2
ret += sequenceStartPositions_creator()
ret += subSequenceStartPositions_creator()
return ret
if __name__ == "__main__":
# test code
data_provider = SimpleDataProvider('./test_batch')
print len(data_provider.getHeader())
print len(data_provider.getNextBatch(2))
data_provider = SimpleNestDataProvider('./test_batch')
print len(data_provider.getHeader())
print len(data_provider.getNextBatch(2))
| 4,664 | 30.734694 | 73 |
py
|
Paddle
|
Paddle-master/doc/fluid/api/gen_doc.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import sys
import types
import paddle.fluid as fluid
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument('--submodules', nargs="*")
parser.add_argument(
'module', type=str, help='Generate the documentation of which module')
return parser.parse_args()
class DocGenerator(object):
def __init__(self, module_name, stream=sys.stdout):
self.stream = stream
self.module_name = module_name
if not hasattr(fluid, module_name):
raise ValueError("Cannot find fluid.{0}".format(module_name))
else:
self.module = getattr(fluid, module_name)
self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
!DO NOT EDIT THIS FILE MANUALLY!
''')
self._print_header_(module_name, dot='=', is_title=True)
def print_submodule(self, submodule_name):
submodule = getattr(self.module, submodule_name)
if submodule is None:
raise ValueError("Cannot find submodule {0}".format(submodule_name))
self.print_section(submodule_name)
for item in submodule.__all__:
self.print_item(item)
def print_current_module(self):
for item in self.module.__all__:
self.print_item(item)
def print_section(self, name):
self._print_header_(name, dot='=', is_title=False)
def print_item(self, name):
item = getattr(self.module, name)
if isinstance(item, types.TypeType):
self.print_class(name)
elif isinstance(item, types.FunctionType):
self.print_method(name)
else:
raise RuntimeError("Unsupported item {0}".format(name))
def print_class(self, name):
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autoclass:: paddle.fluid.{0}.{1}
:members:
:noindex:
'''.format(self.module_name, name))
def print_method(self, name):
self._print_header_(name, dot='-', is_title=False)
self.stream.write('''.. autofunction:: paddle.fluid.{0}.{1}
:noindex:
'''.format(self.module_name, name))
def _print_header_(self, name, dot, is_title):
dot_line = dot * len(name)
if is_title:
self.stream.write(dot_line)
self.stream.write('\n')
self.stream.write(name)
self.stream.write('\n')
self.stream.write(dot_line)
self.stream.write('\n')
self.stream.write('\n')
def main():
args = parse_arg()
gen = DocGenerator(args.module)
if args.submodules is None:
gen.print_current_module()
else:
for submodule_name in args.submodules:
gen.print_submodule(submodule_name)
if __name__ == '__main__':
main()
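# Illustrative usage (module/submodule names here are examples, not an
# exhaustive list): the generator emits reStructuredText for one paddle.fluid
# module on stdout, so it is typically redirected into an .rst file, e.g.
#
#   python gen_doc.py optimizer > optimizer.rst
#   python gen_doc.py layers --submodules control_flow io > layers.rst
#
# or used programmatically:
#
#   gen = DocGenerator('optimizer')
#   gen.print_current_module()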
| 3,426 | 30.154545 | 80 |
py
|
Paddle
|
Paddle-master/doc/fluid/dev/src/fc.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def fc(input,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
name=None):
"""
**Fully Connected Layer**
The fully connected layer can take multiple tensors as its inputs. It
creates a variable called weights for each input tensor, which represents
a fully connected weight matrix from each input unit to each output unit.
    The fully connected layer multiplies each input tensor with its corresponding
    weight to produce an output Tensor. If multiple input tensors are given,
    the results of the multiplications will be summed up. If bias_attr is
not None, a bias variable will be created and added to the output. Finally,
if activation is not None, it will be applied to the output as well.
This process can be formulated as follows:
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
* :math:`N`: Number of the input.
* :math:`X_i`: The input tensor.
* :math:`W`: The weights created by this layer.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output tensor.
Args:
input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of
the input tensor(s) is at least 2.
size(int): The number of output units in this layer.
num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than
two dimensions. If this happens, the multidimensional tensor will first be flattened
into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input
tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)
    dimensions will be flattened to form the first dimension of the final matrix (height of
    the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to
    form the second dimension of the final matrix (width of the matrix). For example, suppose
    `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30].
param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable
parameters/weights of this layer.
bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias
of this layer. If it is set to None, no bias will be added to the output units.
act (str, default None): Activation to be applied to the output of this layer.
name (str, default None): The name of this layer.
Returns:
A tensor variable storing the transformation result.
Raises:
ValueError: If rank of the input tensor is less than 2.
Examples:
.. code-block:: python
data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
"""
| 3,759 | 44.853659 | 101 |
py
|
Paddle
|
Paddle-master/doc/v2/faq/local/src/reduce_min_pool_size.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@provider(min_pool_size=0, ...)
def process(settings, filename):
os.system('shuf %s > %s.shuf' % (filename, filename)) # shuffle before.
with open('%s.shuf' % filename, 'r') as f:
for line in f:
yield get_sample_from_line(line)
| 871 | 38.636364 | 76 |
py
|
Paddle
|
Paddle-master/doc/v2/faq/local/src/word2vec_dataprovider.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DICT_DIM = 3000
@provider(input_types=[integer_sequence(DICT_DIM), integer_value(DICT_DIM)])
def process(settings, filename):
with open(filename) as f:
# yield word ids to predict inner word id
# such as [28, 29, 10, 4], 4
        # It means the sentence is 28, 29, 4, 10, 4.
yield read_next_from_file(f)
| 949 | 37 | 76 |
py
|
Paddle
|
Paddle-master/doc/v2/faq/local/src/word2vec_config.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
... # the settings and the data provider definition are omitted.
DICT_DIM = 3000 # dictionary dimension.
word_ids = data_layer('word_ids', size=DICT_DIM)
emb = embedding_layer(
input=word_ids, size=256, param_attr=ParamAttr(sparse_update=True))
emb_sum = pooling_layer(input=emb, pooling_type=SumPooling())
predict = fc_layer(input=emb_sum, size=DICT_DIM, act=Softmax())
outputs(
classification_cost(
input=predict, label=data_layer(
'label', size=DICT_DIM)))
| 1,095 | 39.592593 | 74 |
py
|
Paddle
|
Paddle-master/doc/v2/getstarted/concepts/src/infer.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import numpy as np
paddle.init(use_gpu=False)
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
# load the model parameters generated by training
with open('params_pass_90.tar', 'r') as f:
parameters = paddle.parameters.Parameters.from_tar(f)
# Input multiple sets of data; output the inference results as an array.
i = [[[1, 2]], [[3, 4]], [[5, 6]]]
print paddle.infer(output_layer=y_predict, parameters=parameters, input=i)
# Will print:
# [[ -3.24491572]
# [ -6.94668722]
# [-10.64845848]]
| 1,231 | 36.333333 | 76 |
py
|
Paddle
|
Paddle-master/doc/v2/getstarted/concepts/src/train.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import numpy as np
# init paddle
paddle.init(use_gpu=False)
# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2))
y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.square_error_cost(input=y_predict, label=y)
# create parameters
parameters = paddle.parameters.create(cost)
# create optimizer
optimizer = paddle.optimizer.Momentum(momentum=0)
# create trainer
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=optimizer)
# event_handler to print training info
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 1 == 0:
print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id,
event.cost)
    # save the model parameters every 10 passes
if isinstance(event, paddle.event.EndPass):
if event.pass_id % 10 == 0:
with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
trainer.save_parameter_to_tar(f)
# define training dataset reader
def train_reader():
train_x = np.array([[1, 1], [1, 2], [3, 4], [5, 2]])
train_y = np.array([[-2], [-3], [-7], [-7]])
def reader():
for i in xrange(train_y.shape[0]):
yield train_x[i], train_y[i]
return reader
# define feeding map
feeding = {'x': 0, 'y': 1}
# training
trainer.train(
reader=paddle.batch(
train_reader(), batch_size=1),
feeding=feeding,
event_handler=event_handler,
num_passes=100)
| 2,336 | 31.458333 | 80 |
py
|
Paddle
|
Paddle-master/doc/v2/howto/cluster/multi_cluster/src/k8s_train/start_paddle.py
|
#!/usr/bin/python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import time
import socket
import os
import argparse
# configuration for cluster
API = "/api/v1/namespaces/"
JOBSELECTOR = "labelSelector=job-name="
JOB_PATH = os.getenv("JOB_PATH") + "/" + os.getenv("JOB_NAME")
JOB_PATH_OUTPUT = JOB_PATH + "/output"
JOBNAME = os.getenv("JOB_NAME")
NAMESPACE = os.getenv("JOB_NAMESPACE")
PADDLE_NIC = os.getenv("CONF_PADDLE_NIC")
PADDLE_PORT = os.getenv("CONF_PADDLE_PORT")
PADDLE_PORTS_NUM = os.getenv("CONF_PADDLE_PORTS_NUM")
PADDLE_PORTS_NUM_SPARSE = os.getenv("CONF_PADDLE_PORTS_NUM_SPARSE")
PADDLE_SERVER_NUM = os.getenv("CONF_PADDLE_GRADIENT_NUM")
tokenpath = '/var/run/secrets/kubernetes.io/serviceaccount/token'
def refine_unknown_args(cmd_args):
'''
    normalize unknown command-line parameters: strip the leading dashes and
    split '--key=value' style arguments into ['key', 'value'] pairs
'''
new_args = []
for arg in cmd_args:
if arg.startswith("--") and arg.find("=") != -1:
equal_pos = arg.find("=") # find first = pos
arglist = list(arg)
arglist[equal_pos] = " "
arg = "".join(arglist)
arg = arg.lstrip("-")
new_args += arg.split(" ")
elif arg.startswith("--") and arg.find("=") == -1:
arg = arg.lstrip("-")
new_args.append(arg)
else:
new_args.append(arg)
return new_args
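# Example of the normalization above (illustrative argument values):
#   refine_unknown_args(["--use_gpu=1", "--trainer_count", "4"])
#   returns ["use_gpu", "1", "trainer_count", "4"]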
def isPodAllRunning(podlist):
'''
    check whether all pods of the job are running
'''
require = len(podlist["items"])
running = 0
for pod in podlist["items"]:
if pod["status"]["phase"] == "Running":
running += 1
print "waiting for pods running, require:", require, "running:", running
if require == running:
return True
return False
def getPodList():
'''
    get the status of all pods belonging to the job
'''
apiserver = "https://" + \
os.getenv("KUBERNETES_SERVICE_HOST") + ":" + \
os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")
pod = API + NAMESPACE + "/pods?"
job = JOBNAME
if os.path.isfile(tokenpath):
tokenfile = open(tokenpath, mode='r')
token = tokenfile.read()
Bearer = "Bearer " + token
headers = {"Authorization": Bearer}
return requests.get(apiserver + pod + JOBSELECTOR + job,
headers=headers,
verify=False).json()
else:
return requests.get(apiserver + pod + JOBSELECTOR + job,
verify=False).json()
def getIdMap(podlist):
'''
    generate a trainer_id for each pod from its IP
'''
ips = []
for pod in podlist["items"]:
ips.append(pod["status"]["podIP"])
ips.sort()
idMap = {}
for i in range(len(ips)):
idMap[ips[i]] = i
return idMap
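# Example (illustrative IPs): for pods at ["10.0.0.3", "10.0.0.1", "10.0.0.2"]
# the sorted mapping is {"10.0.0.1": 0, "10.0.0.2": 1, "10.0.0.3": 2}, so every
# pod derives the same trainer_id assignment from the shared pod list.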
def startPaddle(idMap={}, train_args_dict=None):
'''
start paddle pserver and trainer
'''
program = 'paddle train'
args = " --nics=" + PADDLE_NIC
args += " --port=" + str(PADDLE_PORT)
args += " --ports_num=" + str(PADDLE_PORTS_NUM)
args += " --comment=" + "paddle_process_by_paddle"
ip_string = ""
for ip in idMap.keys():
ip_string += (ip + ",")
ip_string = ip_string.rstrip(",")
args += " --pservers=" + ip_string
args_ext = ""
for key, value in train_args_dict.items():
args_ext += (' --' + key + '=' + value)
localIP = socket.gethostbyname(socket.gethostname())
trainerId = idMap[localIP]
args += " " + args_ext + " --trainer_id=" + \
str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT
logDir = JOB_PATH_OUTPUT + "/node_" + str(trainerId)
if not os.path.exists(JOB_PATH_OUTPUT):
os.makedirs(JOB_PATH_OUTPUT)
if not os.path.exists(logDir):
os.mkdir(logDir)
copyCommand = 'cp -rf ' + JOB_PATH + \
"/" + str(trainerId) + "/data/*" + " ./data/"
os.system(copyCommand)
startPserver = 'nohup paddle pserver' + \
" --port=" + str(PADDLE_PORT) + \
" --ports_num=" + str(PADDLE_PORTS_NUM) + \
" --ports_num_for_sparse=" + str(PADDLE_PORTS_NUM_SPARSE) + \
" --nics=" + PADDLE_NIC + \
" --comment=" + "paddle_process_by_paddle" + \
" --num_gradient_servers=" + str(PADDLE_SERVER_NUM) +\
" > " + logDir + "/server.log 2>&1 &"
print startPserver
os.system(startPserver)
# wait until pservers completely start
time.sleep(20)
startTrainer = program + args + " 2>&1 | tee " + \
logDir + "/train.log"
print startTrainer
os.system(startTrainer)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog="start_paddle.py", description='simple tool for k8s')
args, train_args_list = parser.parse_known_args()
train_args = refine_unknown_args(train_args_list)
train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2]))
podlist = getPodList()
# need to wait until all pods are running
while not isPodAllRunning(podlist):
time.sleep(20)
podlist = getPodList()
idMap = getIdMap(podlist)
startPaddle(idMap, train_args_dict)
| 5,637 | 31.97076 | 76 |
py
|
Paddle
|
Paddle-master/doc/v2/howto/cluster/src/word2vec/api_train_v2.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import math
import paddle.v2 as paddle
embsize = 32
hiddensize = 256
N = 5
def wordemb(inlayer):
wordemb = paddle.layer.embedding(
input=inlayer,
size=embsize,
param_attr=paddle.attr.Param(
name="_proj",
initial_std=0.001,
learning_rate=1,
l2_rate=0,
sparse_update=True))
return wordemb
def main():
# for local training
cluster_train = False
if not cluster_train:
paddle.init(use_gpu=False, trainer_count=1)
else:
paddle.init(
use_gpu=False,
trainer_count=2,
port=7164,
ports_num=1,
ports_num_for_sparse=1,
num_gradient_servers=1)
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(input=contextemb,
size=hiddensize,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embsize * 8),
learning_rate=1))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
with gzip.open("batch-" + str(event.batch_id) + ".tar.gz",
'w') as f:
trainer.save_parameter_to_tar(f)
result = trainer.test(
paddle.batch(
paddle.dataset.imikolov.test(word_dict, N), 32))
print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
result.metrics)
cost = paddle.layer.classification_cost(input=predictword, label=nextword)
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(8e-4))
trainer = paddle.trainer.SGD(cost,
parameters,
adagrad,
is_local=not cluster_train)
trainer.train(
paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32),
num_passes=30,
event_handler=event_handler)
if __name__ == '__main__':
main()
| 4,208 | 35.6 | 79 |
py
|
Paddle
|
Paddle-master/doc/v2/howto/cluster/src/word2vec/api_train_v2_cluster.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import paddle.v2 as paddle
import pickle
embsize = 32
hiddensize = 256
N = 5
cluster_train_file = "./train_data_dir/train/train.txt"
cluster_test_file = "./test_data_dir/test/test.txt"
node_id = os.getenv("OMPI_COMM_WORLD_RANK")
if not node_id:
    raise EnvironmentError("must provide OMPI_COMM_WORLD_RANK")
def wordemb(inlayer):
wordemb = paddle.layer.embedding(
input=inlayer,
size=embsize,
param_attr=paddle.attr.Param(
name="_proj",
initial_std=0.001,
learning_rate=1,
l2_rate=0,
sparse_update=True))
return wordemb
def cluster_reader_cluster(filename, node_id):
def cluster_reader():
with open("-".join([filename, "%05d" % int(node_id)]), "r") as f:
for l in f:
csv_data = [int(cell) for cell in l.split(",")]
yield tuple(csv_data)
return cluster_reader
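# Example (illustrative): cluster_reader_cluster(cluster_train_file, "3") reads
# "./train_data_dir/train/train.txt-00003", i.e. the zero-padded shard that the
# `split -d -a 5` step in prepare.py writes for rank 3.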
def main():
# get arguments from env
# for local training
TRUTH = ["true", "True", "TRUE", "1", "yes", "Yes", "YES"]
cluster_train = os.getenv('PADDLE_CLUSTER_TRAIN', "False") in TRUTH
    use_gpu = os.getenv('PADDLE_INIT_USE_GPU', "False") in TRUTH
if not cluster_train:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")))
else:
paddle.init(
use_gpu=use_gpu,
trainer_count=int(os.getenv("PADDLE_INIT_TRAINER_COUNT", "1")),
port=int(os.getenv("PADDLE_INIT_PORT", "7164")),
ports_num=int(os.getenv("PADDLE_INIT_PORTS_NUM", "1")),
ports_num_for_sparse=int(
os.getenv("PADDLE_INIT_PORTS_NUM_FOR_SPARSE", "1")),
num_gradient_servers=int(
os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "1")),
trainer_id=int(os.getenv("PADDLE_INIT_TRAINER_ID", "0")),
pservers=os.getenv("PADDLE_INIT_PSERVERS", "127.0.0.1"))
fn = open("thirdparty/wuyi_train_thdpty/word_dict.pickle", "r")
word_dict = pickle.load(fn)
fn.close()
dict_size = len(word_dict)
firstword = paddle.layer.data(
name="firstw", type=paddle.data_type.integer_value(dict_size))
secondword = paddle.layer.data(
name="secondw", type=paddle.data_type.integer_value(dict_size))
thirdword = paddle.layer.data(
name="thirdw", type=paddle.data_type.integer_value(dict_size))
fourthword = paddle.layer.data(
name="fourthw", type=paddle.data_type.integer_value(dict_size))
nextword = paddle.layer.data(
name="fifthw", type=paddle.data_type.integer_value(dict_size))
Efirst = wordemb(firstword)
Esecond = wordemb(secondword)
Ethird = wordemb(thirdword)
Efourth = wordemb(fourthword)
contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
hidden1 = paddle.layer.fc(input=contextemb,
size=hiddensize,
act=paddle.activation.Sigmoid(),
layer_attr=paddle.attr.Extra(drop_rate=0.5),
bias_attr=paddle.attr.Param(learning_rate=2),
param_attr=paddle.attr.Param(
initial_std=1. / math.sqrt(embsize * 8),
learning_rate=1))
predictword = paddle.layer.fc(input=hidden1,
size=dict_size,
bias_attr=paddle.attr.Param(learning_rate=2),
act=paddle.activation.Softmax())
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
result = trainer.test(
paddle.batch(
cluster_reader_cluster(cluster_test_file, node_id), 32))
print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
result.metrics)
cost = paddle.layer.classification_cost(input=predictword, label=nextword)
parameters = paddle.parameters.create(cost)
adagrad = paddle.optimizer.AdaGrad(
learning_rate=3e-3,
regularization=paddle.optimizer.L2Regularization(8e-4))
trainer = paddle.trainer.SGD(cost,
parameters,
adagrad,
is_local=not cluster_train)
trainer.train(
paddle.batch(cluster_reader_cluster(cluster_train_file, node_id), 32),
num_passes=30,
event_handler=event_handler)
if __name__ == '__main__':
main()
| 5,340 | 37.702899 | 80 |
py
|
Paddle
|
Paddle-master/doc/v2/howto/cluster/src/word2vec/prepare.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import tarfile
import os
import pickle
SPLIT_COUNT = 3
N = 5
def file_len(fd):
for i, l in enumerate(fd):
pass
return i + 1
def split_from_reader_by_line(filename, reader, split_count):
fn = open(filename, "w")
for batch_id, batch_data in enumerate(reader()):
batch_data_str = [str(d) for d in batch_data]
fn.write(",".join(batch_data_str))
fn.write("\n")
fn.close()
fn = open(filename, "r")
total_line_count = file_len(fn)
fn.close()
per_file_lines = total_line_count / split_count + 1
cmd = "split -d -a 5 -l %d %s %s-" % (per_file_lines, filename, filename)
os.system(cmd)
word_dict = paddle.dataset.imikolov.build_dict()
with open("word_dict.pickle", "w") as dict_f:
pickle.dump(word_dict, dict_f)
split_from_reader_by_line("train.txt",
paddle.dataset.imikolov.train(word_dict, N),
SPLIT_COUNT)
split_from_reader_by_line("test.txt",
paddle.dataset.imikolov.test(word_dict, N),
SPLIT_COUNT)
| 1,727 | 29.857143 | 77 |
py
|
Paddle
|
Paddle-master/go/pserver/client/c/test/test_train.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import paddle.v2.dataset.uci_housing as uci_housing
import paddle.v2.master as master
import os
import cPickle as pickle
from paddle.v2.reader.creator import cloud_reader
etcd_ip = os.getenv("MASTER_IP", "127.0.0.1")
etcd_endpoints = "http://" + etcd_ip + ":2379"
print "etcd endpoints: ", etcd_endpoints
def main():
# init
paddle.init(use_gpu=False, trainer_count=1)
# network config
x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
y_predict = paddle.layer.fc(input=x,
param_attr=paddle.attr.Param(name='w'),
size=1,
act=paddle.activation.Linear(),
bias_attr=paddle.attr.Param(name='b'))
y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
cost = paddle.layer.mse_cost(input=y_predict, label=y)
# create parameters
parameters = paddle.parameters.create(cost)
# create optimizer of new remote updater to pserver
optimizer = paddle.optimizer.Momentum(momentum=0, learning_rate=1e-3)
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=optimizer,
is_local=False,
pserver_spec=etcd_endpoints,
use_etcd=True)
# event_handler to print training and testing info
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
# FIXME: for cloud data reader, pass number is managed by master
# should print the server side pass number
if event.batch_id % 100 == 0:
print "Pass %d, Batch %d, Cost %f" % (
event.pass_id, event.batch_id, event.cost)
if isinstance(event, paddle.event.EndPass):
if (event.pass_id + 1) % 10 == 0:
result = trainer.test(
reader=paddle.batch(
uci_housing.test(), batch_size=2),
feeding={'x': 0,
'y': 1})
print "Test %d, %.2f" % (event.pass_id, result.cost)
# training
# NOTE: use uci_housing.train() as reader for non-paddlecloud training
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(
cloud_reader(
["/pfs/dlnel/public/dataset/uci_housing/uci_housing*"],
etcd_endpoints),
buf_size=500),
batch_size=2),
feeding={'x': 0,
'y': 1},
event_handler=event_handler,
num_passes=30)
if __name__ == '__main__':
main()
| 3,406 | 36.855556 | 76 |
py
|
Paddle
|
Paddle-master/go/pserver/client/c/test/test_mnist.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2 as paddle
import gzip
def softmax_regression(img):
predict = paddle.layer.fc(input=img,
size=10,
act=paddle.activation.Softmax())
return predict
def multilayer_perceptron(img):
# The first fully-connected layer
hidden1 = paddle.layer.fc(input=img, size=128, act=paddle.activation.Relu())
# The second fully-connected layer and the according activation function
hidden2 = paddle.layer.fc(input=hidden1,
size=64,
act=paddle.activation.Relu())
    # The third fully-connected layer, note that the hidden size should be 10,
# which is the number of unique digits
predict = paddle.layer.fc(input=hidden2,
size=10,
act=paddle.activation.Softmax())
return predict
def convolutional_neural_network(img):
# first conv layer
conv_pool_1 = paddle.networks.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
num_channel=1,
pool_size=2,
pool_stride=2,
act=paddle.activation.Tanh())
# second conv layer
conv_pool_2 = paddle.networks.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
num_channel=20,
pool_size=2,
pool_stride=2,
act=paddle.activation.Tanh())
# The first fully-connected layer
fc1 = paddle.layer.fc(input=conv_pool_2,
size=128,
act=paddle.activation.Tanh())
# The softmax layer, note that the hidden size should be 10,
# which is the number of unique digits
predict = paddle.layer.fc(input=fc1,
size=10,
act=paddle.activation.Softmax())
return predict
def main():
paddle.init(use_gpu=False, trainer_count=1)
# define network topology
images = paddle.layer.data(
name='pixel', type=paddle.data_type.dense_vector(784))
label = paddle.layer.data(
name='label', type=paddle.data_type.integer_value(10))
# Here we can build the prediction network in different ways. Please
    # choose one by uncommenting the corresponding line.
predict = softmax_regression(images)
#predict = multilayer_perceptron(images)
#predict = convolutional_neural_network(images)
cost = paddle.layer.classification_cost(input=predict, label=label)
parameters = paddle.parameters.create(cost)
optimizer = paddle.optimizer.Momentum(
learning_rate=0.1 / 128.0,
momentum=0.9,
regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128))
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=optimizer,
is_local=False,
pserver_spec="localhost:3000")
lists = []
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 1000 == 0:
print "Pass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
elif isinstance(event, paddle.event.EndPass):
result = trainer.test(reader=paddle.batch(
paddle.dataset.mnist.test(), batch_size=128))
print "Test with Pass %d, Cost %f, %s\n" % (
event.pass_id, result.cost, result.metrics)
lists.append((event.pass_id, result.cost,
result.metrics['classification_error_evaluator']))
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
batch_size=128),
event_handler=event_handler,
num_passes=100)
# find the best pass
best = sorted(lists, key=lambda list: float(list[1]))[0]
print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100)
test_creator = paddle.dataset.mnist.test()
test_data = []
for item in test_creator():
test_data.append((item[0], ))
if len(test_data) == 100:
break
# output is a softmax layer. It returns probabilities.
# Shape should be (100, 10)
probs = paddle.infer(
output_layer=predict, parameters=parameters, input=test_data)
print probs.shape
if __name__ == '__main__':
main()
| 5,239 | 34.890411 | 80 |
py
|
atari-irl
|
atari-irl-master/atari_irl/optimizers.py
|
from baselines.ppo2.ppo2 import Model, constfn
from .sampling import PPOBatch, PPOSample
import numpy as np
from collections import namedtuple
"""
Heavily based on the ppo2 implementation found in the OpenAI baselines library,
particularly the ppo_trainsteps function.
"""
BatchingConfig = namedtuple('BatchingInfo', [
'nbatch', 'nbatch_train', 'noptepochs', 'nenvs', 'nsteps', 'nminibatches'
])
def make_batching_config(*, nenvs, nsteps, noptepochs, nminibatches):
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
return BatchingConfig(
nbatch=nbatch, nbatch_train=nbatch_train, noptepochs=noptepochs,
nenvs=nenvs, nsteps=nsteps, nminibatches=nminibatches
)
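# Worked example (illustrative numbers): nenvs=8, nsteps=128, nminibatches=4
# gives nbatch = 8 * 128 = 1024 transitions per sampling round and
# nbatch_train = 1024 // 4 = 256 transitions per gradient step.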
def ppo_train_steps(
*,
model: Model,
run_info: PPOBatch,
batching_config: BatchingConfig,
lrnow: float,
cliprangenow: float,
nbatch_train: int
): # I'm not quite sure what type mblossvals is
states = run_info.states
nbatch, noptepochs = batching_config.nbatch, batching_config.noptepochs
nenvs, nminibatches = batching_config.nenvs, batching_config.nminibatches
nsteps = batching_config.nsteps
mblossvals = []
if states is None: # nonrecurrent version
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in run_info.train_args())
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in run_info.train_args())
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
return mblossvals
class PPOOptimizer:
def __init__(
self,
*,
batching_config: BatchingConfig,
lr=3e-4,
cliprange=0.2,
total_timesteps=10e6
):
# Deal with the policy
self.policy = None
# Set things based on the batching config
self.batching_config = batching_config
self.nbatch = self.batching_config.nbatch
self.nupdates = total_timesteps // self.batching_config.nbatch
# Deal with constant arguments
if isinstance(lr, float):
lr = constfn(lr)
else:
assert callable(lr)
if isinstance(cliprange, float):
cliprange = constfn(cliprange)
else:
assert callable(cliprange)
self.lr = lr
self.cliprange = cliprange
def update_opt(self, policy):
self.policy = policy
assert hasattr(policy, 'model')
assert isinstance(policy.model, Model)
def optimize_policy(self, itr: int, samples: PPOSample):
# compute our learning rate and clip ranges
assert self.nbatch % self.batching_config.nminibatches == 0
nbatch_train = self.nbatch // self.batching_config.nminibatches
frac = 1.0 - (itr - 1.0) / self.nupdates
assert frac > 0.0
lrnow = self.lr(frac)
cliprangenow = self.cliprange(frac)
for batch in samples.to_ppo_batches(self.batching_config.nbatch):
# Run the training steps for PPO
mblossvals = ppo_train_steps(
model=self.policy.model,
run_info=batch,
batching_config=self.batching_config,
lrnow=lrnow,
cliprangenow=cliprangenow,
nbatch_train=nbatch_train
)
return np.mean(mblossvals, axis=0)
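    # Annealing sketch (illustrative numbers): with total_timesteps=10e6 and
    # nbatch=1024, nupdates = 10e6 // 1024 = 9765 and frac falls linearly from
    # 1.0 at itr=1 to roughly 1e-4 at the last update. frac only changes
    # lrnow/cliprangenow when lr/cliprange are passed as callables; float
    # values are wrapped in constfn and ignore it.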
| 4,263 | 32.574803 | 86 |
py
|
atari-irl
|
atari-irl-master/atari_irl/training.py
|
import numpy as np
from baselines import logger
from baselines.common import explained_variance
from baselines.ppo2.ppo2 import safemean
from baselines.ppo2 import ppo2
from . import policies
from .sampling import PPOBatchSampler, DummyAlgo
from .optimizers import PPOOptimizer, make_batching_config
from collections import deque, namedtuple
import time
PPOBatch = namedtuple('PPOBatch', [
'obs', 'returns', 'masks', 'actions', 'values', 'neglogpacs', 'states',
'epinfos'
])
PPOBatch.train_args = lambda self: (
self.obs, self.returns, self.masks, self.actions, self.values, self.neglogpacs
)
def print_log(
*, model, run_info, batching_config,
lossvals, update, fps, epinfobuf, tnow, tfirststart
):
ev = explained_variance(run_info.values, run_info.returns)
logger.logkv("serial_timesteps", update * batching_config.nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update * batching_config.nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
logger.dumpkvs()
def setup_policy(*, model_args, nenvs, ob_space, ac_space, env, save_env, checkpoint):
if checkpoint:
policy = policies.restore_policy_from_checkpoint_dir(
checkpoint_dir=checkpoint, envs=env
)
        assert policy.model_args == model_args
if isinstance(policy, policies.EnvPolicy):
assert nenvs == policy.envs.num_envs
assert ob_space == policy.envs.observation_space
assert ac_space == policy.envs.action_space
env = policy.envs
else:
if save_env:
policy = policies.EnvPolicy(model_args=model_args, envs=env)
else:
policy = policies.Policy(model_args)
return policy
def ppo_samples_to_trajectory_format(ppo_samples, num_envs=8):
    # Assumption (the original helper was left unfinished): ppo_samples is a
    # tuple of sf01-flattened arrays of shape (nsteps * num_envs, ...) in the
    # order listed in names_to_indices below, so unravelling is a reshape back
    # to (nsteps, num_envs, ...).
    def unravel_index(index, ppo_samples, num_envs):
        arr = np.asarray(ppo_samples[index])
        nsteps = arr.shape[0] // num_envs
        new_shape = (num_envs, nsteps) + arr.shape[1:]
        return arr.reshape(new_shape).swapaxes(0, 1)
    # This is what IRLTRPO/IRLNPO actually uses
    # this will totally explode if we don't have them
    names_to_indices = {
        # The data that IRLTRPO/IRLNPO uses
        # we should expect the rllab-formatted code to explode if it
        # needs something else
        'observations': 0,
        'actions': 3,
        # The data that PPO uses
        'returns': 1,
        'dones': 2,
        'values': 4,
        'neglogpacs': 5
    }
    unraveled = dict(
        (key, unravel_index(index, ppo_samples, num_envs))
        for key, index in names_to_indices.items()
    )
    # This is a special case because TRPO wants advantages, but PPO
    # doesn't compute it
    returns = unravel_index(1, ppo_samples, num_envs)
    values = unravel_index(4, ppo_samples, num_envs)
    unraveled['advantages'] = returns - values
    T = unraveled['observations'].shape[0]
    for key, value in unraveled.items():
        assert len(value) == T
    # one accumulator per environment, each holding per-timestep lists
    trajectories = [
        dict((key, []) for key in unraveled.keys()) for _ in range(num_envs)
    ]
    for t in range(T):
        for key in unraveled.keys():
            for i in range(num_envs):
                trajectories[i][key].append(unraveled[key][t][i])
    # stack everything except the raw actions, which downstream code may still
    # need to one-hot encode (see Trajectory.finalize in sampling.py)
    for i in range(num_envs):
        for key in unraveled.keys():
            if key != 'actions':
                trajectories[i][key] = np.asarray(trajectories[i][key])
    return trajectories
class Learner:
def __init__(self, policy_class, env, *, total_timesteps, nsteps=2048,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5,
gamma=0.99, lam=0.95, nminibatches=4, noptepochs=4,
cliprange=0.2, checkpoint=None, save_env=True):
# The random seed should already be set before running this
print(locals())
total_timesteps = int(total_timesteps)
batching_config = make_batching_config(
nenvs=env.num_envs,
nsteps=nsteps,
noptepochs=noptepochs,
nminibatches=nminibatches
)
ob_space = env.observation_space
ac_space = env.action_space
model_args = dict(
policy=policy_class, ob_space=ob_space, ac_space=ac_space,
nbatch_act=batching_config.nenvs,
nbatch_train=batching_config.nbatch_train,
nsteps=batching_config.nsteps,
ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm
)
policy = setup_policy(
model_args=model_args, env=env, nenvs=env.num_envs, checkpoint=checkpoint,
ob_space=ob_space, ac_space=ac_space, save_env=save_env
)
sampler = PPOBatchSampler(DummyAlgo(policy), baselines_venv=env, nsteps=nsteps)
optimizer = PPOOptimizer(
batching_config=batching_config,
lr=lr, cliprange=cliprange, total_timesteps=total_timesteps
)
optimizer.update_opt(policy)
self.gamma = gamma
self.lam = lam
# Set our major learner objects
self.policy = policy
self.model = policy.model
self.sampler = sampler
self.optimizer = optimizer
# Set our last few run configurations
self.callbacks = []
# Initialize the objects that will change as we learn
self._update = 1
self._epinfobuf = deque(maxlen=100)
self._tfirststart = None
self._run_info = None
self._tnow = None
self._fps = None
self._lossvals = None
self._itr = None
@property
def update(self):
return self._update
def obtain_samples(self, itr):
# Run the model on the environments
self._run_info = self.sampler.run()#.to_ppo_batch()
self._epinfobuf.extend(self._run_info.epinfos)
self._itr = itr
def optimize_policy(self, itr):
assert self._itr == itr
if not self._tfirststart:
self._tfirststart = time.time()
tstart = time.time()
# Actually do the optimization
self._lossvals = self.optimizer.optimize_policy(itr, self._run_info)
self._tnow = time.time()
self._fps = int(self.optimizer.nbatch / (self._tnow - tstart))
for check, fn in self.callbacks:
if check(self.update):
fn(**locals())
self._update += 1
if self._update > self.optimizer.nupdates:
logger.log("Warning, exceeded planned number of updates")
def step(self):
self.obtain_samples(self._update)
self.optimize_policy(self._update)
def register_callback(self, check, fn):
self.callbacks.append((check, fn))
@staticmethod
def check_update_interval(freq, include_first=True):
return lambda i: i % freq == 0 or include_first and i == 1
@staticmethod
def print_log(self, **kwargs):
print_log(
model=self.model, batching_config=self.optimizer.batching_config,
update=self.update, epinfobuf=self._epinfobuf,
tfirststart=self._tfirststart, run_info=self._run_info,
lossvals=self._lossvals, fps=self._fps, tnow=self._tnow
)
def learn_and_yield(self, yield_fn, yield_freq, log_freq=None):
if log_freq:
self.register_callback(
self.check_update_interval(log_freq),
self.print_log
)
should_yield = self.check_update_interval(yield_freq)
while self.update < self.optimizer.nupdates:
self.step()
if should_yield(self.update):
yield yield_fn(self)
yield yield_fn(self)
| 7,735 | 32.634783 | 87 |
py
|
atari-irl
|
atari-irl-master/atari_irl/utils.py
|
"""
This may all be thrown away soonish, but I could imagine keeping these design
patterns in some form or other.
I hope that most of our patches to the baselines + gym code can happen in this
library, and not need to move into other parts of the code.
Desiderata:
- Not introduce too many dependencies over Adam's patched baselines library
- Basically work and be easy to use
- Contain most of our other patches over other libraries
- Generate useful information about whether or not we want to keep this
incarnation of things
This is heavily based on
- https://github.com/openai/baselines/blob/master/baselines/ppo2/run_mujoco.py
- https://github.com/AdamGleave/baselines/tree/master/baselines/ppo2
"""
import tensorflow as tf
import numpy as np
from baselines import bench, logger
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common import set_global_seeds
from atari_irl import environments
from atari_irl.environments import one_hot
import gym
import csv
import matplotlib.pyplot as plt
def optional_teardown(context, teardown_on_context_exit=True):
if teardown_on_context_exit:
return context
else:
context.teardown = context.__exit__
def no_args_safe_exit(*args):
args = [None, None, None] if not args else args
context.teardown(*args)
context.__exit__ = no_args_safe_exit
return context
class TfContext:
def __init__(self, ncpu=1):
config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=ncpu,
inter_op_parallelism_threads=ncpu,
device_count={'GPU': 1},
)
config.gpu_options.allow_growth=True
self.tf_session = tf.Session(config=config)
def __enter__(self):
self.tf_session.__enter__()
return self
def __exit__(self, *args):
self.tf_session.__exit__(*args)
tf.reset_default_graph()
class EnvironmentContext:
def __init__(self, *, env_name=None, make_env=None, seed, n_envs=1, env_modifiers=list(), vec_env_modifiers=list()):
self.env_name = env_name
if make_env is None:
make_env = lambda: gym.make(self.env_name)
self.make_env = make_env
self.n_envs = n_envs
self.env_modifiers = env_modifiers
self.vec_env_modifiers = vec_env_modifiers
self.seed = seed
def __enter__(self):
def make_env(i):
def _thunk():
env = self.make_env()
env.seed(i)
for fn in self.env_modifiers:
env = fn(env)
env = bench.Monitor(env, logger.get_dir(), allow_early_resets=True)
return env
return _thunk
set_global_seeds(self.seed)
self.base_vec_env = SubprocVecEnv([make_env(i + self.seed) for i in range(self.n_envs)])
self.environments = self.base_vec_env
for fn in self.vec_env_modifiers:
self.environments = fn(self.environments)
return self
def __exit__(self, *args):
self.base_vec_env.close()
def read_cols_from_dict(dirname, *cols, start=0, end=-1):
ans = dict([(c, []) for c in cols])
with open(dirname + '/progress.csv', 'r') as f:
reader = csv.DictReader(f)
for row in reader:
for c in cols:
ans[c].append(float(row[c]))
return (ans[c][start:end] for c in cols)
def plot_from_dirname(dirname):
plt.plot(*read_cols_from_dict(dirname,'total_timesteps', 'eprewmean'))
def batched_call(fn, batch_size, args, check_safety=True):
N = args[0].shape[0]
for arg in args:
assert arg.shape[0] == N
# Things get super slow if we don't do this
if N == batch_size:
return fn(*args)
arg0_batches = []
fn_results = []
start = 0
def slice_result(result, subslice):
if isinstance(result, dict):
return dict(
(key, value[subslice])
for key, value in result.items()
)
else:
return result[subslice]
def add_batch(*args_batch, subslice=None):
results_batch = fn(*args_batch)
if subslice:
results_batch = [slice_result(r, subslice) for r in results_batch]
args_batch = [slice_result(r, subslice) for r in args_batch]
fn_results.append(results_batch)
if check_safety:
arg0_batches.append(args_batch[0])
# add data for all of the batches that cleanly fit inside the batch size
for start in range(0, N - batch_size, batch_size):
end = start + batch_size
add_batch(*[arg[start:end] for arg in args])
# add data for the last batch that would run past the end of the data if it
# were full
start += batch_size
if start != N:
remainder_slice = slice(start - N, batch_size)
add_batch(
*(arg[N - batch_size:N] for arg in args),
subslice=remainder_slice
)
# integrity check
if check_safety:
final_arg0 = np.vstack(arg0_batches)
# reshape everything
final_results = []
for i, res in enumerate(fn_results[0]):
if isinstance(res, np.ndarray) or isinstance(res, list):
final_results.append(
np.vstack([results_batch[i] for results_batch in fn_results])
)
elif isinstance(res, dict):
for key, item in res.items():
assert isinstance(item, np.ndarray) or isinstance(item, list)
final_results.append(dict(
(
key,
np.vstack([
results_batch[i][key] for results_batch in fn_results
])
)
for key in res.keys()
))
else:
raise NotImplementedError
# Integrity checks in case I wrecked this
if check_safety:
assert len(final_arg0) == N
assert np.isclose(final_arg0, args[0]).all()
return final_results
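# Illustrative usage (fn and shapes are hypothetical): apply a fixed-batch-size
# policy step to N inputs that need not divide evenly into batches.
#
#   obs = np.zeros((1000, 84, 84, 4), dtype=np.float32)
#   actions, values = batched_call(step_fn, 256, (obs,))
#
# The trailing partial batch is evaluated on the last 256 rows and only the
# `subslice` covering the not-yet-seen rows is kept, so each row contributes
# exactly once to the stacked result.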
class TfEnvContext:
def __init__(self, tf_cfg, env_config):
self.tf_cfg = tf_cfg
self.env_config = env_config
self.seed = env_config['seed']
env_modifiers = environments.env_mapping[env_config['env_name']]
one_hot_code = env_config.pop('one_hot_code')
if one_hot_code:
env_modifiers = environments.one_hot_wrap_modifiers(env_modifiers)
self.env_config.update(env_modifiers)
def __enter__(self):
self.env_context = EnvironmentContext(**self.env_config)
self.env_context.__enter__()
self.train_graph = tf.Graph()
self.tg_context = self.train_graph.as_default()
self.tg_context.__enter__()
self.sess = tf.Session(config=self.tf_cfg)
# from tensorflow.python import debug as tf_debug
# sess = tf_debug.LocalCLIDebugWrapperSession(sess , ui_type='readline')
self.sess_context = self.sess.as_default()
self.sess_context.__enter__()
tf.set_random_seed(self.seed)
return self
def __exit__(self, *args):
self.sess_context.__exit__(*args)
self.tg_context.__exit__(*args)
self.env_context.__exit__(*args)
| 7,278 | 31.066079 | 120 |
py
|
atari-irl
|
atari-irl-master/atari_irl/sampling.py
|
import numpy as np
import tensorflow as tf
from . import utils
from collections import namedtuple, deque
from rllab.misc.overrides import overrides
from rllab.sampler.base import BaseSampler
from sandbox.rocky.tf.samplers.vectorized_sampler import VectorizedSampler
from baselines.ppo2 import ppo2
"""
Heavily based on the ppo2 implementation found in the OpenAI baselines library,
particularly in the PPOSampler class.
"""
# This is a PPO Batch that the OpenAI Baselines PPO code uses as its underlying
# representation
PPOBatch = namedtuple('PPOBatch', [
'obs', 'returns', 'masks', 'actions', 'values', 'neglogpacs', 'states',
'epinfos'
])
PPOBatch.train_args = lambda self: (
self.obs, self.returns, self.masks, self.actions, self.values, self.neglogpacs
)
# This is a full trajectory, using the interface defines by RLLab and the
# AIRL library
class Trajectory:
def __init__(self):
self.observations = []
self.actions = []
self.rewards = []
self.env_infos = {
'dones': []
}
self.agent_infos = {
'values': [],
'neglogpacs': [],
'prob': []
}
self.is_finalized = False
self.added_data = {}
def __getitem__(self, key):
if hasattr(self, key):
return getattr(self, key)
else:
return self.added_data[key]
def __setitem__(self, key, value):
if hasattr(self, key):
setattr(self, key, value)
else:
self.added_data[key] = value
def __contains__(self, key):
return hasattr(self, key) or key in self.added_data
def add_ppo_batch_data(self, obs, act, rew, done, value, neglogpac, prob):
self.observations.append(obs)
self.actions.append(act)
self.rewards.append(rew)
self.env_infos['dones'].append(done)
self.agent_infos['values'].append(value)
self.agent_infos['neglogpacs'].append(neglogpac)
self.agent_infos['prob'].append(prob)
assert np.isclose(1.0, prob.sum())
def finalize(self):
assert not self.is_finalized
self.observations = np.asarray(self.observations)
self.actions = utils.one_hot(self.actions, 6)
self.rewards = np.asarray(self.rewards)
self.is_finalized = True
class Trajectories:
def __init__(self, trajectories, ppo_sample=None):
self.trajectories = trajectories
self.ppo_sample = ppo_sample
def __getitem__(self, idx):
return self.trajectories[idx]
def __len__(self):
return len(self.trajectories)
def to_ppo_sample(self) -> 'PPOSample':
# This is kind of hacky: ideally we would do a real round-trip conversion,
# but that would be slower than just returning the PPOSample this object
# was built from (or None if it wasn't built from one)
if self.ppo_sample is not None:
return self.ppo_sample
class PPOSample:
"""
A trajectory slice generated according to the PPO batch logic.
This can be transformed into both a PPOBatch (for PPO training) and a
Trajectories object (for discriminator training), although the latter does
not actually correspond to full trajectories. See the sketch after this
class for the two conversion calls.
"""
def __init__(
self, obs, rewards, actions, values, dones, neglogpacs, states,
epinfos, sampler
):
self.obs = np.asarray(obs)
self.rewards = np.asarray(rewards)
self.returns = rewards # match PPOBatch
self.actions = np.asarray(actions)
self.values = np.asarray(values)
self.dones = np.asarray(dones)
self.neglogpacs = np.asarray(neglogpacs)
self.states = states
self.epinfos = epinfos
self.sampler = sampler
self.sample_batch_timesteps = self.obs.shape[0]
self.sample_batch_num_envs = self.obs.shape[1]
self.sample_batch_size = (
self.sample_batch_timesteps * self.sample_batch_num_envs
)
self.train_batch_size = self.sampler.model.train_model.X.shape[0].value
assert self.sample_batch_size % self.train_batch_size == 0
self.obs_next = None
self.actions_next = None
self.probabilities = self._get_sample_probabilities()
def to_ppo_batches(self, batch_size):
all_data = self.sampler.process_to_ppo_batch(
self, gamma=self.sampler.gamma, lam=self.sampler.lam
)
if all_data.states is not None:
raise NotImplementedError
N = all_data.obs.shape[0]
assert N % batch_size == 0
for start in range(0, N, batch_size):
end = start + batch_size
yield PPOBatch(
all_data.obs[start:end],
all_data.returns[start:end],
all_data.masks[start:end],
all_data.actions[start:end],
all_data.values[start:end],
all_data.neglogpacs[start:end],
None,
None
)
def to_ppo_batch(self):
return self.sampler.process_to_ppo_batch(
self, gamma=self.sampler.gamma, lam=self.sampler.lam
)
def _ravel_time_env_batch_to_train_batch(self, inpt):
assert inpt.shape[0] == self.sample_batch_timesteps
assert inpt.shape[1] == self.sample_batch_num_envs
num_train_batches = self.sample_batch_size // self.train_batch_size
# change the first index into environments, not timesteps
ans = inpt.swapaxes(0, 1
# reshape first indices into # of batches x train batch size
).reshape(
num_train_batches, self.train_batch_size, *inpt.shape[2:]
)
return ans
def _ravel_train_batch_to_time_env_batch(self, inpt):
# reshape things into number of envs x number of timesteps
ans = inpt.reshape(
self.sample_batch_num_envs,
self.sample_batch_timesteps,
*inpt.shape[2:]
# swap the timesteps back into the first index
).swapaxes(0, 1)
assert ans.shape[0] == self.sample_batch_timesteps
assert ans.shape[1] == self.sample_batch_num_envs
return ans
def _get_sample_probabilities(self):
train_batched_obs = self._ravel_time_env_batch_to_train_batch(self.obs)
ps = np.asarray([
# we weirdly don't have direct access to the probabilities anywhere
# so we need to construct this node from the logits
self.sampler.get_probabilities_for_obs(train_batch_obs)
for train_batch_obs in train_batched_obs
])
return self._ravel_train_batch_to_time_env_batch(ps)
def to_trajectories(self) -> 'Trajectories':
T = len(self.obs)
num_envs = self.obs[0].shape[0]
buffer = [Trajectory() for _ in range(num_envs)]
for t in range(T):
for e in range(num_envs):
buffer[e].add_ppo_batch_data(
self.obs[t][e],
self.actions[t][e],
self.rewards[t][e],
self.dones[t][e],
self.values[t][e],
self.neglogpacs[t][e],
self.probabilities[t][e]
)
for traj in buffer:
traj.finalize()
return Trajectories(buffer, self)
def get_path_key(self, key, pad_val=0.0):
if key == 'observations':
return self.obs
elif key == 'actions':
return self.actions
elif key == 'observations_next':
if self.obs_next is None:
obs = self.obs
self.obs_next = np.r_[
obs[1:],
pad_val*np.expand_dims(np.ones_like(obs[0]), axis=0)
]
return self.obs_next
elif key == 'actions_next':
if self.actions_next is None:
self.actions_next = np.r_[
self.actions[1:],
pad_val*np.expand_dims(np.ones_like(self.actions[0]), axis=0)
]
return self.actions_next
elif key == 'a_logprobs':
"""
alogprobs = self.sampler.get_a_logprobs(
ppo2.sf01(self.obs),
utils.one_hot(ppo2.sf01(self.actions).astype(np.int32), 6)
)
assert np.isclose(
ppo2.sf01(-1 * self.neglogpacs),
alogprobs
).all()
"""
return -1 * self.neglogpacs
else:
raise NotImplementedError
def extract_paths(self, keys, obs_modifier=lambda obs, *args: obs):
data = [
ppo2.sf01(self.get_path_key(key))
for key in keys
]
def process_data(inpt):
key, value = inpt
if 'actions' in key:
return utils.one_hot(value.astype(np.int32), self.sampler.env.action_space.n)
elif 'observations' in key:
return obs_modifier(value, key=key, sample=self)
else:
return value
return map(process_data, zip(keys, data))
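# Illustrative sketch of the two consumers described in the PPOSample
# docstring above; this helper is only an example and is not called anywhere
# in this module.
def _example_ppo_sample_consumers(sample):
    ppo_batch = sample.to_ppo_batch()        # flattened batch for the PPO update
    trajectories = sample.to_trajectories()  # per-env slices for the discriminator
    return ppo_batch, trajectories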
class DummyAlgo:
def __init__(self, policy):
self.policy = policy
class PPOBatchSampler(BaseSampler, ppo2.AbstractEnvRunner):
# If you want to use the baselines PPO sampler as a sampler for the
# airl interfaced code, use this.
def __init__(self, algo, *, nsteps, baselines_venv, gamma=0.99, lam=0.95):
model = algo.policy.model
env = baselines_venv
# The biggest weird thing about this piece of code is that it does
# a bunch of work to handle the context of what happens if the model
# that we're training is actually recurrent.
# This means that we store the observations, states, and dones so that
# we can continue a run.
# We have not actually tested that functionality
ppo2.AbstractEnvRunner.__init__(self, env=env, model=model, nsteps=nsteps)
self.algo = algo
self.env = env
self.model = model
self.nsteps = nsteps
self.gamma = gamma
self.lam = lam
self.cur_sample = None
self._epinfobuf = deque(maxlen=100)
def start_worker(self):
pass
def shutdown_worker(self):
pass
def run(self):
return self._sample()
def get_probabilities_for_obs(self, obs):
tm = self.model.train_model
if obs.shape[1:] != self.env.observation_space.shape and self.env.venv.encoder:
obs = self.env.venv.encoder.base_vector(obs)
return tf.get_default_session().run(
tf.nn.softmax(tm.pd.logits),
{tm.X: obs}
)
def get_a_logprobs(self, obs, acts):
probs = utils.batched_call(
# needs to be a tuple for the batched call to work
lambda obs: (self.get_probabilities_for_obs(obs),),
self.model.train_model.X.shape[0].value,
(obs, ),
check_safety=False
)[0]
return np.log((probs * acts).sum(axis=1))
def _sample(self) -> PPOSample:
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
for _ in range(self.nsteps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
should_show = False and np.random.random() > .95
if should_show:
print()
obs_summaries = '\t'.join([f"{o[0,0,0]}{o[0,-1,0]}" for o in self.obs])
act_summaries = '\t'.join([str(a) for a in actions])
print(f"State:\t{obs_summaries}")
print(f"Action:\t{act_summaries}")
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
if should_show:
rew = '\t'.join(['{:.3f}'.format(r) for r in rewards])
print(f"Reward:\t{rew}")
print()
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo:
epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
self._epinfobuf.extend(epinfos)
return PPOSample(
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones,
mb_neglogpacs, mb_states, epinfos, self
)
def _process_ppo_samples(
self, *,
obs, rewards, actions, values, dones, neglogpacs,
states, epinfos,
gamma, lam
) -> PPOBatch:
#batch of steps to batch of rollouts
mb_obs = np.asarray(obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(rewards, dtype=np.float32)
mb_actions = np.asarray(actions)
mb_values = np.asarray(values, dtype=np.float32)
mb_dones = np.asarray(dones, dtype=np.bool)
mb_neglogpacs = np.asarray(neglogpacs, dtype=np.float32)
last_values = self.model.value(self.obs, self.states, self.dones)
#discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return PPOBatch(
*map(
ppo2.sf01,
(mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)
),
states,
epinfos
)
def process_to_ppo_batch(
self, ppo_sample: PPOSample, *, gamma: float, lam: float
) -> PPOBatch:
return self._process_ppo_samples(
obs=ppo_sample.obs,
rewards=ppo_sample.rewards,
actions=ppo_sample.actions,
values=ppo_sample.values,
dones=ppo_sample.dones,
neglogpacs=ppo_sample.neglogpacs,
states=ppo_sample.states,
epinfos=ppo_sample.epinfos,
gamma=gamma, lam=lam
)
def process_trajectory(self, traj, *, gamma, lam):
def batch_reshape(single_traj_data):
single_traj_data = np.asarray(single_traj_data)
s = single_traj_data.shape
return single_traj_data.reshape(s[0], 1, *s[1:])
agent_info = traj['agent_infos']
# These are trying to deal with the fact that the PPOSampler maintains
# an internal state that it uses to remember information between
# trajectories, which is important if you have a recurrent policy
self.state = None
self.obs[:] = traj['observations'][-1]
self.dones = np.ones(self.env.num_envs)
dones = np.zeros(agent_info['values'].shape)
# This is a full trajectory, so it's a bunch of not-done, followed by
# a single done
dones[-1] = 1
# This is actually kind of weird w/r/t the PPO code, because the
# batch length is so much longer. Maybe this will work? But if PPO
# ablations don't crash, but fail to learn this is probably why.
return self._process_ppo_samples(
# The easy stuff that we just observe
obs=batch_reshape(traj['observations']),
rewards=np.hstack([batch_reshape(traj['rewards']) for _ in range(8)]),
actions=batch_reshape(traj['actions']).argmax(axis=1),
# now the things from the agent info
values=agent_info['values'],
dones=dones,
neglogpacs=agent_info['neglogpacs'],
states=None, # recurrent trajectories should include states
# and the annotations
epinfos=traj['env_infos'],
gamma=gamma, lam=lam
)
def obtain_samples(self, itr):
self.cur_sample = self._sample()
return self.cur_sample
def process_samples(self, itr, paths):
ppo_batch = self.cur_sample.to_ppo_batch()
self._epinfobuf.extend(ppo_batch.epinfos)
return ppo_batch
@property
def mean_reward(self):
return ppo2.safemean([epinfo['r'] for epinfo in self._epinfobuf])
@property
def mean_length(self):
return ppo2.safemean([epinfo['l'] for epinfo in self._epinfobuf])
class FullTrajectorySampler(VectorizedSampler):
# If you want to use the RLLab sampling code with a baselines-interfaced
# policy, use this.
@overrides
def process_samples(self, itr, paths):
"""
We need to go from paths to PPOBatch-shaped samples. This does it in a
way that's pretty hacky and doesn't crash, but it isn't very promising:
even with PPO hyperparameters tuned for single full trajectories,
learning doesn't work well.
"""
print("Processing samples, albeit hackily!")
samples_data = self.algo.policy.learner.runner.process_trajectory(
paths[0]
)
T = samples_data[0].shape[0]
return PPOBatch(
*([data[:512] for data in samples_data[:-2]] + [None, None])
)
class PPOBatchBuffer(PPOSample):
def __init__(self, ppo_sample, n_batches):
self.n_batches = n_batches
self.cur_idx = 0
T = ppo_sample.obs.shape[0]
assert ppo_sample.rewards.shape[0] == T
assert ppo_sample.actions.shape[0] == T
assert ppo_sample.values.shape[0] == T
assert ppo_sample.dones.shape[0] == T
assert ppo_sample.neglogpacs.shape[0] == T
self.batch_T = T
def fix_shape(shape):
return (self.batch_T * self.n_batches, ) + shape[1:]
super().__init__(
np.zeros(fix_shape(ppo_sample.obs.shape)),
np.zeros(fix_shape(ppo_sample.rewards.shape)),
np.zeros(fix_shape(ppo_sample.actions.shape)),
np.zeros(fix_shape(ppo_sample.values.shape)),
np.zeros(fix_shape(ppo_sample.dones.shape)),
np.zeros(fix_shape(ppo_sample.neglogpacs.shape)),
None,
None,
ppo_sample.sampler
)
def add(self, sample):
if self.cur_idx >= self.n_batches * self.batch_T:
self.cur_idx = 0
for key in ['obs', 'rewards', 'actions', 'values', 'dones', 'neglogpacs']:
s = slice(self.cur_idx, self.cur_idx + self.batch_T)
getattr(self, key)[s] = getattr(sample, key)
self.cur_idx += self.batch_T
def to_ppo_batches(self, batch_size):
for start in range(0, self.batch_T * self.n_batches, batch_size):
end = start + batch_size
s = slice(start, end)
yield self.sampler._process_ppo_samples(
obs=self.obs[s],
rewards=self.rewards[s],
actions=self.actions[s],
values=self.values[s],
dones=self.dones[s],
neglogpacs=self.neglogpacs[s],
states=None, epinfos=None,
gamma=self.sampler.gamma,
lam=self.sampler.lam
)
| 19,581 | 34.53902 | 105 |
py
|
atari-irl
|
atari-irl-master/atari_irl/policies.py
|
import numpy as np
from baselines.ppo2.ppo2 import Model
from . import environments
from .utils import one_hot
import os
import os.path as osp
import joblib
class Policy:
"""
Lets us save, restore, and step a policy forward
Plausibly we'll want to start passing a SerializationContext argument
instead of save_dir, so that we can abstract out the handling of the
load/save logic and ensure that the relevant directories exist.
If we do that we'll have to intercept the Model's use of joblib,
maybe using a temporary file.
"""
model_args_fname = 'model_args.pkl'
model_fname = 'model' # extension assigned automatically
annotations_fname = 'annotations.pkl'
def __init__(self, model_args):
self.model_args = model_args
self.model = Model(**model_args)
self.annotations = {}
def step(self, obs):
return self.model.step(obs)
def save(self, save_dir, **kwargs):
"""
Saves a policy, along with other annotations about it
Args:
save_dir: directory to put the policy in
**kwargs: annotations that you want to store along with the model
This is kind of a janky interface, but it seems maybe not-that-bad. The
canonical thing that we'll want to store is the training reward; see the
round-trip sketch after this class for an example.
"""
self.annotations.update(kwargs)
os.makedirs(save_dir, exist_ok=True)
joblib.dump(self.model_args, osp.join(save_dir, self.model_args_fname))
self.model.save(osp.join(save_dir, self.model_fname))
joblib.dump(self.annotations, osp.join(save_dir, self.annotations_fname))
@classmethod
def load(cls, save_dir):
"""
Restore a model completely from storage.
This needs to be a class method, so that we don't have to initialize
a policy in order to get the parameters back.
"""
model_args = joblib.load(osp.join(save_dir, cls.model_args_fname))
policy = cls(model_args)
policy.model.load(osp.join(save_dir, cls.model_fname))
policy.annotations = joblib.load(osp.join(save_dir, cls.annotations_fname))
return policy
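# Illustrative round-trip sketch (hypothetical save_dir and annotation names;
# not invoked anywhere in this module).
def _example_policy_roundtrip(model_args, save_dir='/tmp/example_policy'):
    policy = Policy(model_args)
    policy.save(save_dir, mean_reward=0.0)  # annotations are arbitrary kwargs
    restored = Policy.load(save_dir)
    return restored.annotations['mean_reward']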
class RandomPolicy(Policy):
"""
Take random actions from a given action space.
"""
def __init__(self, action_space):
self.action_space = action_space
def step(self, obs):
acts = [self.action_space.sample() for ob in obs]
return acts, None, None, None
class EnvPolicy(Policy):
"""
Lets us save and restore a policy where the policy depends on some sort of
modification to the environment, expressed as an environment wrapper.
Unfortunately, we still need to initialize the same type of environment
before we can open it here, so that the deserialized wrapper has an
underlying environment to wrap.
"""
env_params_fname = 'env_params.pkl'
def __init__(self, model_args, envs=None):
super().__init__(model_args)
self.envs = envs
def save(self, save_dir, **kwargs):
assert self.envs
super().save(save_dir, **kwargs)
env_params = environments.serialize_env_wrapper(self.envs)
joblib.dump(env_params, osp.join(save_dir, self.env_params_fname))
@classmethod
def load(cls, save_dir, envs):
import pickle
policy = super().load(save_dir)
# we save the pickle-serialized env_params, so we need pickle to deserialize them
env_params = pickle.loads(joblib.load(osp.join(save_dir, cls.env_params_fname)))
policy.envs = environments.restore_serialized_env_wrapper(env_params, envs)
environments.make_const(policy.envs)
return policy
def restore_policy_from_checkpoint_dir(checkpoint_dir, envs=None):
if EnvPolicy.env_params_fname in os.listdir(checkpoint_dir):
assert envs is not None
return EnvPolicy.load(checkpoint_dir, envs)
else:
return Policy.load(checkpoint_dir)
def run_policy(*, model, environments, render=True):
# Initialize the stuff we want to keep track of
rewards = []
# Initialize our environment
done = [False]
obs = np.zeros((environments.num_envs,) + environments.observation_space.shape)
obs[:] = environments.reset()
# run the policy until done
while not any(done):
actions, _, _, _ = model.step(obs)
obs[:], reward, done, info = environments.step(actions)
rewards.append(reward)
if render:
environments.render()
return sum(rewards[:])
def sample_trajectories(*, model, environments, one_hot_code=False, n_trajectories=10, render=False):
# vectorized environments reset after done
# pirl format: [
# (observations, actions), <- single trajectory same length for both
# (observations, actions),
# ...
# ]
# airl format: [ <- if airl accepts this list, then we're happy
# {observations: numpy array, actions: numpy array}, <- single trajectory
# {observations: numpy array, actions: numpy array},
# ...
# ]
# simple simulated robotics can work with 1 trajectory, 5-10 for harder, scales
# with complexity
completed_trajectories = []
observations = [[] for _ in range(environments.num_envs)]
actions = [[] for _ in range(environments.num_envs)]
rewards = [[] for _ in range(environments.num_envs)]
obs = environments.reset()
while len(completed_trajectories) < n_trajectories:
acts, _, _, _ = model.step(obs)
# We append observation, actions tuples here, since they're defined now
for i, (o, a) in enumerate(zip(obs, acts)):
observations[i].append(o)
actions[i].append(a)
# Figure out our consequences
obs, rews, dones, _ = environments.step(acts)
if render:
environments.render()
# If we're done, then append that trajectory and restart
for i, (r, done) in enumerate(zip(rews, dones)):
rewards[i].append(r)
if done:
completed_trajectories.append({
'observations': np.array(observations[i]),
# TODO(Aaron): get the real dim
'actions': one_hot(actions[i], environments.action_space.n) if one_hot_code else np.vstack(actions[i]),
'rewards': np.array(rewards[i]),
})
observations[i] = []
actions[i] = []
rewards[i] = []
np.random.shuffle(completed_trajectories)
return completed_trajectories[:n_trajectories]
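# Illustrative sketch (assumes a loaded Policy and a vectorized gym env; the
# names are placeholders and this helper is not called anywhere).
def _example_collect_demonstrations(policy, venv, n=10):
    trajectories = sample_trajectories(
        model=policy.model, environments=venv, one_hot_code=True, n_trajectories=n
    )
    # each element is a dict with 'observations', 'actions' and 'rewards' arrays
    return [t['rewards'].sum() for t in trajectories]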
| 6,559 | 34.846995 | 123 |
py
|
atari-irl
|
atari-irl-master/atari_irl/environments.py
|
import pickle
import numpy as np
import tensorflow as tf
from rllab.envs.base import Env
from rllab.envs.gym_env import convert_gym_space
from baselines.common.vec_env import VecEnvWrapper
from baselines.common.vec_env.vec_normalize import VecNormalize
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.atari_wrappers import NoopResetEnv, MaxAndSkipEnv, wrap_deepmind
from gym.spaces.discrete import Discrete
from gym import spaces
from sandbox.rocky.tf.spaces import Box
import gym
import ple
import os
def one_hot(x, dim):
assert isinstance(x, list) or len(x.shape) == 1
ans = np.zeros((len(x), dim))
for n, i in enumerate(x):
ans[n, i] = 1
return ans
def vec_normalize(env):
return VecNormalize(env)
mujoco_modifiers = {
'env_modifiers': [],
'vec_env_modifiers': [vec_normalize]
}
# from baselines.common.cmd_util.make_atari_env
def wrap_env_with_args(Wrapper, **kwargs):
return lambda env: Wrapper(env, **kwargs)
def noop_reset(noop_max):
def _thunk(env):
assert 'NoFrameskip' in env.spec.id
return NoopResetEnv(env, noop_max=noop_max)
return _thunk
def atari_setup(env):
# from baselines.common.atari_wrappers
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
class TimeLimitEnv(gym.Wrapper):
def __init__(self, env, time_limit=500):
gym.Wrapper.__init__(self, env)
self.steps = 0
self.time_limit = time_limit
def reset(self, **kwargs):
self.steps = 0
return self.env.reset(**kwargs)
def step(self, actions):
f1, f2, done, f3 = self.env.step(actions)
self.steps += 1
if self.steps > self.time_limit:
done = True
return f1, f2, done, f3
class VecRewardZeroingEnv(VecEnvWrapper):
def step(self, actions):
_1, reward, _2, _3 = self.venv.step(actions)
return _1, np.zeros((_1.shape[0],)), _2, _3
def reset(self):
return self.venv.reset()
def step_wait(self):
return self.venv.step_wait()
class VecIRLRewardEnv(VecEnvWrapper):
def __init__(self, env, *, reward_network):
VecEnvWrapper.__init__(self, env)
self.reward_network = reward_network
self.prev_obs = None
def step(self, acts):
obs, _, done, info = self.venv.step(acts)
assert np.sum(_) == 0
if self.prev_obs is None:
rewards = np.zeros(obs.shape[0])
else:
assert not self.reward_network.score_discrim
rewards = tf.get_default_session(
).run(self.reward_network.reward, feed_dict={
self.reward_network.act_t: acts,
self.reward_network.obs_t: self.prev_obs
})[:, 0]
if self.reward_network.drop_framestack:
self.prev_obs = obs[:, :, :, -1:]
else:
self.prev_obs = obs
assert len(rewards) == len(obs)
return obs, rewards, done, info
def reset(self):
self.prev_obs = None
return self.venv.reset()
def step_wait(self):
return self.venv.step_wait()
class EncoderWrappedEnv(VecEnvWrapper):
def __init__(self, env, *, encoder):
VecEnvWrapper.__init__(self, env)
self.encoder = encoder
self.observation_space = spaces.Box(
shape=(self.encoder.d_embedding,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max
)
print("Wrapping with encoder")
def step(self, acts):
obs, rewards, done, info = self.venv.step(acts)
obs = self.encoder.base_vector(obs)
return obs, rewards, done, info
def reset(self):
return self.encoder.base_vector(self.venv.reset())
def step_wait(self):
return self.venv.step_wait()
class OneHotDecodingEnv(gym.Wrapper):
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, one_hot_actions):
return self.env.step(np.argmax(one_hot_actions, axis=0))
class VecOneHotEncodingEnv(VecEnvWrapper):
def __init__(self, venv, dim=6):
VecEnvWrapper.__init__(self, venv)
self.dim = self.action_space.n
def step(self, actions):
return self.venv.step(one_hot(actions, self.dim))
def reset(self):
return self.venv.reset()
def step_wait(self):
return self.venv.step_wait()
class DummyVecEnvWrapper(VecEnvWrapper):
def step(self, actions):
return self.venv.step(actions)
def reset(self):
return self.venv.reset()
def step_wait(self):
return self.venv.step_wait()
easy_env_modifiers = {
'env_modifiers': [
lambda env: wrap_deepmind(env, frame_stack=False, clip_rewards=False),
wrap_env_with_args(TimeLimitEnv, time_limit=5000)
],
'vec_env_modifiers': [
wrap_env_with_args(DummyVecEnvWrapper)
]
}
import functools
# Episode Life causes us to not actually reset the environments, which means
# that interleaving, and even running the normal sampler a bunch of times
# will give us super short trajectories. Right now we set it to false, but
# that's not an obviously correct way to handle the problem
atari_modifiers = {
'env_modifiers': [
wrap_env_with_args(NoopResetEnv, noop_max=30),
wrap_env_with_args(MaxAndSkipEnv, skip=4),
functools.partial(wrap_deepmind, episode_life=False, frame_stack=False),
],
'vec_env_modifiers': [
wrap_env_with_args(VecFrameStack, nstack=4)
]
}
def one_hot_wrap_modifiers(modifiers):
return {
'env_modifiers': modifiers['env_modifiers'] + [
wrap_env_with_args(OneHotDecodingEnv)
],
'vec_env_modifiers': modifiers['vec_env_modifiers']
}
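# Illustrative sketch of how these modifier dicts are meant to be consumed.
# This is an assumption for documentation purposes: the real application
# happens in the environment-construction utilities, not in this module.
# Each env_modifier wraps a single gym env, and each vec_env_modifier wraps
# the vectorized env built on top of them.
def _example_apply_modifiers(make_env, make_vec_env, modifiers):
    def make_wrapped_env():
        env = make_env()
        for wrap in modifiers['env_modifiers']:
            env = wrap(env)
        return env
    venv = make_vec_env(make_wrapped_env)
    for wrap in modifiers['vec_env_modifiers']:
        venv = wrap(venv)
    return venv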
class ConstantStatistics(object):
def __init__(self, running_mean):
self.mean = running_mean.mean
self.var = running_mean.var
self.count = running_mean.count
def update(self, x):
pass
def update_from_moments(self, _batch_mean, _batch_var, _batch_count):
pass
def serialize_env_wrapper(env_wrapper):
venv = env_wrapper.venv
env_wrapper.venv = None
serialized = pickle.dumps(env_wrapper)
env_wrapper.venv = venv
return serialized
def restore_serialized_env_wrapper(env_wrapper, venv):
env_wrapper.venv = venv
env_wrapper.num_envs = venv.num_envs
if hasattr(env_wrapper, 'ret'):
env_wrapper.ret = np.zeros(env_wrapper.num_envs)
return env_wrapper
def make_const(norm):
'''Monkey patch classes such as VecNormalize that use a
RunningMeanStd (or compatible class) to keep track of statistics.'''
for k, v in norm.__dict__.items():
if hasattr(v, 'update_from_moments'):
setattr(norm, k, ConstantStatistics(v))
def wrap_action_space(action_space):
return Box(0, 1, shape=action_space.n)
# Copied from https://github.com/HumanCompatibleAI/population-irl/blob/master/pirl/irl/airl.py
# this hacks around airl being built on top of rllib, and not using gym
# environments
class VecGymEnv(Env):
def __init__(self, venv):
self.venv = venv
self._observation_space = convert_gym_space(venv.observation_space)
self._action_space = convert_gym_space(venv.action_space)
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
if isinstance(self._action_space, Box):
return self._action_space
else:
return wrap_action_space(self._action_space)
def terminate(self):
# Normally we'd close environments, but pirl.experiments handles this.
pass
@property
def vectorized(self):
return True
def vec_env_executor(self, n_envs, max_path_length):
# SOMEDAY: make these parameters have an effect?
# We're powerless as the environments have already been created.
# But I'm not too bothered by this, as we can tweak them elsewhere.
return self.venv
def reset(self, **kwargs):
print("Reset")
self.venv.reset(**kwargs)
class JustPress1Environment(gym.Env):
def __init__(self):
super().__init__()
self.reward_range = (0, 1)
self.action_space = gym.spaces.Discrete(6)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(210, 160, 3))
self.black = np.zeros(self.observation_space.shape).astype(np.uint8)
self.white = np.ones(self.observation_space.shape).astype(np.uint8) * 255
self.random_seed = 0
self.np_random = np.random.RandomState(0)
class Ale:
def lives(self):
return 1
self.ale = Ale()
def seed(self, seed=None):
if seed is None:
seed = 0
self.random_seed = seed
self.np_random.seed(seed)
def is_done(self):
return self.np_random.random_sample() > .99
def step(self, action):
if action == 0:
return self.black, 0.0, self.is_done(), {}
else:
return self.white, 1.0, self.is_done(), {}
def reset(self):
return self.black
def render(self):
raise NotImplementedError
def get_action_meanings(self):
return ['NOOP', 'OP', 'USELESS1', 'USELESS2', 'USELESS3', 'USELESS4']
class SimonSaysEnvironment(JustPress1Environment):
def __init__(self):
super().__init__()
self.correct = np.zeros(self.observation_space.shape).astype(np.uint8)
self.incorrect = np.zeros(self.observation_space.shape).astype(np.uint8)
boundary = self.correct.shape[1] // 2
self.correct[:,:boundary] = 255
self.incorrect[:,boundary:] = 255
self.next_move = self.np_random.randint(2)
self.obs_map = {
0: self.black,
1: self.white
}
self.turns = 0
@staticmethod
def isint(n):
return isinstance(n, np.int64) or isinstance(n, int)
def set_next_move_get_obs(self):
assert self.next_move is None
self.next_move = self.np_random.randint(2)
return self.obs_map[self.next_move]
def step(self, action):
reward = 0.0
self.turns += 1
if self.next_move is not None:
if self.isint(action) and action == self.next_move:
reward = 2.0
obs = self.correct
else:
obs = self.incorrect
self.next_move = None
else:
obs = self.set_next_move_get_obs()
return obs, reward, self.turns >= 100, {'next_move': self.next_move}
def reset(self):
self.turns = 0
self.next_move = None
return self.set_next_move_get_obs()
class VisionSaysEnvironment(SimonSaysEnvironment):
def __init__(self):
super().__init__()
self.zero = np.zeros(self.observation_space.shape).astype(np.uint8)
self.one = np.zeros(self.observation_space.shape).astype(np.uint8)
self.one[50:150, 120:128, :] = 255
self.zero[50:150, 100:108, :] = 255
self.zero[50:150, 140:148, :] = 255
self.zero[50:58, 100:148, :] = 255
self.zero[142:150, 100:148, :] = 255
self.obs_map = {
0: self.one,
1: self.zero
}
def state_preprocessor(d):
return np.array([d[key] for key in sorted(d.keys())])
def make_ple_game(game_class, obs_type, **kwargs):
class PLEGame(gym.Env):
def __init__(self):
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.environ["SDL_VIDEODRIVER"] = "dummy"
super().__init__()
self.ple = ple.PLE(
game_class(**kwargs),
state_preprocessor=state_preprocessor,
display_screen=False
)
self.ple.init()
self.reward_range = (
min(self.ple.game.rewards.values()),
max(self.ple.game.rewards.values())
)
self.obs_type = obs_type
if self.obs_type == 'rgb':
self.get_obs = self.ple.getScreenRGB
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*self.ple.getScreenDims(), 3)
)
elif self.obs_type == 'state_vector':
self.get_obs = self.ple.getGameState
self.observation_space = gym.spaces.Box(
low=-1000, high=1000, shape=self.get_obs().shape, dtype=np.float64
)
else:
assert False, "obs_type must be rgb or state_vector"
self.action_space = gym.spaces.Discrete(6)
assert len(self.ple.getActionSet()) < 6
self._actions = self.ple.getActionSet()
self._actions += [
None for _ in range(6 - len(self._actions))
]
self._action_mapping = self.ple.game.actions
self._action_mapping['NOOP'] = None
self.ale = self.ple
self.np_random = np.random.RandomState(0)
def seed(self, seed=None):
self.ple.rng.seed(seed)
self.np_random.seed(seed)
def is_done(self):
return self.ple.game_over()
def step(self, action):
reward = self.ple.act(self._actions[action])
return self.get_obs(), reward, self.is_done(), self.ple.game.getGameState()
def reset(self):
self.ple.reset_game()
return self.get_obs()
def render(self, *args):
return self.ple.getScreenRGB()
def get_action_meanings(self):
reverse_dict = dict(zip(
self._action_mapping.values(),
self._action_mapping.keys()
))
ans = [reverse_dict[a] for a in self._actions]
ans[0] = 'NOOP'
return ans
return PLEGame
gym.envs.register(
id='VisionSays-v0',
entry_point='atari_irl.environments:VisionSaysEnvironment'
)
gym.envs.register(
id='SimonSays-v0',
entry_point='atari_irl.environments:SimonSaysEnvironment'
)
no_modifiers = {
'env_modifiers': [],
'vec_env_modifiers': []
}
PLEPong = make_ple_game(ple.games.pong.Pong, 'rgb')
PLEPongState = make_ple_game(ple.games.pong.Pong, 'state_vector')
PLECatcher = make_ple_game(ple.games.catcher.Catcher, 'rgb', init_lives=10000)
PLECatcherState = make_ple_game(ple.games.catcher.Catcher, 'state_vector', init_lives=10000)
gym.envs.register(
id='PLEPong-v0',
max_episode_steps=100000,
entry_point='atari_irl.environments:PLEPong'
)
gym.envs.register(
id='PLEPongState-v0',
max_episode_steps=100000,
entry_point='atari_irl.environments:PLEPongState'
)
gym.envs.register(
id='PLECatcher-v0',
max_episode_steps=100000,
entry_point='atari_irl.environments:PLECatcher'
)
gym.envs.register(
id='PLECatcherState-v0',
max_episode_steps=100000,
entry_point='atari_irl.environments:PLECatcherState'
)
env_mapping = {
'PongNoFrameskip-v4': atari_modifiers,
'EnduroNoFrameskip-v4': atari_modifiers,
'CartPole-v1': mujoco_modifiers,
'VisionSays-v0': easy_env_modifiers,
'SimonSays-v0': easy_env_modifiers,
'PLEPong-v0': atari_modifiers,
'PLEPongState-v0': no_modifiers,
'PLECatcher-v0': atari_modifiers,
'PLECatcherState-v0': no_modifiers,
}
| 15,534 | 27.821892 | 94 |
py
|
atari-irl
|
atari-irl-master/atari_irl/__init__.py
|
from . import utils, training, policies, environments, irl, sampling, encoding
| 78 | 78 | 78 |
py
|
atari-irl
|
atari-irl-master/atari_irl/encoding.py
|
from atari_irl import utils
import tensorflow as tf
import numpy as np
import joblib
import os.path as osp
from baselines.a2c.utils import conv, fc, conv_to_fc
from baselines.ppo2 import ppo2
from collections import deque
def batch_norm(name, x):
shape = (1, *x.shape[1:])
with tf.variable_scope(name):
mean = tf.get_variable('mean', shape, initializer=tf.constant_initializer(0.0))
variance = tf.get_variable('variance', shape, initializer=tf.constant_initializer(1.0))
offset = tf.get_variable('offset', shape, initializer=tf.constant_initializer(0.0))
scale = tf.get_variable('scale', shape, initializer=tf.constant_initializer(1.0))
return tf.nn.batch_normalization(
x, mean, variance, offset, scale, 0.001, name
)
def dcgan_cnn(images, dout, **conv_kwargs):
activ = lambda name, inpt: tf.nn.leaky_relu(batch_norm(name, inpt), alpha=0.2)
l1 = activ('l1', conv(images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
l2 = activ('l2', conv(l1, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
l3 = activ('l3', conv(l2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h = fc(conv_to_fc(l3), nh=512, scope='final')
out = activ('out', fc(h, 'fc1', nh=dout, init_scale=np.sqrt(2)))
return out, l3.shape
def decoder_cnn(embedding, start_conv_shape, dclasses):
activ = lambda name, inpt: tf.nn.relu(inpt) # batch_norm(name, inpt))
enhance = fc(embedding, 'out_shape', nh=np.prod(start_conv_shape))
start_conv = tf.reshape(enhance, [-1, *start_conv_shape])
tf.layers.conv2d_transpose(start_conv, 64, 3, strides=1)
l1 = activ('l3inv', tf.layers.conv2d_transpose(start_conv, 64, 3, strides=1))
l2 = activ('l2inv', tf.layers.conv2d_transpose(l1, 32, 4, strides=2))
l3 = tf.layers.conv2d_transpose(l2, dclasses + 1, 8, strides=4)
return l3
class NormalAutoEncoder:
unk_mean = 255.0 / 2
def _check_obs(self, obs):
assert (obs >= 0).all()
assert (obs <= 255).all()
if self.trim_score:
if not (obs[:, :10, :, :] == 87).all():
obs[:, :10, :, :] = 87
@staticmethod
def _process_obs_tensor(obs):
return tf.cast(obs, tf.float32)
def _get_frame(self):
return self.obs_t[:, :, :, -1:]
def _get_final_encoding(self):
self.noise = tf.placeholder(tf.float32, [None, self.d_embedding], name='noise')
return self.cnn_embedding + self.noise
def __init__(
self, *,
obs_shape, d_embedding,
embedding_weight=0.01,
obs_dtype=tf.int16,
d_classes=0,
trim_score=False,
**conv_kwargs
):
self.kwargs = {
'obs_shape': obs_shape,
'd_embedding': d_embedding,
'embedding_weight': embedding_weight,
'obs_dtype': tf.int32,
'trim_score': trim_score
}
self.obs_dtype = obs_dtype
self.obs_shape = obs_shape
self.d_embedding = d_embedding
self.embedding_weight = embedding_weight
self.obs_dtype = obs_dtype
self.trim_score = trim_score
with tf.variable_scope('autoencoder') as scope:
with tf.variable_scope('encoder') as _es:
self.obs_t = tf.placeholder(self.obs_dtype, list((None,) + self.obs_shape), name='obs')
processed_obs_t = self._process_obs_tensor(self.obs_t)
h_obs, final_conv_shape = dcgan_cnn(processed_obs_t, d_embedding, **conv_kwargs)
self.cnn_embedding = h_obs
self._final_conv_shape = tuple(s.value for s in final_conv_shape[1:])
self.encoding = self._get_final_encoding()
self.encoder_scope = _es
self.encoding_shape = tuple(s.value for s in self.encoding.shape[1:])
with tf.variable_scope('decoder') as _ds:
# This part of the observation model handles our class predictions
self.preds = decoder_cnn(
self.encoding,
self._final_conv_shape,
0
)
self.decoder_scope = _ds
self.params = tf.trainable_variables(scope='autoencoder')
with tf.variable_scope('optimization') as _os:
# self.nobs_t = tf.placeholder(obs_dtype, list((None,) + self.dOshape), name='nobs')
# processed_jobs_t = self.nobs_t
self.frame = self._get_frame()
s = -1*(self.preds - tf.cast(self.frame, tf.float32)) ** 2
self.loss = -tf.reduce_mean(
# sum the (negative) per-pixel squared errors to get the score for a
# whole image
tf.reduce_sum(s, axis=[1, 2])
) + self.embedding_weight * tf.reduce_mean(
# regularize the encoding
tf.reduce_sum(self.encoding ** 2, axis=1)
)
self.lr = tf.placeholder(tf.float64, (), name='lr')
self.step = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
self.optimization_scope = _os
def train_step(self, *, lr, obs, noise=None):
self._check_obs(obs)
if noise is None:
noise = np.zeros((obs.shape[0], self.d_embedding))
loss, _ = tf.get_default_session().run(
[self.loss, self.step], feed_dict={
self.lr: lr,
self.obs_t: obs,
self.noise: noise
}
)
return loss
def encode(self, obs, *args):
self._check_obs(obs)
noise = np.zeros((obs.shape[0], self.d_embedding))
return tf.get_default_session().run(self.encoding, feed_dict={
self.obs_t: obs,
self.noise: noise
})
def decode(self, encoding):
preds = tf.get_default_session().run(self.preds, feed_dict={
self.encoding: encoding
})
img = preds
return img[:, :, :, -1]
def base_vector(self, obs, *args):
self._check_obs(obs)
return tf.get_default_session().run(self.cnn_embedding, feed_dict={self.obs_t: obs})
def compare(self, obs, disp_p=0.01):
img = self.decode(self.encode(obs))
full_img = []
for i in range(len(img)):
if np.random.random() < disp_p:
full_img.append(np.hstack([img[i], obs[i, :, :, -1]]))
return np.vstack(full_img), np.mean((img - obs[:, :, :, -1]) ** 2)
def save(self, save_path):
ps = tf.get_default_session().run(self.params)
joblib.dump({'params': ps, 'kwargs': self.kwargs}, save_path)
@classmethod
def load(cls, load_path):
data = joblib.load(load_path)
self = cls(**data['kwargs'])
loaded_params = data['params']
restores = []
for p, loaded_p in zip(self.params, loaded_params):
restores.append(p.assign(loaded_p))
tf.get_default_session().run(restores)
return self
class VariationalAutoEncoder:
unk_mean = 255.0 / 2
@staticmethod
def _check_obs(obs):
assert (obs >= 0).all()
assert (obs <= 255).all()
@staticmethod
def _process_obs_tensor(obs):
return tf.cast(obs, tf.float32)
def _get_frame(self):
return self.obs_t[:, :, :, -1:]
def _get_final_encoding(self):
self.noise = tf.placeholder(tf.float32, [None, self.d_embedding], name='noise')
return self.cnn_embedding + self.noise
def __init__(
self, *,
obs_shape, d_classes, d_embedding,
embedding_weight=0.01,
obs_dtype=tf.int16,
**conv_kwargs
):
self.kwargs = {
'obs_shape': obs_shape,
'd_classes': d_classes,
'd_embedding': d_embedding,
'embedding_weight': embedding_weight,
'obs_dtype': tf.int32
}
self.obs_dtype = obs_dtype
self.obs_shape = obs_shape
self.d_classes = d_classes
self.d_embedding = d_embedding
self.embedding_weight = embedding_weight
self.obs_dtype = obs_dtype
with tf.variable_scope('autoencoder') as scope:
with tf.variable_scope('encoder') as _es:
self.obs_t = tf.placeholder(self.obs_dtype, list((None,) + self.obs_shape), name='obs')
processed_obs_t = self._process_obs_tensor(self.obs_t)
h_obs, final_conv_shape = dcgan_cnn(processed_obs_t, d_embedding, **conv_kwargs)
self.cnn_embedding = h_obs
self._final_conv_shape = tuple(s.value for s in final_conv_shape[1:])
self.encoding = self._get_final_encoding()
self.encoder_scope = _es
self.encoding_shape = tuple(s.value for s in self.encoding.shape[1:])
with tf.variable_scope('decoder') as _ds:
# This part of the observation model handles our class predictions
self.logits = decoder_cnn(
self.encoding,
self._final_conv_shape,
self.d_classes
)
#self.logits = tf.clip_by_value(self.logits, -1e6, 1e6)
self.logp_class = tf.nn.log_softmax(self.logits)
# this part of the observation model handles our softclass parameters
means = tf.get_variable(
'mean', [self.d_classes], dtype=tf.float32,
initializer=tf.random_uniform_initializer(
#[self.d_classes],
maxval=255,
minval=-255
)
)
sigsqs = tf.clip_by_value(
tf.get_variable(
'sigsq', [self.d_classes], dtype=tf.float32,
initializer=tf.random_normal_initializer([self.d_classes], stddev=0.1)
), 0, 10
)
self.dist = tf.distributions.Normal(loc=means, scale=sigsqs)
# if we want to assign an "unknown" class, give a uniform
# distribution over pixel values
unk_value = tf.constant((1.0 / 255))
if self.unk_mean == 0.0:
unk_value = tf.constant((1.0 / (255 * 2)))
self.decoder_scope = _ds
self.params = tf.trainable_variables(scope='autoencoder')
with tf.variable_scope('optimization') as _os:
# self.nobs_t = tf.placeholder(obs_dtype, list((None,) + self.dOshape), name='nobs')
# processed_jobs_t = self.nobs_t
self.frame = self._get_frame()
# Calculate the log probability for the pixel value for each
# individual class
self.logps = tf.concat([
# For the normal classes, it's based on the gaussian
# distribution for each class
self.logp_class[:, :, :, :-1] + self.dist.log_prob(
tf.cast(self.frame, tf.float32)
),
# For the "unk" class, it's a uniform probability over pixel values
self.logp_class[:, :, :, -1:] + tf.log(unk_value)
], axis=3)
# get the log probabilities by marginalizing over our class probabilities
s = tf.reduce_logsumexp(self.logps, axis=3)
self.loss = -tf.reduce_mean(
# add the log probabilities to get the probability for a whole
# image
tf.reduce_sum(s, axis=[1, 2])
) + self.embedding_weight * tf.reduce_mean(
# regularize the encoding
tf.reduce_sum(self.encoding ** 2, axis=1)
)
self.lr = tf.placeholder(tf.float64, (), name='lr')
self.step = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
self.optimization_scope = _os
def train_step(self, *, lr, obs, noise=None):
self._check_obs(obs)
if noise is None:
noise = np.zeros((obs.shape[0], self.d_embedding))
loss, _ = tf.get_default_session().run(
[self.loss, self.step], feed_dict={
self.lr: lr,
self.obs_t: obs,
self.noise: noise
}
)
return loss
def encode(self, obs, *args):
self._check_obs(obs)
noise = np.zeros((obs.shape[0], self.d_embedding))
return tf.get_default_session().run(self.encoding, feed_dict={
self.obs_t: obs,
self.noise: noise
})
def decode(self, encoding):
preds, means, stds = tf.get_default_session().run(
[self.logp_class, self.dist.loc, self.dist.scale], feed_dict={
self.encoding: encoding
})
means = np.hstack([means, np.array([self.unk_mean])])
img = (np.exp(preds) * means).sum(axis=3)
return img
def base_vector(self, obs, *args):
self._check_obs(obs)
return tf.get_default_session().run(self.cnn_embedding, feed_dict={self.obs_t: obs})
def compare(self, obs, disp_p=0.01):
img = self.decode(self.encode(obs))
full_img = []
for i in range(len(img)):
if np.random.random() < disp_p:
full_img.append(np.hstack([img[i], obs[i, :, :, -1]]))
return np.vstack(full_img), np.mean((img - obs[:, :, :, -1]) ** 2)
def save(self, save_path):
ps = tf.get_default_session().run(self.params)
joblib.dump({'params': ps, 'kwargs': self.kwargs}, save_path)
@classmethod
def load(cls, load_path):
data = joblib.load(load_path)
self = cls(**data['kwargs'])
loaded_params = data['params']
restores = []
for p, loaded_p in zip(self.params, loaded_params):
restores.append(p.assign(loaded_p))
tf.get_default_session().run(restores)
return self
class ScoreTrimmedVariationalAutoEncoder(VariationalAutoEncoder):
@staticmethod
def _check_obs(obs):
assert (obs >= 0).all()
assert (obs <= 255).all()
if not (obs[:, :10, :, :] == 87).all():
obs[:, :10, :, :] = 87
class NextStepVariationalAutoEncoder(VariationalAutoEncoder):
unk_mean = 0.0
def __init__(self, num_actions=6, **kwargs):
self.num_actions = num_actions
super().__init__(**kwargs)
self.kwargs = kwargs
self.kwargs['num_actions'] = num_actions
def _check_acts(self, acts):
assert len(acts.shape) == 1
assert (acts >= 0).all()
assert (acts < self.num_actions).all()
def _get_frame(self):
# Now our frame is the next observation's last channel minus the current one's
self.nobs_t = tf.placeholder(self.obs_dtype, list((None,) + self.obs_shape), name='obs')
return self.nobs_t[:, :, :, -1:] - self.obs_t[:, :, :, -1:]
def _get_final_encoding(self):
embedding = super()._get_final_encoding()
self.acts_t = tf.placeholder(tf.int32, [None], name='actions')
self.action_embeddings = tf.get_variable(
'action_embeddings',
[self.num_actions, self.d_embedding],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
[self.d_embedding],
stddev=0.1
)
)
self.action_modifier = tf.nn.embedding_lookup(
self.action_embeddings, self.acts_t
)
return embedding + self.action_modifier
def train_step(self, *, lr, obs, acts, nobs, noise=None):
self._check_obs(obs)
if noise is None:
noise = np.zeros((obs.shape[0], self.d_embedding))
frame, loss, _ = tf.get_default_session().run(
[self.frame, self.loss, self.step], feed_dict={
self.lr: lr,
self.obs_t: obs,
self.acts_t: acts,
self.nobs_t: nobs,
self.noise: noise
}
)
return loss, frame
def encode(self, obs, acts):
self._check_obs(obs)
self._check_acts(acts)
noise = np.zeros((obs.shape[0], self.d_embedding))
return tf.get_default_session().run(self.encoding, feed_dict={
self.obs_t: obs,
self.noise: noise,
self.acts_t: acts
})
def compare(self, obs, acts, nobs, disp_p=0.01):
img = self.decode(self.encode(obs, acts))
full_img = []
for i in range(len(img)):
if np.random.random() < disp_p:
full_img.append(np.hstack([img[i], nobs[i, :, :, -1] - obs[i, :, :, -1]]))
return np.vstack(full_img)
def autoencode(*, tf_cfg, env_cfg):
with utils.TfEnvContext(tf_cfg, env_cfg) as context:
utils.logger.configure()
vae = VariationalAutoEncoder(
obs_shape=context.env_context.environments.observation_space.shape,
d_classes=20,
d_embedding=30,
embedding_weight=0.01
)
LR = 1e-4
tf.get_default_session().run(tf.local_variables_initializer())
tf.get_default_session().run(tf.global_variables_initializer())
buffer = deque(maxlen=500)
env = context.env_context.environments
env.reset()
num_timesteps = 10000
for i in range(num_timesteps):
lr = LR * (num_timesteps - i) * 1.0 / num_timesteps
obs = []
for t in range(128):
acts = [env.action_space.sample() for _ in range(env.num_envs)]
obs.append(env.step(acts)[0])
obs = np.array(obs).astype(np.uint8)
obs[:, :, :10, :, :] = 87.0
obs_batch = ppo2.sf01(obs)
if i == 0:
initial_obs = obs[:, 0, :, :, :]
for n in range(500):
loss = vae.train_step(lr=2.5e-4, obs=initial_obs[:100])
if n % 100 == 0:
print(f"Initial burn in {n}/1000: {loss}")
joblib.dump(
vae.compare(initial_obs[:120], disp_p=1),
osp.join(utils.logger.get_dir(), 'overfit_check.pkl')
)
if i % 100 == 0:
joblib.dump(
vae.compare(obs_batch),
osp.join(utils.logger.get_dir(), f'img_{i}.pkl')
)
#for epoch in range(4):
# for idx in np.random.permutation([i for i in range(len(buffer))]):
# vae.train_step(lr=lr, obs=buffer[idx])
if i < 1000 or i % 1000 == 0:
vae.save(osp.join(utils.logger.get_dir(), f'vae_{i}.pkl'))
buffer.append(obs_batch)
utils.logger.logkv(
'score',
vae.train_step(
lr=lr,
obs=buffer[np.random.randint(len(buffer))]
)
)
utils.logger.dumpkvs()
if __name__ == '__main__':
tf_config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=8,
inter_op_parallelism_threads=8,
device_count={'GPU': 1},
log_device_placement=False
)
tf_config.gpu_options.allow_growth = True
env_config = {
'env_name': 'PongNoFrameskip-v4',
'n_envs': 8,
'seed': 32,
'one_hot_code': False
}
autoencode(tf_cfg=tf_config, env_cfg=env_config)
| 19,671 | 36.541985 | 103 |
py
|
atari-irl
|
atari-irl-master/atari_irl/irl.py
|
import tensorflow as tf
import numpy as np
import pickle
from rllab.misc import logger
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.misc.overrides import overrides
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.spaces.box import Box
from airl.algos.irl_trpo import IRLTRPO
from airl.models.airl_state import AIRL
from airl.utils.log_utils import rllab_logdir
from airl.models.fusion_manager import RamFusionDistr
from airl.utils import TrainingIterator
from airl.models.architectures import relu_net
from baselines.ppo2.policies import CnnPolicy, MlpPolicy
from baselines.a2c.utils import conv, fc, conv_to_fc
from .environments import VecGymEnv, wrap_env_with_args, VecRewardZeroingEnv, VecOneHotEncodingEnv, EncoderWrappedEnv
from .utils import one_hot, TfEnvContext
from . import sampling, training, utils, optimizers, policies, encoding
from sandbox.rocky.tf.misc import tensor_utils
import joblib
import time
from collections import defaultdict, namedtuple
class DiscreteIRLPolicy(StochasticPolicy, Serializable):
"""
Wraps our ppo2-based Policy to fit the interface that AIRL uses.
"""
def __init__(
self,
*,
name,
policy_model,
num_envs,
env_spec,
wrapped_env_action_space,
action_space,
observation_space,
batching_config,
init_location=None,
encoder=None
):
Serializable.quick_init(self, locals())
assert isinstance(wrapped_env_action_space, Box)
self._dist = Categorical(wrapped_env_action_space.shape[0])
# this is going to be serialized, so we can't add in the envs or
# wrappers
self.init_args = dict(
name=name,
policy_model=policy_model,
init_location=init_location
)
ent_coef = 0.01
vf_coef = 0.5
max_grad_norm = 0.5
model_args = dict(
policy=policy_model,
ob_space=observation_space,
ac_space=action_space,
nbatch_act=batching_config.nenvs,
nbatch_train=batching_config.nbatch_train,
nsteps=batching_config.nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm
)
self.num_envs=num_envs
with tf.variable_scope(name) as scope:
policy = policies.Policy(model_args)
self.model = policy.model
self.act_model = self.model.act_model
self.scope = scope
StochasticPolicy.__init__(self, env_spec)
self.name = name
self.probs = tf.nn.softmax(self.act_model.pd.logits)
obs_var = self.act_model.X
self.tensor_values = lambda **kwargs: tf.get_default_session().run(self.get_params())
self._f_dist = tensor_utils.compile_function(
inputs=[obs_var],
outputs=self.probs
)
if init_location:
data = joblib.load(open(init_location, 'rb'))
self.restore_from_snapshot(data['policy_params'])
@property
def vectorized(self):
return True
def dist_info_sym(self, obs_var, state_info_vars=None):
return dict(prob=self.probs)
@overrides
def get_action(self, observation):
obs = np.array([observation])
action, _, _, _ = self.act_model.step(obs)
# TODO(Aaron) get the real dim
return one_hot(action, self.action_space.shape[0]), dict(prob=self._f_dist(obs))
def _get_actions_right_shape(self, observations):
actions, values, _, neglogpacs = self.act_model.step(observations)
return (
one_hot(actions, self.action_space.shape[0]),
dict(
prob=self._f_dist(observations),
values=values.reshape(self.num_envs, 1),
neglogpacs=neglogpacs.reshape(self.num_envs, 1)
)
)
def get_actions(self, observations):
N = observations.shape[0]
batch_size = self.act_model.X.shape[0].value
final_actions, agent_info = utils.batched_call(
self._get_actions_right_shape,
batch_size,
(observations,)
)
for key, value in agent_info.items():
if len(value.shape) == 2 and value.shape[1] == 1:
agent_info[key] = value.reshape((value.shape[0],))
# Integrity checks in case I wrecked this
assert len(final_actions) == N
for key in agent_info.keys():
assert len(agent_info[key]) == N
return final_actions, agent_info
def get_params_internal(self, **tags):
return self.scope.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
"""
Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
representation of the actions in terms of the policy parameters
:param obs_var:
:param action_var:
:param old_dist_info_vars:
:return:
"""
raise NotImplementedError
def log_diagnostics(self, paths):
pass
@property
def distribution(self):
return self._dist
def restore_param_values(self, params):
param_tensors = self.get_params()
restores = []
for tf_tensor, np_array in zip(param_tensors, params):
restores.append(tf_tensor.assign(np_array))
tf.get_default_session().run(restores)
def show_run_in_gym_env(self, venv):
dones = [False]
obs = venv.reset()
while not any(dones):
actions, _ = self.get_actions(obs)
obs, reward, dones, _ = venv.step(actions)
print(reward)
#venv.render()
def get_itr_snapshot(self):
return {
'config': self.init_args,
# unfortunately get_params() is already taken...
'tf_params': self.tensor_values()
}
def restore_from_snapshot(self, data):
"""
Restore a policy from snapshot data.
Note that this only restores the model part of the policy -- the
learner doesn't actually get its state restored, and so, for instance,
the step size will be different than it was when you saved.
"""
for key, value in data['config'].items():
if self.init_args[key] != value:
print(f"Warning: different values for {key}")
self.restore_param_values(data['tf_params'])
def batch_norm(x, name):
shape = (1, *x.shape[1:])
with tf.variable_scope(name):
mean = tf.get_variable('mean', shape, initializer=tf.constant_initializer(0.0))
variance = tf.get_variable('variance', shape, initializer=tf.constant_initializer(1.0))
offset = tf.get_variable('offset', shape, initializer=tf.constant_initializer(0.0))
scale = tf.get_variable('scale', shape, initializer=tf.constant_initializer(1.0))
return tf.nn.batch_normalization(x, mean, variance, offset, scale, 0.001, name)
def dcgan_cnn(unscaled_images, **conv_kwargs):
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = lambda name, inpt: tf.nn.leaky_relu(batch_norm(inpt, name))
h = activ('l1', conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
h2 = activ('l2', conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ('l3', conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
return conv_to_fc(h3)
def cnn_net(x, actions=None, dout=1, **conv_kwargs):
h = dcgan_cnn(x, **conv_kwargs)
activ = lambda name, inpt: tf.nn.leaky_relu(batch_norm(inpt, name))
if actions is not None:
assert dout == 1
h = tf.concat([actions, h], axis=1)
h_final = activ('h_final', fc(h, 'fc1', nh=512, init_scale=np.sqrt(2)))
return fc(h_final, 'output', nh=dout, init_scale=np.sqrt(2))
def mlp_net(x, layers=2, actions=None, dout=1):
if actions is not None:
x = tf.concat([x, actions], axis=1)
return relu_net(x, layers=layers, dout=dout)
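# Illustrative sketch (an assumption mirroring the cnn_net/mlp_net call
# convention above, not used anywhere): any custom reward_arch handed to
# AtariAIRL should accept an observation tensor, an optional one-hot action
# tensor, and a dout output size.
def _example_reward_arch(x, actions=None, dout=1):
    h = conv_to_fc(tf.cast(x, tf.float32))  # assumes image-shaped observations
    if actions is not None:
        h = tf.concat([h, actions], axis=1)
    return fc(h, 'example_out', nh=dout, init_scale=np.sqrt(2))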
class AtariAIRL(AIRL):
"""
This actually fits the AIRL interface! Yay!
Args:
fusion (bool): Use trajectories from old iterations to train.
state_only (bool): Fix the learned reward to only depend on state.
score_discrim (bool): Use log D - log 1-D as reward (if true you should not need to use an entropy bonus)
max_itrs (int): Number of training iterations to run per fit step.
"""
#TODO(Aaron): Figure out what all of these args mean
def __init__(self, *,
env_spec, # No good default, but we do need to have it
expert_trajs=None,
reward_arch=cnn_net,
reward_arch_args={},
value_fn_arch=cnn_net,
score_discrim=False,
discount=1.0,
state_only=False,
max_itrs=100,
fusion=False,
name='airl',
drop_framestack=False,
only_show_scores=False,
rescore_expert_trajs=True,
encoder_loc=None
):
super(AIRL, self).__init__()
# Write down everything that we're going to need in order to restore
# this. All of these arguments are serializable, so it's pretty easy
self.init_args = dict(
model=AtariAIRL,
env_spec=env_spec,
expert_trajs=expert_trajs,
reward_arch=reward_arch,
reward_arch_args=reward_arch_args,
value_fn_arch=value_fn_arch,
score_discrim=score_discrim,
discount=discount,
state_only=state_only,
max_itrs=max_itrs,
fusion=fusion,
name=name,
rescore_expert_trajs=rescore_expert_trajs,
drop_framestack=drop_framestack,
only_show_scores=only_show_scores,
encoder_loc=encoder_loc
)
self.encoder = None if not encoder_loc else encoding.VariationalAutoEncoder.load(encoder_loc)
self.encode_fn = None
if self.encoder:
if state_only:
self.encode_fn = self.encoder.base_vector
else:
self.encode_fn = self.encoder.encode
if fusion:
self.fusion = RamFusionDistr(100, subsample_ratio=0.5)
else:
self.fusion = None
if self.encoder:
self.dO = self.encoder.encoding_shape
self.dOshape = self.encoder.encoding_shape
else:
self.dO = env_spec.observation_space.flat_dim
self.dOshape = env_spec.observation_space.shape
if drop_framestack:
assert len(self.dOshape) == 3
self.dOshape = (*self.dOshape[:-1], 1)
self.dU = env_spec.action_space.flat_dim
assert isinstance(env_spec.action_space, Box)
self.score_discrim = score_discrim
self.gamma = discount
assert value_fn_arch is not None
#self.set_demos(expert_trajs)
self.expert_trajs = expert_trajs
self.state_only = state_only
self.max_itrs = max_itrs
self.drop_framestack = drop_framestack
self.only_show_scores = only_show_scores
self.expert_cache = None
self.rescore_expert_trajs = rescore_expert_trajs
# build energy model
with tf.variable_scope(name) as _vs:
# Should be batch_size x T x dO/dU
obs_dtype = tf.int8 if reward_arch == cnn_net else tf.float32
self.obs_t = tf.placeholder(obs_dtype, list((None,) + self.dOshape), name='obs')
self.nobs_t = tf.placeholder(obs_dtype, list((None,) + self.dOshape), name='nobs')
self.act_t = tf.placeholder(tf.float32, [None, self.dU], name='act')
self.nact_t = tf.placeholder(tf.float32, [None, self.dU], name='nact')
self.labels = tf.placeholder(tf.float32, [None, 1], name='labels')
self.lprobs = tf.placeholder(tf.float32, [None, 1], name='log_probs')
self.lr = tf.placeholder(tf.float32, (), name='lr')
with tf.variable_scope('discrim') as dvs:
rew_input = self.obs_t
with tf.variable_scope('reward'):
if self.state_only:
self.reward = reward_arch(
rew_input, dout=1, **reward_arch_args
)
else:
print("Not state only", self.act_t)
self.reward = reward_arch(
rew_input, actions=self.act_t,
dout=1, **reward_arch_args
)
# value function shaping
with tf.variable_scope('vfn'):
fitted_value_fn_n = value_fn_arch(self.nobs_t, dout=1)
with tf.variable_scope('vfn', reuse=True):
self.value_fn = fitted_value_fn = value_fn_arch(self.obs_t, dout=1)
# Define log p_tau(a|s) = r + gamma * V(s') - V(s)
self.qfn = self.reward + self.gamma*fitted_value_fn_n
log_p_tau = self.reward + self.gamma*fitted_value_fn_n - fitted_value_fn
log_q_tau = self.lprobs
log_pq = tf.reduce_logsumexp([log_p_tau, log_q_tau], axis=0)
self.discrim_output = tf.exp(log_p_tau-log_pq)
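                # This is the AIRL discriminator written in probability space:
                # D = exp(f) / (exp(f) + pi(a|s)), with f = r + gamma*V(s') - V(s)
                # and pi(a|s) recovered from the sampled log-probs above.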
self.accuracy, self.update_accuracy = tf.metrics.accuracy(
labels=self.labels,
predictions=self.discrim_output > 0.5
)
self.loss = -tf.reduce_mean(self.labels*(log_p_tau-log_pq) + (1-self.labels)*(log_q_tau-log_pq))
self.step = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
self._make_param_ops(_vs)
self.grad_reward = tf.gradients(self.reward, [self.obs_t, self.act_t])
self.modify_obs = self.get_ablation_modifiers()
self.score_mean = 0
self.score_std = 1
def change_kwargs(self, **kwargs):
for key, value in kwargs.items():
            # All of these are used in graph construction, so just changing
            # the parameter value here won't do anything
assert key not in {
'model',
'env_spec',
'dO',
'dOshape',
'dU',
'discount',
'gamma',
'drop_framestack',
'reward_arch',
'reward_arch_args',
'value_fn_arch',
'state_only',
'name'
}
# We have to serialize it
assert key in self.init_args
# And here's a whitelist just to be safe
assert key in {
'rescore_expert_trajs'
}
self.__setattr__(key, value)
def get_itr_snapshot(self):
return {
'config': self.init_args,
'tf_params': self.get_params()
}
def restore_from_snapshot(self, data):
for key, value in data['config'].items():
if self.init_args[key] != value:
print(f"Warning: different values for {key}")
self.set_params(data['tf_params'])
def get_ablation_modifiers(self):
def process_obs(obs, key=None, sample=None):
if self.drop_framestack:
obs = obs[:, :, :, -1:]
if self.only_show_scores:
obs = obs.copy()
obs[:, :, :42, :] *= 0
obs[:, 10:, :, :] *= 0
if self.encoder and key:
# we're using an encoder wrapped environment, and have
# already transformed it
if len(obs.shape) == 2:
return obs
assert sample
assert not self.drop_framestack
assert not self.only_show_scores
if 'next' in key:
nacts, = sample.extract_paths(keys=('actions_next',))
obs = self.encode_fn(obs, nacts.argmax(axis=1))
else:
acts, = sample.extract_paths(keys=('actions',))
obs = self.encode_fn(obs, acts.argmax(axis=1))
return obs
return process_obs
def _process_discrim_output(self, score):
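        # Turn discriminator probabilities into a reward-like score:
        # log D - log(1 - D), standardized with the running mean/std estimated
        # in fit() and clipped to [-3, 3]; the raw logit is also returned.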
score = np.clip(score, 1e-7, 1-1e-7)
score = np.log(score) - np.log(1-score)
score = score[:, 0]
return np.clip((score - self.score_mean) / self.score_std, -3, 3), score
@overrides
def fit(self, paths, policy=None, batch_size=256, logger=None, lr=1e-3, itr=0, **kwargs):
if isinstance(self.expert_trajs[0], dict):
print("Warning: Processing state out of dictionary")
self._insert_next_state(self.expert_trajs)
expert_obs_base, expert_obs_next_base, expert_acts, expert_acts_next = \
self.extract_paths(self.expert_trajs, keys=(
'observations', 'observations_next',
'actions', 'actions_next'
))
else:
expert_obs_base, expert_obs_next_base, expert_acts, expert_acts_next, _ = \
self.expert_trajs
#expert_probs = paths.sampler.get_a_logprobs(
obs, obs_next, acts, acts_next, path_probs = paths.extract_paths((
'observations', 'observations_next', 'actions', 'actions_next', 'a_logprobs'
), obs_modifier=self.modify_obs)
expert_obs = expert_obs_base
expert_obs_next = expert_obs_next_base
raw_discrim_scores = []
# Train discriminator
for it in TrainingIterator(self.max_itrs, heartbeat=5):
nobs_batch, obs_batch, nact_batch, act_batch, lprobs_batch = \
self.sample_batch(obs_next, obs, acts_next, acts, path_probs, batch_size=batch_size)
nexpert_obs_batch, expert_obs_batch, nexpert_act_batch, expert_act_batch = \
self.sample_batch(
expert_obs_next,
expert_obs,
expert_acts_next,
expert_acts,
# expert_probs,
batch_size=batch_size
)
expert_lprobs_batch = paths.sampler.get_a_logprobs(
expert_obs_batch,
expert_act_batch
)
expert_obs_batch = self.modify_obs(expert_obs_batch)
nexpert_obs_batch = self.modify_obs(nexpert_obs_batch)
if self.encoder:
expert_obs_batch = self.encode_fn(expert_obs_batch, expert_act_batch.argmax(axis=1))
nexpert_obs_batch = self.encode_fn(nexpert_obs_batch, nexpert_act_batch.argmax(axis=1))
# Build feed dict
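            # First half of the batch is policy data (label 0), second half is
            # expert data (label 1); the discriminator learns to separate them.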
labels = np.zeros((batch_size*2, 1))
labels[batch_size:] = 1.0
obs_batch = np.concatenate([obs_batch, expert_obs_batch], axis=0)
nobs_batch = np.concatenate([nobs_batch, nexpert_obs_batch], axis=0)
act_batch = np.concatenate([act_batch, expert_act_batch], axis=0)
nact_batch = np.concatenate([nact_batch, nexpert_act_batch], axis=0)
lprobs_batch = np.expand_dims(np.concatenate([lprobs_batch, expert_lprobs_batch], axis=0), axis=1).astype(np.float32)
feed_dict = {
self.act_t: act_batch,
self.obs_t: obs_batch,
self.nobs_t: nobs_batch,
self.nact_t: nact_batch,
self.labels: labels,
self.lprobs: lprobs_batch,
self.lr: lr
}
loss, _, acc, scores = tf.get_default_session().run(
[self.loss, self.step, self.update_accuracy, self.discrim_output],
feed_dict=feed_dict
)
# we only want the average score for the non-expert demos
non_expert_slice = slice(0, batch_size)
score, raw_score = self._process_discrim_output(scores[non_expert_slice])
assert len(score) == batch_size
assert np.sum(labels[non_expert_slice]) == 0
raw_discrim_scores.append(raw_score)
it.record('loss', loss)
it.record('accuracy', acc)
it.record('avg_score', np.mean(score))
if it.heartbeat:
print(it.itr_message())
mean_loss = it.pop_mean('loss')
print('\tLoss:%f' % mean_loss)
mean_acc = it.pop_mean('accuracy')
print('\tAccuracy:%f' % mean_acc)
mean_score = it.pop_mean('avg_score')
if logger:
logger.record_tabular('GCLDiscrimLoss', mean_loss)
logger.record_tabular('GCLDiscrimAccuracy', mean_acc)
logger.record_tabular('GCLMeanScore', mean_score)
# set the center for our normal distribution
scores = np.hstack(raw_discrim_scores)
self.score_std = np.std(scores)
self.score_mean = np.mean(scores)
return mean_loss
@overrides
def eval(self, samples, show_grad=False, **kwargs):
"""
Return bonus
"""
if self.score_discrim:
obs, obs_next, acts, path_probs = samples.extract_paths(
('observations', 'observations_next', 'actions', 'a_logprobs'),
obs_modifier=self.modify_obs
)
path_probs = np.expand_dims(path_probs, axis=1)
scores = tf.get_default_session().run(
self.discrim_output,
feed_dict={
self.act_t: acts,
self.obs_t: obs,
self.nobs_t: obs_next,
self.lprobs: path_probs
}
)
score, _ = self._process_discrim_output(scores)
else:
obs, acts = samples.extract_paths(
('observations', 'actions'), obs_modifier=self.modify_obs
)
reward = tf.get_default_session().run(
self.reward, feed_dict={self.act_t: acts, self.obs_t: obs}
)
score = reward[:,0]
if np.isnan(np.mean(score)):
import pdb; pdb.set_trace()
# TODO(Aaron, maybe): do something with show_grad
return samples._ravel_train_batch_to_time_env_batch(score)
def policy_config(
name='policy',
policy=DiscreteIRLPolicy,
policy_model=CnnPolicy,
init_location=None,
):
return dict(
name=name,
policy=policy,
policy_model=policy_model,
init_location=init_location,
)
def reward_model_config(
*,
# These are serializable, but also there's no reasonably default value
# so we have to provide it
env_spec,
expert_trajs,
ablation='none',
model=AtariAIRL,
state_only=False,
reward_arch=cnn_net,
value_fn_arch=cnn_net,
score_discrim=True,
max_itrs=1000,
drop_framestack=False,
only_show_scores=False,
encoder_loc=None
):
return dict(
model=model,
state_only=state_only,
reward_arch=reward_arch,
value_fn_arch=value_fn_arch,
env_spec=env_spec,
expert_trajs=expert_trajs,
score_discrim=score_discrim,
max_itrs=max_itrs,
drop_framestack=drop_framestack,
only_show_scores=only_show_scores,
encoder_loc=encoder_loc
)
def training_config(
n_itr=1000,
discount=0.99,
batch_size=5000,
max_path_length=100,
entropy_weight=0.01,
step_size=0.01,
irl_model_wt=1.0,
zero_environment_reward=True,
buffer_batch_size=16,
policy_update_freq=1,
):
return dict(
n_itr=n_itr,
discount=discount,
batch_size=batch_size,
max_path_length=max_path_length,
entropy_weight=entropy_weight,
step_size=step_size,
store_paths=False,
irl_model_wt=irl_model_wt,
zero_environment_reward=zero_environment_reward,
buffer_batch_size=buffer_batch_size,
policy_update_freq=policy_update_freq,
)
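# Hedged sketch of how these config builders are typically combined (the
# `venv`/`trajs` names and the literal values below are illustrative):
#   kwargs, policy_cfg, reward_cfg, train_cfg = get_training_kwargs(
#       venv=venv,
#       reward_model_cfg={'expert_trajs': trajs, 'state_only': True},
#       policy_cfg={'policy_model': CnnPolicy},
#       training_cfg={'n_itr': 500},
#   )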
def make_irl_policy(policy_cfg, *, wrapped_venv, baselines_venv):
policy_fn = policy_cfg.pop('policy')
policy = policy_fn(
num_envs=baselines_venv.num_envs,
env_spec=wrapped_venv.spec,
wrapped_env_action_space=wrapped_venv.action_space,
action_space=baselines_venv.action_space,
observation_space=baselines_venv.observation_space,
**policy_cfg
)
# Put this back in so we don't get sad if we reuse the config
policy_cfg['policy'] = policy_fn
return policy
def make_irl_model(model_cfg):
model_kwargs = dict(model_cfg)
model_cls = model_kwargs.pop('model')
return model_cls(**model_kwargs)
Ablation = namedtuple('Ablation', [
'policy_modifiers', 'discriminator_modifiers', 'training_modifiers'
])
def get_ablation_modifiers(*, irl_model, ablation):
irl_reward_wrappers = [
wrap_env_with_args(VecRewardZeroingEnv),
#wrap_env_with_args(VecIRLRewardEnv, reward_network=irl_model)
]
# Default to wrapping the environment with the irl rewards
ablations = defaultdict(lambda: Ablation(
policy_modifiers={'baseline_wrappers': irl_reward_wrappers},
discriminator_modifiers={},
training_modifiers={}
))
ablations['train_rl'] = Ablation(
policy_modifiers={'baseline_wrappers': []},
discriminator_modifiers={},
training_modifiers={
'irl_model_wt': 0.0,
# TODO(Aaron): Figure out if this should be false...
'zero_environment_reward': True,
'skip_discriminator': True
}
)
ablations['train_discriminator'] = Ablation(
policy_modifiers={'baseline_wrappers': irl_reward_wrappers},
discriminator_modifiers={
'rescore_expert_trajs': False
},
training_modifiers={
'skip_policy_update': True
}
)
ablations['run_expert'] = Ablation(
policy_modifiers={'baseline_wrappers': []},
discriminator_modifiers={},
training_modifiers={
'irl_model_wt': 0.0,
            'zero_environment_reward': True,
'skip_discriminator': True,
'skip_policy_update': True
}
)
return ablations[ablation]
def add_ablation(cfg, ablation_modifiers):
for key in ablation_modifiers.keys():
if key in cfg and cfg[key] != ablation_modifiers[key]:
print(
f"Warning: Overriding provided value {cfg[key]} "
f"for {key} with {ablation_modifiers[key]} for ablation"
)
cfg.update(ablation_modifiers)
return cfg
def rllab_wrap_venv(envs):
return TfEnv(VecGymEnv(envs))
def get_training_kwargs(
*,
venv,
ablation='normal', nsteps_sampler=128, nsteps_model=128,
reward_model_cfg={}, policy_cfg={}, training_cfg={}
):
envs = rllab_wrap_venv(venv)
policy_cfg = policy_config(**policy_cfg)
reward_model_cfg = reward_model_config(
env_spec=envs.spec,
ablation=ablation,
**reward_model_cfg
)
training_cfg = training_config(**training_cfg)
# Unfortunately we need to construct a reward model in order to handle the
# ablations, since in the normal case we need to pass it as an argument to
# the policy in order to wrap its environment and look at the irl rewards
irl_model = make_irl_model(reward_model_cfg)
# Handle the ablations and default value overrides
ablation_modifiers = get_ablation_modifiers(
irl_model=irl_model, ablation=ablation
)
irl_model.change_kwargs(**ablation_modifiers.discriminator_modifiers)
# Construct our fixed training keyword arguments. Other values for these
# are incorrect
baseline_wrappers = ablation_modifiers.policy_modifiers.pop(
'baseline_wrappers'
)
if irl_model.encoder and policy_cfg['policy_model'] == MlpPolicy:
baseline_wrappers += [wrap_env_with_args(EncoderWrappedEnv, encoder=irl_model.encoder)]
baselines_venv = venv
for fn in baseline_wrappers + [wrap_env_with_args(VecOneHotEncodingEnv, dim=6)]:
print("Wrapping baseline with function")
baselines_venv = fn(baselines_venv)
assert nsteps_sampler % nsteps_model == 0
batching_config = training.make_batching_config(
nenvs=baselines_venv.num_envs,
nsteps=nsteps_model,
noptepochs=4,
nminibatches=4
)
policy_cfg['batching_config'] = batching_config
policy_cfg['encoder'] = irl_model.encoder
training_kwargs = dict(
env=envs,
policy=make_irl_policy(
add_ablation(policy_cfg, ablation_modifiers.policy_modifiers),
wrapped_venv=envs,
baselines_venv=baselines_venv
),
irl_model=irl_model,
baseline=ZeroBaseline(env_spec=envs.spec),
ablation=ablation,
sampler_args=dict(
baselines_venv=baselines_venv,
nsteps=nsteps_sampler,
),
optimizer_args=dict(
batching_config=batching_config,
lr=3e-4,
cliprange=0.2,
total_timesteps=10e6
),
)
training_kwargs.update(
add_ablation(training_cfg, ablation_modifiers.training_modifiers)
)
if policy_cfg['init_location']:
snapshot = joblib.load(open(policy_cfg['init_location'], 'rb'))
training_kwargs['init_pol_params'] = snapshot['policy_params']['tf_params']
return training_kwargs, policy_cfg, reward_model_cfg, training_cfg
class IRLRunner(IRLTRPO):
def __init__(
self,
*args,
ablation='none',
skip_policy_update=False,
skip_discriminator=False,
optimizer=None,
optimizer_args={},
buffer_batch_size=16,
policy_update_freq=1,
**kwargs
):
if optimizer is None:
optimizer = optimizers.PPOOptimizer(**optimizer_args)
IRLTRPO.__init__(self, *args, optimizer=optimizer, **kwargs)
self.ablation = ablation
self.skip_policy_update = skip_policy_update
self.skip_discriminator = skip_discriminator
self.buffer_batch_size = buffer_batch_size
self.policy_update_freq = policy_update_freq
@overrides
def init_opt(self):
self.optimizer.update_opt(self.policy)
@overrides
def get_itr_snapshot(self, itr):
return dict(
itr=itr,
ablation=self.ablation,
reward_params=self.irl_model.get_itr_snapshot(),
policy_params=self.policy.get_itr_snapshot()
)
def restore_from_snapshot(self, snapshot):
data = joblib.load(open(snapshot, 'rb'))
self.irl_model.restore_from_snapshot(data['reward_params'])
self.policy.restore_from_snapshot(data['policy_params'])
@overrides
def obtain_samples(self, itr):
return super(IRLRunner, self).obtain_samples(itr)
@overrides
def optimize_policy(self, itr, samples):
self.optimizer.optimize_policy(itr, samples)
@overrides
def compute_irl(self, samples, itr=0):
if self.no_reward:
logger.record_tabular(
'OriginalTaskAverageReturn', samples.sampler.mean_reward
)
samples.rewards *= 0
        if self.irl_model_wt <= 0:
            return samples
        if self.train_irl:
            max_itrs = self.discrim_train_itrs
            lr = 1e-3
mean_loss = self.irl_model.fit(
samples,
policy=self.policy, itr=itr, max_itrs=max_itrs, lr=lr,
logger=logger
)
logger.record_tabular('IRLLoss', mean_loss)
self.__irl_params = self.irl_model.get_params()
#probs = self.irl_model.eval(samples, gamma=self.discount, itr=itr)
#probs_flat = np.concatenate(probs) # trajectory length varies
#logger.record_tabular('IRLRewardMean', np.mean(probs_flat))
#logger.record_tabular('IRLRewardMax', np.max(probs_flat))
#logger.record_tabular('IRLRewardMin', np.min(probs_flat))
#if self.irl_model_wt > 0.0:
# samples.rewards += self.irl_model_wt * probs
return samples
def _train_setup(self):
sess = tf.get_default_session()
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
if self.init_pol_params is not None:
self.policy.restore_param_values(self.init_pol_params)
if self.init_irl_params is not None:
self.irl_model.set_params(self.init_irl_params)
self.start_worker()
return time.time()
def _log_train_itr(self, itr, start_time, itr_start_time):
logger.record_tabular(
"PolicyBufferOriginalTaskRewardMean",
self.sampler.mean_reward
)
logger.record_tabular(
"PolicyBufferEpisodeLengthMean",
self.sampler.mean_length
)
if itr % 10 == 0 and itr != 0:
logger.log("Saving snapshot...")
params = self.get_itr_snapshot(itr)
if self.store_paths:
raise NotImplementedError
logger.save_itr_params(itr, params)
logger.log(f"Saved in directory {logger.get_snapshot_dir()}")
logger.record_tabular('Time', time.time() - start_time)
logger.record_tabular('ItrTime', time.time() - itr_start_time)
logger.dump_tabular(with_prefix=False)
if self.plot:
self.update_plot()
if self.pause_for_plot:
input("Plotting evaluation run: Press Enter to "
"continue...")
def train(self):
start_time = self._train_setup()
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
with logger.prefix('itr #%d | ' % itr):
logger.record_tabular('Itr', itr)
logger.log("Obtaining samples...")
samples = self.obtain_samples(itr)
if not self.skip_discriminator:
logger.log("Optimizing discriminator...")
# The fact that we're not using the reward labels from here
# means that the policy optimization is effectively getting
# an off-by-one issue. I'm not sure that this would fix the
# issues that we're seeing, but it's definitely different
# from the original algorithm and so we should try fixing
# it anyway.
samples = self.compute_irl(samples, itr=itr)
if not self.skip_policy_update:
logger.log("Optimizing policy...")
# Another issue is that the expert trajectories start from
# a particular set of random seeds, and that controls how
# the resets happen. This means that the difference between
# environment seeds might be enough to make the
# discriminator's job easy.
self.optimize_policy(itr, samples)
self._log_train_itr(
itr,
start_time=start_time,
itr_start_time=itr_start_time
)
def buffered_sample_train_policy(self, itr, ppo_itr, buffer):
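        # Collect buffer_batch_size PPO batches, relabel their rewards with the
        # discriminator (unless it is skipped), add them to the buffer, and
        # step the PPO policy on every policy_update_freq-th batch (policy
        # updates are held off during the first outer iteration unless the
        # discriminator is disabled).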
for i in range(self.buffer_batch_size):
batch = self.obtain_samples(ppo_itr)
logger.log(f"Sampled iteration {i}")
train_itr = (itr > 0 or self.skip_discriminator) and i % self.policy_update_freq == 0
if not self.skip_discriminator:
# overwrite the rewards with the IRL model
if self.irl_model_wt > 0.0:
#logger.log("Overwriting batch rewards...")
assert np.isclose(np.sum(batch.rewards), 0)
if not self.skip_policy_update and train_itr:
batch.rewards *= 0
batch.rewards += self.irl_model.eval(batch, gamma=self.discount, itr=itr)
logger.log(f"GCL Score Average: {np.mean(batch.rewards)}")
buffer.add(batch)
if not self.skip_policy_update and train_itr:
logger.log("Optimizing policy...")
self.optimize_policy(ppo_itr, batch)
ppo_itr += 1
del batch
return ppo_itr
def buffered_train(self):
start_time = self._train_setup()
ppo_itr = 0
buffer = None
if not self.skip_discriminator:
batch = self.obtain_samples(ppo_itr)
buffer = sampling.PPOBatchBuffer(batch, self.buffer_batch_size)
logger.log("Buffer initialized")
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
with logger.prefix('itr #%d | ' % itr):
logger.record_tabular('Itr', itr)
logger.log("Obtaining samples...")
ppo_itr = self.buffered_sample_train_policy(
itr, ppo_itr, buffer
)
if not self.skip_discriminator:
logger.log("Optimizing discriminator...")
# The fact that we're not using the reward labels from here
# means that the policy optimization is effectively getting
# an off-by-one issue. I'm not sure that this would fix the
# issues that we're seeing, but it's definitely different
# from the original algorithm and so we should try fixing
# it anyway.
self.compute_irl(buffer, itr=itr)
self._log_train_itr(
itr,
start_time=start_time,
itr_start_time=itr_start_time
)
# Heavily based on implementation in https://github.com/HumanCompatibleAI/population-irl/blob/master/pirl/irl/airl.py
def airl(
log_dir,
*,
tf_cfg, env_config, reward_model_cfg={}, policy_cfg={}, training_cfg={},
ablation='normal'
):
with TfEnvContext(tf_cfg, env_config) as context:
training_kwargs, policy_cfg, reward_model_cfg, training_cfg = get_training_kwargs(
venv=context.env_context.environments,
reward_model_cfg=reward_model_cfg,
policy_cfg=policy_cfg,
training_cfg=training_cfg,
ablation=ablation,
)
print("Training arguments: ", training_kwargs)
algo = IRLRunner(
**training_kwargs,
sampler_cls=sampling.PPOBatchSampler,
)
irl_model = algo.irl_model
policy = algo.policy
with rllab_logdir(algo=algo, dirname=log_dir):
print("Training!")
algo.buffered_train()
#algo.train()
# need to return these explicitly because they don't survive
# across tensorflow sessions
reward_params = irl_model.get_params()
policy_params = policy.tensor_values()
policy = policy_cfg, policy_params
reward = reward_model_cfg, reward_params
return reward, policy
| 40,339 | 35.407942 | 129 |
py
|
atari-irl
|
atari-irl-master/atari_irl/behavioral_cloning.py
|
from atari_irl import encoding, utils
import tensorflow as tf
import joblib
from airl.models.architectures import relu_net
import os.path as osp
import numpy as np
from gym.spaces import Discrete
from baselines.common.distributions import make_pdtype
cnn_fn = lambda obs_tensor, n_actions: encoding.dcgan_cnn(obs_tensor, n_actions)[0]
relu_fn = lambda obs_tensor, n_actions: relu_net(obs_tensor, layers=4, dout=n_actions)
class Cloner:
def __init__(self, *, obs_shape, n_actions, encoding_fn=cnn_fn, **conv_kwargs):
self.obs_dtype = tf.float32
self.obs_shape = obs_shape
self.n_actions = n_actions
self.kwargs = {
'obs_shape': obs_shape,
'n_actions': n_actions
}
with tf.variable_scope('behavioral_cloning') as scope:
self.obs_t = tf.placeholder(self.obs_dtype, list((None,) + self.obs_shape), name='obs')
self.logits = encoding_fn(self.obs_t, self.n_actions)
self.logp_class = tf.nn.log_softmax(self.logits)
self.act_t = tf.placeholder(tf.float32, [None, self.n_actions], name='act')
# Optimization
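            # Behavioral-cloning objective: maximize the log-likelihood of the
            # expert action under logp_class, i.e. cross-entropy against the
            # one-hot expert actions.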
log_loss_example = tf.reduce_sum(self.act_t * self.logp_class, 1, keepdims=True)#[1, 2])
print(log_loss_example.shape)
self.loss = -tf.reduce_mean(log_loss_example)
self.lr = tf.placeholder(tf.float64, (), name='lr')
self.opt_step = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
# Actions
self.pd, self.pi = make_pdtype(Discrete(n_actions)).pdfromlatent(self.logp_class)
self.a = self.pd.sample()
self.neglogp = self.pd.neglogp(self.a)
self.params = tf.trainable_variables(scope='behavioral_cloning')
def save(self, save_path):
print(f"Saving to {save_path}")
ps = tf.get_default_session().run(self.params)
joblib.dump({'params': ps, 'kwargs': self.kwargs}, save_path)
@classmethod
def load(cls, load_path, **kwargs):
data = joblib.load(load_path)
kwargs.update(data['kwargs'])
self = cls(**kwargs)
loaded_params = data['params']
restores = []
for p, loaded_p in zip(self.params, loaded_params):
restores.append(p.assign(loaded_p))
tf.get_default_session().run(restores)
return self
def train_step(self, *, lr, obs, act):
loss, _ = tf.get_default_session().run(
[self.loss, self.opt_step], feed_dict={
self.obs_t: obs,
self.act_t: act,
self.lr: lr
}
)
return loss
def step(self, obs):
actions, neglogps = tf.get_default_session().run(
[self.a, self.neglogp], feed_dict={
self.obs_t: obs
}
)
return actions, np.zeros(obs.shape[0]), None, neglogps
def check(self, obs):
actions, neglogps, logits = tf.get_default_session().run(
[self.a, self.neglogp, self.logits], feed_dict={
self.obs_t: obs
}
)
return actions, neglogps, logits
def train(self, *, obs, act, lr=1e-4, batch_size=1024, epochs=500):
T = obs.shape[0]
n_batches = (T // batch_size) - 1
print(f"Splitting {T} timesteps into {n_batches} batches")
for e in range(epochs):
order = np.random.permutation(T)
obs = obs[order]
act = act[order]
losses = []
            for b in range(n_batches):
lr *= .9995
s = slice(batch_size*b, batch_size*(b+1))
loss = self.train_step(lr=lr, obs=obs[s], act=act[s])
if b % 50 == 0:
print(f"Epoch {e} batch {b}: {loss}")
losses.append(loss)
print(f"Epoch {e}: {np.mean(losses)}")
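# Hedged usage sketch (assumes an active TF session and expert arrays shaped
# like the placeholders above; the literal shapes and names are illustrative):
#   cloner = Cloner(obs_shape=(84, 84, 4), n_actions=6)
#   tf.get_default_session().run(tf.global_variables_initializer())
#   cloner.train(obs=expert_obs, act=expert_acts_one_hot, epochs=10)
#   cloner.save('clone.pkl')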
| 4,034 | 36.361111 | 100 |
py
|
atari-irl
|
atari-irl-master/scripts/train_airl.py
|
from atari_irl import utils, environments, irl
from arguments import add_atari_args, add_trajectory_args, add_irl_args, env_context_for_args
import argparse
from baselines import logger
import tensorflow as tf
import pickle
import joblib
from baselines.ppo2.policies import CnnPolicy, MlpPolicy
from atari_irl.irl import cnn_net, mlp_net
def train_airl(args):
tf_cfg = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=args.n_cpu,
inter_op_parallelism_threads=args.n_cpu,
device_count={'GPU': 1},
log_device_placement=False
)
tf_cfg.gpu_options.allow_growth = True
logger.configure()
reward, policy_params = irl.airl(
logger.get_dir(),
tf_cfg=tf_cfg,
training_cfg={
'n_itr': args.n_iter,
'batch_size': args.batch_size,
'entropy_weight': args.entropy_wt,
'buffer_batch_size': args.ppo_itrs_in_batch,
'policy_update_freq': args.policy_update_freq
},
env_config={
'env_name': args.env,
'n_envs': args.num_envs,
'seed': args.seed,
'one_hot_code': args.one_hot_code
},
policy_cfg={
'init_location': None if args.init_location == 'none' else args.init_location,
'policy_model': CnnPolicy if args.policy_type == 'cnn' else MlpPolicy
},
reward_model_cfg={
'expert_trajs': joblib.load(open(args.trajectories_file, 'rb')),
'state_only': args.state_only,
'drop_framestack': args.drop_discriminator_framestack,
'only_show_scores': args.only_show_discriminator_scores,
'reward_arch': cnn_net if args.reward_type == 'cnn' else mlp_net,
'value_fn_arch': cnn_net if args.reward_type == 'cnn' else mlp_net,
'encoder_loc': None if not args.encoder else args.encoder,
'max_itrs': args.discriminator_itrs
},
ablation=args.ablation
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_trajectory_args(parser)
add_irl_args(parser)
args = parser.parse_args()
train_airl(args)
| 2,290 | 33.712121 | 93 |
py
|
atari-irl
|
atari-irl-master/scripts/train_ae.py
|
from atari_irl import utils, encoding
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from baselines.ppo2 import ppo2
import joblib
import os.path as osp
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--env', help='environment', type=str, default='PongNoFrameskip-v4')
parser.add_argument('--num_envs', help='number of environments', type=int, default=8)
parser.add_argument('--seed', help='random seed', type=int, default=0)
parser.add_argument(
'--encoder_type',
help='type of encoder',
choices=['score_trimmed', 'next_step', 'non_pixel_class', 'pixel_class'],
type=str, default='pixel_class'
)
args = parser.parse_args()
encoder = encoding.VariationalAutoEncoder
if args.encoder_type == 'score_trimmed':
encoder = encoding.ScoreTrimmedVariationalAutoEncoder
elif args.encoder_type == 'next_step':
encoder = encoding.NextStepVariationalAutoEncoder
elif args.encoder_type == 'non_pixel_class':
encoder = encoding.NormalAutoEncoder
tf_cfg = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=8,
inter_op_parallelism_threads=8,
device_count={'GPU': 1},
log_device_placement=False
)
tf_cfg.gpu_options.allow_growth = True
env_cfg = {
'env_name': args.env,
'n_envs': args.num_envs,
'seed': args.seed,
'one_hot_code': False
}
extra_args = {}
if args.encoder_type == 'non_pixel_class' and 'Pong' in args.env:
extra_args['trim_score'] = True
with utils.TfEnvContext(tf_cfg, env_cfg) as context:
utils.logger.configure()
dirname = utils.logger.get_dir()
print(f"logging in {dirname}")
vae = encoder(
obs_shape=context.env_context.environments.observation_space.shape,
d_embedding=40,
d_classes=30,
embedding_weight=0.001,
**extra_args
)
LR = 1e-4
def args(*, lr, obs, noise_scale=.2):
return {
'lr': lr,
'obs': obs,
'noise': np.random.randn(obs.shape[0], vae.d_embedding) * noise_scale
}
tf.get_default_session().run(tf.local_variables_initializer())
tf.get_default_session().run(tf.global_variables_initializer())
buffer = deque(maxlen=100)
env = context.env_context.environments
env.reset()
num_timesteps = 2500
losses = []
for i in range(num_timesteps):
lr = LR * (num_timesteps - i) * 1.0 / num_timesteps
obs = []
for t in range(32):
acts = [env.action_space.sample() for _ in range(env.num_envs)]
obs.append(env.step(acts)[0])
obs_arr = np.array(obs).astype(np.uint8)
obs.clear()
del obs
obs_batch = ppo2.sf01(obs_arr)
del obs_arr
if i % 100 == 0:
img, loss = vae.compare(obs_batch, disp_p=.1)
losses.append((i, loss))
utils.logger.info(losses[-1])
fname = osp.join(dirname, f'vae_{i}.pkl')
utils.logger.info(f"Saving vae in {fname}")
vae.save(fname)
joblib.dump(img, open(osp.join(dirname, f'img_sample_{i}.pkl'), 'wb'))
joblib.dump(losses, open(osp.join(dirname, f'losses_{i}.pkl'), 'wb'))
utils.logger.info(f"{i}: {vae.train_step(**args(lr=lr, obs=obs_batch, noise_scale=0.0))}")
buffer.append(obs_batch)
for bi in range(len(buffer)):
loss = vae.train_step(**args(lr=lr, obs=buffer[bi], noise_scale=0.0))
| 3,580 | 32.157407 | 98 |
py
|
atari-irl
|
atari-irl-master/scripts/arguments.py
|
#from atari_irl import utils, environments, training, policies, irl
from atari_irl import utils, environments
def add_bool_feature(parser, name, default=True):
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument('--' + name, dest=name, action='store_true')
feature_parser.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default})
def add_atari_args(parser):
# see baselines.common.cmd_util
parser.add_argument('--env', help='environment ID', default='PongNoFrameskip-v4')
parser.add_argument('--n_cpu', help='Number of CPUs', default=8)
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--num_envs', type=int, default=8)
parser.add_argument('--policy_type', default='cnn')
parser.add_argument('--ent_coef', type=float, default=0.01)
add_bool_feature(parser, 'one_hot_code')
def add_trajectory_args(parser):
parser.add_argument('--num_trajectories', help='number of trajectories', type=int, default=8)
parser.add_argument(
'--trajectories_file',
help='file to write the trajectories to',
default='trajectories.pkl'
)
add_bool_feature(parser, 'render')
def add_expert_args(parser):
parser.add_argument(
'--expert_path',
help='file for the expert policy',
default='experts/new_expert'
)
parser.add_argument(
'--nsteps',
help='length of time in a minibatch step',
type=int, default=128
)
parser.add_argument(
'--expert_type',
help='type of the expert',
choices=['baselines_ppo', 'irl', 'clone', 'random'],
default='baselines_ppo'
)
def add_irl_args(parser):
parser.add_argument('--irl_seed', help='seed for the IRL tensorflow session', type=int, default=0)
parser.add_argument(
'--irl_policy_file',
help='filename for the IRL policy',
default='irl_policy_params.pkl'
)
parser.add_argument(
'--irl_reward_file',
help='filename for the IRL reward',
default='irl_reward.pkl'
)
parser.add_argument('--discount', help='discount rate for the IRL policy', default=.99)
parser.add_argument(
'--n_iter',
help='number of iterations for irl training',
type=int, default=500
)
parser.add_argument(
'--batch_size',
help='batch size for each iteration',
type=int, default=5000
)
parser.add_argument(
'--ablation',
help='what ablation to run',
choices=['none', 'train_rl', 'train_discriminator', 'run_expert'],
type=str, default='none'
)
parser.add_argument(
'--entropy_wt',
help='entropy_weight',
type=float, default=0.01
)
parser.add_argument(
'--init_location',
help='location to initialize training from',
type=str, default='none'
)
parser.add_argument(
'--encoder',
help='encoder location',
type=str, default=''
)
parser.add_argument(
'--discriminator_itrs',
help='number of iterations for discriminator',
type=int, default=100
)
parser.add_argument(
'--ppo_itrs_in_batch',
help='number of PPO steps in batch',
type=int, default=1
)
parser.add_argument(
'--policy_update_freq',
help='how frequently to update the PPO policy while doing batched sampling',
type=int, default=1
)
parser.add_argument('--reward_type', default='cnn')
add_bool_feature(parser, 'state_only', default=False)
add_bool_feature(parser, 'drop_discriminator_framestack', default=False)
add_bool_feature(parser, 'only_show_discriminator_scores', default=False)
def env_context_for_args(args):
env_modifiers = environments.env_mapping[args.env]
if args.one_hot_code:
env_modifiers = environments.one_hot_wrap_modifiers(env_modifiers)
return utils.EnvironmentContext(
env_name=args.env,
n_envs=args.num_envs,
seed=args.seed,
**env_modifiers
)
def tf_context_for_args(args):
return utils.TfContext(
ncpu=args.n_cpu
)
| 4,330 | 31.320896 | 102 |
py
|
atari-irl
|
atari-irl-master/scripts/trajectory_to_gif.py
|
import numpy as np
import pickle
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import argparse
def make_gif(traj, fname, title=''):
fig, ax = plt.subplots(figsize=(2, 2))
def update(i):
if i % 20 == 0:
print(i)
im_normed = traj[i]
ax.imshow(im_normed)
ax.set_title(title, fontsize=20)
ax.set_axis_off()
anim = FuncAnimation(fig, update, frames=np.arange(0, min(1000, len(traj))), interval=50)
anim.save(fname, dpi=80, writer='imagemagick')
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--trajectories_file', default='trajectories.pkl')
parser.add_argument('--output_file', default='trajectories.gif')
parser.add_argument('--title', default='')
args = parser.parse_args()
ts = pickle.load(open(args.trajectories_file, 'rb'))
make_gif(ts[0]['observations'], args.output_file, args.title)
| 1,064 | 28.583333 | 93 |
py
|
atari-irl
|
atari-irl-master/scripts/train_expert.py
|
from atari_irl import utils, training, policies
import argparse
from arguments import add_atari_args, add_expert_args, env_context_for_args, tf_context_for_args
from baselines.ppo2.policies import MlpPolicy, CnnPolicy
import os.path as osp
import os
def train_expert(args):
utils.logger.configure()
with tf_context_for_args(args):
with env_context_for_args(args) as context:
learner = training.Learner(
CnnPolicy if args.policy_type == 'cnn' else MlpPolicy,
context.environments,
total_timesteps=args.num_timesteps,
vf_coef=0.5, ent_coef=args.ent_coef,
nsteps=args.nsteps, noptepochs=4, nminibatches=4,
gamma=0.99, lam=0.95,
lr=lambda alpha: alpha * 2.5e-4,
cliprange=lambda alpha: alpha * 0.1
)
for policy, update, mean_reward in learner.learn_and_yield(
lambda l: (l.policy, l.update, l.mean_reward), 100,
log_freq=1
):
checkdir = osp.join(utils.logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, 'update-{}'.format(update))
print('Saving to', savepath)
policy.save(
savepath,
mean_reward=learner.mean_reward,
update=update,
seed=args.seed
)
policy.save(
args.expert_path,
mean_reward=learner.mean_reward, update=update, seed=args.seed
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_expert_args(parser)
args = parser.parse_args()
assert not args.one_hot_code
train_expert(args)
| 1,935 | 34.851852 | 96 |
py
|
atari-irl
|
atari-irl-master/scripts/generate_trajectories.py
|
from atari_irl import utils, policies, environments, irl, training, sampling, behavioral_cloning
import pickle
from arguments import add_atari_args, add_trajectory_args, add_expert_args, tf_context_for_args, env_context_for_args
import argparse
import tensorflow as tf
import joblib
from baselines.ppo2.policies import CnnPolicy, MlpPolicy
from atari_irl.irl import cnn_net
from airl.models.architectures import relu_net
def generate_trajectories(args):
# environments are not one hot coded, so we don't wrap this
env_modifiers = environments.env_mapping[args.env]
#if args.expert_type == 'irl':
# env_modifiers = environments.one_hot_wrap_modifiers(env_modifiers)
utils.logger.configure()
with utils.TfContext(ncpu=args.n_cpu):
with utils.EnvironmentContext(
env_name=args.env,
n_envs=args.num_envs,
seed=args.seed,
**env_modifiers
) as context:
if args.expert_type == 'baselines_ppo':
policy = policies.EnvPolicy.load(args.expert_path, context.environments)
model = policy.model
envs = policy.envs
elif args.expert_type == 'irl':
policy_cfg = irl.policy_config(
init_location=args.expert_path
)
policy_cfg['batching_config'] = training.make_batching_config(
nenvs=args.num_envs,
nsteps=128,
noptepochs=4,
nminibatches=4
)
irl_policy = irl.make_irl_policy(
policy_cfg,
wrapped_venv=irl.rllab_wrap_venv(context.environments),
baselines_venv=context.environments
)
sess = tf.get_default_session()
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
irl_policy.restore_from_snapshot(
joblib.load(open(args.expert_path, 'rb'))['policy_params']
)
model = irl_policy.model
envs = context.environments
elif args.expert_type == 'clone':
model = behavioral_cloning.Cloner.load(args.expert_path)
envs = context.environments
elif args.expert_type == 'random':
envs = context.environments
model = policies.RandomPolicy(envs.action_space)
else:
raise NotImplementedError
ts = policies.sample_trajectories(
model=model,
environments=envs,
n_trajectories=args.num_trajectories,
one_hot_code=args.one_hot_code,
render=args.render
)
pickle.dump(ts, open(args.trajectories_file, 'wb'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_expert_args(parser)
add_trajectory_args(parser)
args = parser.parse_args()
generate_trajectories(args)
| 3,171 | 37.216867 | 117 |
py
|
atari-irl
|
atari-irl-master/scripts/cache_trajectories.py
|
from atari_irl import sampling, irl, utils
from arguments import add_atari_args, add_trajectory_args, add_irl_args, env_context_for_args
import argparse
from baselines import logger
import tensorflow as tf
import numpy as np
import pickle
import joblib
from baselines.ppo2.policies import CnnPolicy, MlpPolicy
from atari_irl.irl import cnn_net
from airl.models.architectures import relu_net
def train_airl(args):
tf_cfg = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=args.n_cpu,
inter_op_parallelism_threads=args.n_cpu,
device_count={'GPU': 1},
log_device_placement=False
)
tf_cfg.gpu_options.allow_growth = True
env_config = {
'env_name': args.env,
'n_envs': args.num_envs,
'seed': args.seed,
'one_hot_code': args.one_hot_code
}
with utils.TfEnvContext(tf_cfg, env_config) as context:
ts = joblib.load(open(args.trajectories_file, 'rb'))
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=context.env_context.environments,
training_cfg={
'n_itr': args.n_iter,
'batch_size': args.batch_size,
'entropy_weight': args.entropy_wt
},
policy_cfg={
'init_location': None if args.init_location == 'none' else args.init_location,
'policy_model': CnnPolicy if args.policy_type == 'cnn' else MlpPolicy
},
reward_model_cfg={
'expert_trajs': ts,
'state_only': args.state_only,
'drop_framestack': args.drop_discriminator_framestack,
'only_show_scores': args.only_show_discriminator_scores,
'reward_arch': cnn_net if args.policy_type == 'cnn' else relu_net,
'value_fn_arch': cnn_net if args.policy_type == 'cnn' else relu_net
},
ablation=args.ablation
)
algo = irl.IRLRunner(
**training_kwargs,
sampler_cls=sampling.PPOBatchSampler,
)
def fill_trajectories(paths):
algo.irl_model.eval_expert_probs(paths, algo.policy, insert=True)
algo.irl_model._insert_next_state(paths)
fill_trajectories(ts)
for t in ts:
del t['agent_infos']
T = len(ts)
ans = []
keys = ('observations', 'observations_next', 'actions', 'actions_next', 'a_logprobs')
for key in keys:
print(key)
batch = []
for i in range(T):
batch.append(ts[i][key].copy())
del ts[i][key]
ans.append(np.concatenate(batch).astype(np.float32))
for i in reversed(range(len(batch))):
del batch[i]
del batch
joblib.dump(ans, open(args.cache_path, 'wb'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_trajectory_args(parser)
add_irl_args(parser)
parser.add_argument('--cache_path', default='cache.pkl')
args = parser.parse_args()
train_airl(args)
| 3,211 | 35.089888 | 94 |
py
|
atari-irl
|
atari-irl-master/scripts/train_clone.py
|
import argparse
import tensorflow as tf
from atari_irl import utils, behavioral_cloning
import os.path as osp
import joblib
from arguments import add_atari_args, add_trajectory_args, add_expert_args, tf_context_for_args, env_context_for_args
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_expert_args(parser)
add_trajectory_args(parser)
parser.add_argument('--clone_path', default='clone.pkl')
parser.add_argument('--epochs', default=500, type=int)
args = parser.parse_args()
tf_cfg = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=args.n_cpu,
inter_op_parallelism_threads=args.n_cpu,
device_count={'GPU': 1},
log_device_placement=False
)
tf_cfg.gpu_options.allow_growth = True
env_config = {
'env_name': args.env,
'n_envs': args.num_envs,
'seed': args.seed,
'one_hot_code': args.one_hot_code
}
with utils.TfEnvContext(tf_cfg, env_config) as context:
utils.logger.configure()
#encoder = encoding.NextStepVariationalAutoEncoder.load('../scripts/encoders/run3/vae_850.pkl')
expert_obs_base, expert_obs_next_base, expert_acts, expert_acts_next, _ = \
joblib.load(args.trajectories_file)
del expert_obs_next_base
del expert_acts_next
del _
clone = behavioral_cloning.Cloner(
obs_shape=expert_obs_base.shape[1:],
n_actions=context.env_context.environments.action_space.n
)
tf.get_default_session().run(tf.local_variables_initializer())
tf.get_default_session().run(tf.global_variables_initializer())
obs = expert_obs_base
act = expert_acts
clone.train(obs=obs, act=act, epochs=args.epochs)
clone.save(args.clone_path)
| 1,942 | 32.5 | 117 |
py
|
atari-irl
|
atari-irl-master/scripts/run_irl_policy.py
|
from atari_irl import utils, environments, irl
import pickle
from arguments import add_atari_args, add_trajectory_args, add_irl_args, tf_context_for_args, env_context_for_args
import argparse
import tensorflow as tf
from sandbox.rocky.tf.envs.base import TfEnv
def run_irl_policy(args):
with tf_context_for_args(args):
with env_context_for_args(args) as context:
envs = environments.VecGymEnv(context.environments)
envs = TfEnv(envs)
policy = irl.make_irl_policy(
irl.policy_config(),
envs=envs,
sess=tf.get_default_session()
)
policy.restore_param_values(args.irl_policy_file)
policy.show_run_in_gym_env(context.environments)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_atari_args(parser)
add_irl_args(parser)
args = parser.parse_args()
run_irl_policy(args)
| 1,006 | 30.46875 | 114 |
py
|
atari-irl
|
atari-irl-master/tests/test_irl.py
|
from atari_irl import irl, utils, environments, policies, training, sampling
import tensorflow as tf
import numpy as np
import pickle
from baselines.ppo2 import ppo2
def assert_trajectory_formatted(samples):
print(f"Found {len(samples)} trajectories")
for sample in samples:
assert 'observations' in sample
assert 'actions' in sample
T = len(sample['observations'])
print(f"\tFound trajectory of length {T}")
if not hasattr(sample['observations'], 'shape'):
print("\tTime index is list, not numpy dimension")
assert np.array(sample['observations']).shape == (T, 84, 84, 4)
assert np.array(sample['actions']).shape == (T, 6)
class TestAtariIRL:
def setup_method(self, method):
self.env = 'PongNoFrameskip-v4'
env_modifiers = environments.env_mapping[self.env]
self.env_modifiers = environments.one_hot_wrap_modifiers(env_modifiers)
self.config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=8,
inter_op_parallelism_threads=8,
device_count={'GPU': 1},
)
self.config.gpu_options.allow_growth=True
def test_sample_shape(self):
def check_base_policy_sampler(algo, env_context):
print("Checking straightforward policy trajectory sampler")
policy_samples = policies.sample_trajectories(
model=algo.policy.learner.model,
environments=env_context.environments,
one_hot_code=True,
n_trajectories=10,
render=False
)
assert len(policy_samples) == 10
assert_trajectory_formatted(policy_samples)
def check_irl_discriminator_sampler(algo, env_context):
print("Checking discriminator sampler")
#env_context.environments.reset()
algo.start_worker()
irl_discriminator_samples = algo.obtain_samples(0)
assert_trajectory_formatted(irl_discriminator_samples)
with utils.EnvironmentContext(
env_name=self.env, n_envs=8, seed=0, **self.env_modifiers
) as env_context:
with irl.IRLContext(self.config, env_config={
'seed': 0,
'env_name': 'PongNoFrameskip-v4',
'one_hot_code': True
}):
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=env_context.environments,
reward_model_cfg={
'expert_trajs': pickle.load(open('scripts/short_trajectories.pkl', 'rb')),
}
)
print("Training arguments: ", training_kwargs)
algo = irl.IRLRunner(**training_kwargs)
check_base_policy_sampler(algo, env_context)
check_irl_discriminator_sampler(algo, env_context)
def test_vectorized_sampler_processing_to_ppo_results(self):
with utils.EnvironmentContext(
env_name=self.env, n_envs=1, seed=0, **self.env_modifiers
) as env_context:
with irl.IRLContext(self.config, env_config={
'seed': 0,
'env_name': 'PongNoFrameskip-v4',
'one_hot_code': True
}):
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=env_context.environments,
reward_model_cfg={
'expert_trajs': pickle.load(open('scripts/short_trajectories.pkl', 'rb')),
}
)
training_kwargs['batch_size'] = 50
print("Training arguments: ", training_kwargs)
env_context.environments.reset()
algo = irl.IRLRunner(**training_kwargs)
algo.start_worker()
vectorized_samples = algo.obtain_samples(0)
# check some basic things about the vectorized samples
# We should only have one path
assert len(vectorized_samples) == 1
assert_trajectory_formatted(vectorized_samples)
# It shouldn't be super short
assert len(vectorized_samples[0]['actions']) > 100
sampler = sampling.PPOBatchSampler(
model=algo.policy.learner.model,
env=env_context.environments,
nsteps=128*env_context.environments.num_envs
)
# These are very different because the policy is
# non-deterministic. This test is only checking that the
# shapes are right, and we need something more deterministic to
# determine that the return calculation is also correct
ppo_processed = sampler.process_trajectory(
vectorized_samples[0], gamma=0.99, lam=0.95
).train_args()
ppo_generated = sampler.process_to_ppo_batch(
sampler.run(), gamma=0.99, lam=0.95
).train_args()
assert len(ppo_processed) == len(ppo_generated)
# the indices before the states and episode infos
for i in range(len(ppo_processed)):
assert ppo_processed[i][:128].shape == ppo_generated[i].shape
def test_ppo_sampling_roundtrips(self):
with utils.EnvironmentContext(
env_name=self.env, n_envs=8, seed=0, **self.env_modifiers
) as env_context:
with irl.IRLContext(self.config, env_config={
'seed': 0,
'env_name': 'PongNoFrameskip-v4',
'one_hot_code': True
}):
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=env_context.environments,
reward_model_cfg={
'expert_trajs': pickle.load(open('scripts/short_trajectories.pkl', 'rb')),
}
)
training_kwargs['batch_size'] = 50
print("Training arguments: ", training_kwargs)
env_context.environments.reset()
algo = irl.IRLRunner(**training_kwargs)
ppo_sample = algo.policy.learner.runner.sample()
trajectories = ppo_sample.to_trajectories()
assert_trajectory_formatted(trajectories.trajectories)
roundtrip_sample = trajectories.to_ppo_sample()
assert (ppo_sample.obs == roundtrip_sample.obs).all()
assert (ppo_sample.rewards == roundtrip_sample.rewards).all()
assert (ppo_sample.actions == roundtrip_sample.actions).all()
assert (ppo_sample.values == roundtrip_sample.values).all()
assert (ppo_sample.dones == roundtrip_sample.dones).all()
assert (ppo_sample.neglogpacs == roundtrip_sample.neglogpacs).all()
assert ppo_sample.states == roundtrip_sample.states
assert ppo_sample.epinfos == roundtrip_sample.epinfos
assert ppo_sample.sampler == roundtrip_sample.sampler
def test_ppo_sampling_raveling(self):
with utils.EnvironmentContext(
env_name=self.env, n_envs=8, seed=0, **self.env_modifiers
) as env_context:
with irl.IRLContext(self.config, env_config={
'seed': 0,
'env_name': 'PongNoFrameskip-v4',
'one_hot_code': True
}):
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=env_context.environments,
reward_model_cfg={
'expert_trajs': pickle.load(open('scripts/short_trajectories.pkl', 'rb')),
}
)
training_kwargs['batch_size'] = 50
print("Training arguments: ", training_kwargs)
env_context.environments.reset()
algo = irl.IRLRunner(**training_kwargs)
ppo_sample = algo.policy.learner.runner.sample()
train_batch_raveled_obs = ppo_sample._ravel_time_env_batch_to_train_batch(
ppo_sample.obs
)
# check that the second chunk of the first batch is the same as
# the second environment in the ppo sample. This shows that we
# stacked the environments correctly
assert np.isclose(
train_batch_raveled_obs[0][ppo_sample.obs.shape[0]:],
ppo_sample.obs[:, 1]
).all()
# check that the roundtrip works, as a sanity check
assert np.isclose(
ppo_sample.obs, ppo_sample._ravel_train_batch_to_time_env_batch(
train_batch_raveled_obs
)
).all()
def test_ppo_sampling_probs_calculation(self):
with utils.EnvironmentContext(
env_name=self.env, n_envs=8, seed=0, **self.env_modifiers
) as env_context:
with irl.IRLContext(self.config, env_config={
'seed': 0,
'env_name': 'PongNoFrameskip-v4',
'one_hot_code': True
}):
training_kwargs, _, _, _ = irl.get_training_kwargs(
venv=env_context.environments,
reward_model_cfg={
'expert_trajs': pickle.load(open('scripts/short_trajectories.pkl', 'rb')),
}
)
training_kwargs['batch_size'] = 50
print("Training arguments: ", training_kwargs)
env_context.environments.reset()
algo = irl.IRLRunner(**training_kwargs)
ppo_sample = algo.policy.learner.runner.sample()
# check that the probabilities are probabilities and sum to one
sums = ppo_sample.probabilities.sum(axis=2)
assert np.isclose(sums, np.ones(sums.shape)).all()
# the probabilities are consistent with the neglogpacs
one_hot_actions = utils.one_hot(
ppo_sample.actions.reshape(128 * 8), 6
).reshape(128, 8, 6)
neglogpacs = -1 * np.log(
(ppo_sample.probabilities * one_hot_actions).sum(axis=2)
)
assert np.isclose(neglogpacs, ppo_sample.neglogpacs).all()
| 10,559 | 43.1841 | 98 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/create-experiment.py
|
import pkg_resources
import argparse
import shutil
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Create an experiment folder")
parser.add_argument('--name', dest='name', required=True)
args = parser.parse_args()
folder = pkg_resources.resource_filename(__name__, 'experiments')
if not os.path.exists(folder):
print("Creating a folder 'experiments/' where all experiments will be stored.")
os.mkdir(folder)
folder = os.path.join(folder, args.name)
if os.path.exists(folder):
raise ValueError('An experiment with this name already exists')
os.mkdir(folder)
os.mkdir(os.path.join(folder, 'data'))
os.mkdir(os.path.join(folder, 'models'))
os.mkdir(os.path.join(folder, 'gens'))
os.mkdir(os.path.join(folder, 'gens', 'test'))
os.mkdir(os.path.join(folder, 'gens', 'valid'))
print(f'Experiment {args.name} created.')
| 974 | 28.545455 | 87 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/average-checkpoints.py
|
"""This file is nearly word-for-word taken from the folder tools in OpenNMT"""
import pkg_resources
import argparse
import torch
import os
def average_checkpoints(checkpoint_files):
vocab = None
opt = None
avg_model = None
avg_generator = None
for i, checkpoint_file in enumerate(checkpoint_files):
m = torch.load(checkpoint_file, map_location='cpu')
model_weights = m['model']
generator_weights = m['generator']
if i == 0:
vocab, opt = m['vocab'], m['opt']
avg_model = model_weights
avg_generator = generator_weights
else:
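            # Incremental mean: avg holds the average of the first i
            # checkpoints, so avg <- (avg * i + new_weights) / (i + 1).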
for (k, v) in avg_model.items():
avg_model[k].mul_(i).add_(model_weights[k]).div_(i + 1)
for (k, v) in avg_generator.items():
avg_generator[k].mul_(i).add_(generator_weights[k]).div_(i + 1)
return {"vocab": vocab, 'opt': opt, 'optim': None,
"generator": avg_generator, "model": avg_model}
def main():
parser = argparse.ArgumentParser(description='This script merges checkpoints of the same model')
parser.add_argument('--folder', dest="folder", help="experiment name")
parser.add_argument('--steps', dest="steps", nargs="+", help="checkpoints step numbers")
args = parser.parse_args()
expfolder = pkg_resources.resource_filename(__name__, 'experiments')
model_folder = os.path.join(expfolder, args.folder, 'models')
assert os.path.exists(model_folder), f'{model_folder} is not a valid folder'
checkpoint_files = [os.path.join(model_folder, f'model_step_{step}.pt') for step in args.steps]
avg_cp = average_checkpoints(checkpoint_files)
torch.save(avg_cp, os.path.join(model_folder, 'avg_model.pt'))
if __name__ == "__main__":
main()
| 1,847 | 33.867925 | 100 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/translate.py
|
#!/usr/bin/env python
from onmt.bin.translate import main
if __name__ == "__main__":
main()
| 98 | 13.142857 | 35 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/batch_translate.py
|
import subprocess
import functools
import argparse
import torch
import os
import re
partial_shell = functools.partial(subprocess.run, shell=True,
                                  stdout=subprocess.PIPE)
def shell(cmd):
"""Execute cmd as if from the command line"""
completed_process = partial_shell(cmd)
    return completed_process.stdout.decode('utf8')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
| 434 | 23.166667 | 62 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/train.py
|
#!/usr/bin/env python
from onmt.bin.train import main
if __name__ == "__main__":
main()
| 94 | 12.571429 | 31 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/preprocess.py
|
#!/usr/bin/env python
from onmt.bin.preprocess import main
if __name__ == "__main__":
main()
| 99 | 13.285714 | 36 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/data/make-dataset.py
|
"""
In this file we build the RotoWire dataset so that it can be used in OpenNMT
and it can be used by our proposed hierarchical model.
All tables are represented as a sequence, where every ENT_SIZE tokens are one
entity, so that seq.view(ENT_SIZE, -1) separates all entities.
Each entity starts with <ent> token, for learning entity repr
A lot of this file comes from previous work on this dataset:
https://github.com/ratishsp/data2text-plan-py/blob/master/scripts/create_dataset.py
"""
from more_itertools import collapse
import pkg_resources
import json, os, re
import argparse
# OpenNMT has a fancy pipe
DELIM = "│"
# I manually checked and there are at most 24 elements in an entity
ENT_SIZE = 24
bs_keys = ['START_POSITION', 'MIN', 'PTS', 'FGM', 'FGA', 'FG_PCT', 'FG3M',
'FG3A', 'FG3_PCT', 'FTM', 'FTA', 'FT_PCT', 'OREB', 'DREB', 'REB',
'AST', 'TO', 'STL', 'BLK', 'PF', 'FIRST_NAME', 'SECOND_NAME']
ls_keys = ['PTS_QTR1', 'PTS_QTR2', 'PTS_QTR3', 'PTS_QTR4', 'PTS', 'FG_PCT',
'FG3_PCT', 'FT_PCT', 'REB', 'AST', 'TOV', 'WINS', 'LOSSES', 'CITY',
'NAME']
ls_keys = [f'TEAM-{key}' for key in ls_keys]
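# Each record is serialized as value│key (e.g. "26│PTS" or "yes│IS_HOME");
# an entity starts with "<ent>│<ent>" and is padded to ENT_SIZE records
# with "<blank>│<blank>".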
def _build_home(entry):
"""The team who hosted the game"""
records = [DELIM.join(['<ent>', '<ent>'])]
for key in ls_keys:
records.append(DELIM.join([
entry['home_line'][key].replace(' ', '_'),
key
]))
# Contrary to previous work, IS_HOME is now a unique token at the end
records.append(DELIM.join(['yes', 'IS_HOME']))
# We pad the entity to size ENT_SIZE with OpenNMT <blank> token
records.extend([DELIM.join(['<blank>', '<blank>'])] * (ENT_SIZE - len(records)))
return records
def _build_vis(entry):
"""The visiting team"""
records = [DELIM.join(['<ent>', '<ent>'])]
for key in ls_keys:
records.append(DELIM.join([
entry['vis_line'][key].replace(' ', '_'),
key
]))
# Contrary to previous work, IS_HOME is now a unique token at the end
records.append(DELIM.join(['no', 'IS_HOME']))
# We pad the entity to size ENT_SIZE with OpenNMT <blank> token
records.extend([DELIM.join(['<blank>', '<blank>'])] * (ENT_SIZE - len(records)))
return records
def get_player_idxs(entry):
    # In 4 instances the Clippers play against the Lakers
    # Both are from LA... so we simply divide the players in half
    # In all 4 games there are 26 players, so we return 13-25 & 0-12,
    # as the box score always lists visiting players first and home players second.
if entry['home_city'] == entry['vis_city']:
assert entry['home_city'] == 'Los Angeles'
return ([str(idx) for idx in range(13, 26)],
[str(idx) for idx in range(13)])
nplayers = len(entry['box_score']['PTS'])
home_players, vis_players = list(), list()
for i in range(nplayers):
player_city = entry['box_score']['TEAM_CITY'][str(i)]
if player_city == entry['home_city']:
home_players.append(str(i))
else:
vis_players.append(str(i))
return home_players, vis_players
def box_preprocess(entry, remove_na=True):
home_players, vis_players = get_player_idxs(entry)
all_entities = list() # will contain all records of the input table
for is_home, player_idxs in enumerate([vis_players, home_players]):
for player_idx in player_idxs:
player = [DELIM.join(['<ent>', '<ent>'])]
for key in bs_keys:
val = entry['box_score'][key][player_idx]
if remove_na and val == 'N/A': continue
player.append(DELIM.join([
val.replace(' ', '_'),
key
]))
is_home_str = 'yes' if is_home else 'no'
player.append(DELIM.join([is_home_str, 'IS_HOME']))
# We pad the entity to size ENT_SIZE with OpenNMT <blank> token
player.extend([DELIM.join(['<blank>', '<blank>'])] * (ENT_SIZE - len(player)))
all_entities.append(player)
all_entities.append(_build_home(entry))
all_entities.append(_build_vis(entry))
return list(collapse(all_entities))
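# Illustrative sketch (added for clarity, not part of the original file):
# box_preprocess returns one flat list of value│key records whose length is a
# multiple of ENT_SIZE, one block of ENT_SIZE consecutive records per entity:
#   records = box_preprocess(entry)
#   assert len(records) % ENT_SIZE == 0
#   records[:ENT_SIZE]   # first player: '<ent>│<ent>', then value│key pairs
#                        # such as '12│PTS', padded with '<blank>│<blank>'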
def _clean_summary(summary, tokens):
"""
    Here we slightly help the copy mechanism.
    When we built the source sequence, we took all multi-word values
    and replaced spaces with underscores. We do the same in the
    summaries, so that the copy mechanism knows they were copied.
    This only happens with city names like "Los Angeles".
"""
summary = ' '.join(summary)
for token in tokens:
val = token.split(DELIM)[0]
if '_' in val:
val_no_underscore = val.replace('_', ' ')
summary = summary.replace(val_no_underscore, val)
return summary
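# Illustrative example (added for clarity, not part of the original file; the
# token values are hypothetical): if tokens contains 'Los_Angeles│TEAM-CITY',
# then _clean_summary(['The', 'Los', 'Angeles', 'Lakers', 'won'], tokens)
# returns 'The Los_Angeles Lakers won', matching the underscored source value.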
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--folder', dest='folder', required=True,
help='Save the preprocessed dataset to this folder')
parser.add_argument('--keep-na', dest='keep_na', action='store_true',
help='Activate to keep NA in the dataset')
args = parser.parse_args()
if not os.path.exists(args.folder):
print('Creating folder to store preprocessed dataset at:')
print(args.folder)
os.mkdir(args.folder)
for setname in ['train', 'valid', 'test']:
filename = f'rotowire/{setname}.json'
filename = pkg_resources.resource_filename(__name__, filename)
with open(filename, encoding='utf8', mode='r') as f:
data = json.load(f)
input_filename = os.path.join(args.folder, f'{setname}_input.txt')
output_filename = os.path.join(args.folder, f'{setname}_output.txt')
with open(input_filename, mode='w', encoding='utf8') as inputf:
with open(output_filename, mode='w', encoding='utf8') as outputf:
for entry in data:
input = box_preprocess(entry)
inputf.write(' '.join(input) + '\n')
outputf.write(_clean_summary(entry['summary'], input) + '\n')
| 6,186 | 36.49697 | 90 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/opts.py
|
""" Implementation of all available options """
from __future__ import print_function
import configargparse
from onmt.models.sru import CheckSRU
def config_opts(parser):
parser.add('-config', '--config', required=False,
is_config_file_arg=True, help='config file path')
parser.add('-save_config', '--save_config', required=False,
is_write_out_config_file_arg=True,
help='config file save path')
def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Embedding Options
group = parser.add_argument_group('Model-Embeddings')
group.add('--src_word_vec_size', '-src_word_vec_size',
type=int, default=500,
help='Word embedding size for src.')
group.add('--tgt_word_vec_size', '-tgt_word_vec_size',
type=int, default=500,
help='Word embedding size for tgt.')
group.add('--word_vec_size', '-word_vec_size', type=int, default=-1,
help='Word embedding size for src and tgt.')
group.add('--share_decoder_embeddings', '-share_decoder_embeddings',
action='store_true',
help="Use a shared weight matrix for the input and "
"output word embeddings in the decoder.")
group.add('--share_embeddings', '-share_embeddings', action='store_true',
help="Share the word embeddings between encoder "
"and decoder. Need to use shared dictionary for this "
"option.")
group.add('--position_encoding', '-position_encoding', action='store_true',
help="Use a sin to mark relative words positions. "
"Necessary for non-RNN style models.")
group = parser.add_argument_group('Model-Embedding Features')
group.add('--feat_merge', '-feat_merge', type=str, default='concat',
choices=[None, 'concat', 'sum', 'mlp'],
help="Merge action for incorporating features embeddings. "
"Options [concat|sum|mlp].")
group.add('--feat_merge_activation', '-feat_merge_activation',
type=str, default='ReLU', choices = [None, 'ReLU', 'Tanh'])
group.add('--feat_vec_size', '-feat_vec_size', type=int, default=-1,
help="If specified, feature embedding sizes "
"will be set to this. Otherwise, feat_vec_exponent "
"will be used.")
group.add('--feat_vec_exponent', '-feat_vec_exponent',
type=float, default=0.7,
help="If -feat_merge_size is not set, feature "
"embedding sizes will be set to N^feat_vec_exponent "
"where N is the number of values the feature takes.")
# Encoder-Decoder Options
group = parser.add_argument_group('Model- Encoder-Decoder')
group.add('--model_type', '-model_type', default='text',
choices=['text', 'table', 'img', 'audio', 'vec'],
help="Type of source model to use. Allows "
"the system to incorporate non-text inputs. "
"Options are [text|img|audio|vec].")
group.add('--model_dtype', '-model_dtype', default='fp32',
choices=['fp32', 'fp16'],
help='Data type of the model.')
group.add('--encoder_type', '-encoder_type', type=str, default='rnn',
              choices=['rnn', 'brnn', 'mean', 'transformer', 'htransformer', 'cnn'],
              help="Type of encoder layer to use. Non-RNN layers "
                   "are experimental. Options are "
                   "[rnn|brnn|mean|transformer|htransformer|cnn].")
group.add('--decoder_type', '-decoder_type', type=str, default='rnn',
choices=['rnn', 'hrnn', 'transformer', 'cnn'],
help="Type of decoder layer to use. Non-RNN layers "
"are experimental. Options are "
"[rnn|hrrn|transformer|cnn].")
group.add('--layers', '-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
# Encoder
group.add('--enc_layers', '-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
group.add('--units_layers', '-units_layers', type=int, default=2,
help='Number of layers in the units encoder (when hierarchical)')
group.add('--chunks_layers', '-chunks_layers', type=int, default=2,
help='Number of layers in the chunks encoder (when hierarchical)')
group.add('--glu_depth', '-glu_depth', type=int, default=2,
help='Number of glu layers in the encoder (when hierarchical)')
group.add('--units_glu_depth', '-units_glu_depth', type=int, default=2,
help='Number of glu layers in the units_encoder (when hierarchical)')
group.add('--chunks_glu_depth', '-chunks_glu_depth', type=int, default=2,
help='Number of glu layers in the chunks_encoder (when hierarchical)')
group.add('--units_heads', '-units_heads', type=int, default=2,
help='Number of heads in the units encoder (when hierarchical)')
group.add('--chunks_heads', '-chunks_heads', type=int, default=2,
help='Number of heads in the chunks encoder (when hierarchical)')
# Decoder
group.add('--dec_layers', '-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add('--rnn_size', '-rnn_size', type=int, default=-1,
help="Size of rnn hidden states. Overwrites "
"enc_rnn_size and dec_rnn_size")
group.add('--enc_rnn_size', '-enc_rnn_size', type=int, default=500,
help="Size of encoder rnn hidden states. "
"Must be equal to dec_rnn_size except for "
"speech-to-text.")
group.add('--dec_rnn_size', '-dec_rnn_size', type=int, default=500,
help="Size of decoder rnn hidden states. "
"Must be equal to enc_rnn_size except for "
"speech-to-text.")
group.add('--audio_enc_pooling', '-audio_enc_pooling',
type=str, default='1',
help="The amount of pooling of audio encoder, "
"either the same amount of pooling across all layers "
"indicated by a single number, or different amounts of "
"pooling per layer separated by comma.")
group.add('--cnn_kernel_width', '-cnn_kernel_width', type=int, default=3,
help="Size of windows in the cnn, the kernel_size is "
"(cnn_kernel_width, 1) in conv layer")
group.add('--input_feed', '-input_feed', type=int, default=1,
help="Feed the context vector at each time step as "
"additional input (via concatenation with the word "
"embeddings) to the decoder.")
group.add('--bridge', '-bridge', action="store_true",
help="Have an additional layer between the last encoder "
"state and the first decoder state")
group.add('--rnn_type', '-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="The gate type to use in the RNNs")
# group.add('--residual', '-residual', action="store_true",
# help="Add residual connections between RNN layers.")
group.add('--brnn', '-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
group.add('--context_gate', '-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="Type of context gate to use. "
"Do not select for no context gate.")
# Attention options
group = parser.add_argument_group('Model- Attention')
group.add('--global_attention', '-global_attention',
type=str, default='general',
choices=['dot', 'general', 'mlp', 'none'],
help="The attention type to use: "
"dotprod or general (Luong) or MLP (Bahdanau)")
group.add('--global_attention_function', '-global_attention_function',
type=str, default="softmax", choices=["softmax", "sparsemax"])
group.add('--self_attn_type', '-self_attn_type',
type=str, default="scaled-dot",
help='Self attention type in Transformer decoder '
'layer -- currently "scaled-dot" or "average" ')
group.add('--max_relative_positions', '-max_relative_positions',
type=int, default=0,
help="Maximum distance between inputs in relative "
"positions representations. "
"For more detailed information, see: "
"https://arxiv.org/pdf/1803.02155.pdf")
group.add('--heads', '-heads', type=int, default=8,
help='Number of heads for transformer self-attention')
group.add('--transformer_ff', '-transformer_ff', type=int, default=2048,
help='Size of hidden transformer feed-forward')
group.add('--aan_useffn', '-aan_useffn', action="store_true",
help='Turn on the FFN layer in the AAN decoder')
group.add('--use_pos', '-use_pos', action='store_true',
help='Use position (ie column name) instead of value for the hierarchical attention on the units level')
    # Alignment options
    group = parser.add_argument_group('Model - Alignment')
    group.add('--lambda_align', '-lambda_align', type=float, default=0.0,
              help="Lambda value for alignment loss of Garg et al (2019). "
"For more detailed information, see: "
"https://arxiv.org/abs/1909.02074")
group.add('--alignment_layer', '-alignment_layer', type=int, default=-3,
help='Layer number which has to be supervised.')
group.add('--alignment_heads', '-alignment_heads', type=int, default=None,
              help='Number of cross-attention heads per layer to supervise with')
group.add('--full_context_alignment', '-full_context_alignment',
action="store_true",
help='Whether alignment is conditioned on full target context.')
# Generator and loss options.
group = parser.add_argument_group('Generator')
group.add('--copy_attn', '-copy_attn', action="store_true",
help='Train copy attention layer.')
group.add('--copy_attn_type', '-copy_attn_type',
type=str, default=None,
choices=['dot', 'general', 'mlp', 'none'],
help="The copy attention type to use. Leave as None to use "
"the same as -global_attention.")
group.add('--generator_function', '-generator_function', default="softmax",
choices=["softmax", "sparsemax"],
help="Which function to use for generating "
"probabilities over the target vocabulary (choices: "
"softmax, sparsemax)")
group.add('--copy_attn_force', '-copy_attn_force', action="store_true",
help='When available, train to copy.')
group.add('--reuse_copy_attn', '-reuse_copy_attn', action="store_true",
help="Reuse standard attention for copy")
group.add('--copy_loss_by_seqlength', '-copy_loss_by_seqlength',
action="store_true",
help="Divide copy loss by length of sequence")
group.add('--coverage_attn', '-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
group.add('--lambda_coverage', '-lambda_coverage', type=float, default=0.0,
help='Lambda value for coverage loss of See et al (2017)')
group.add('--loss_scale', '-loss_scale', type=float, default=0,
help="For FP16 training, the static loss scale to use. If not "
"set, the loss scale is dynamically computed.")
group.add('--apex_opt_level', '-apex_opt_level', type=str, default="O1",
choices=["O0", "O1", "O2", "O3"],
help="For FP16 training, the opt_level to use."
"See https://nvidia.github.io/apex/amp.html#opt-levels.")
def preprocess_opts(parser):
""" Pre-procesing options """
# Data options
group = parser.add_argument_group('Data')
group.add('--data_type', '-data_type', default="text",
help="Type of the source input. "
"Options are [text|img|audio|vec].")
group.add('--train_src', '-train_src', required=True, nargs='+',
help="Path(s) to the training source data")
group.add('--train_tgt', '-train_tgt', required=True, nargs='+',
help="Path(s) to the training target data")
group.add('--train_align', '-train_align', nargs='+', default=[None],
help="Path(s) to the training src-tgt alignment")
group.add('--train_ids', '-train_ids', nargs='+', default=[None],
help="ids to name training shards, used for corpus weighting")
group.add('--valid_src', '-valid_src',
help="Path to the validation source data")
group.add('--valid_tgt', '-valid_tgt',
help="Path to the validation target data")
group.add('--valid_align', '-valid_align', default=None,
help="Path(s) to the validation src-tgt alignment")
group.add('--src_dir', '-src_dir', default="",
help="Source directory for image or audio files.")
group.add('--save_data', '-save_data', required=True,
help="Output file for the prepared data")
group.add('--max_shard_size', '-max_shard_size', type=int, default=0,
help="""Deprecated use shard_size instead""")
group.add('--shard_size', '-shard_size', type=int, default=1000000,
help="Divide src_corpus and tgt_corpus into "
"smaller multiple src_copus and tgt corpus files, then "
"build shards, each shard will have "
"opt.shard_size samples except last shard. "
"shard_size=0 means no segmentation "
"shard_size>0 means segment dataset into multiple shards, "
"each shard has shard_size samples")
group.add('--num_threads', '-num_threads', type=int, default=1,
help="Number of shards to build in parallel.")
group.add('--overwrite', '-overwrite', action="store_true",
help="Overwrite existing shards if any.")
# Dictionary options, for text corpus
group = parser.add_argument_group('Vocab')
# if you want to pass an existing vocab.pt file, pass it to
# -src_vocab alone as it already contains tgt vocab.
group.add('--src_vocab', '-src_vocab', default="",
help="Path to an existing source vocabulary. Format: "
"one word per line.")
group.add('--tgt_vocab', '-tgt_vocab', default="",
help="Path to an existing target vocabulary. Format: "
"one word per line.")
group.add('--features_vocabs_prefix', '-features_vocabs_prefix',
type=str, default='',
help="Path prefix to existing features vocabularies")
group.add('--src_vocab_size', '-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
group.add('--tgt_vocab_size', '-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
group.add('--vocab_size_multiple', '-vocab_size_multiple',
type=int, default=1,
help="Make the vocabulary size a multiple of this value")
group.add('--src_words_min_frequency',
'-src_words_min_frequency', type=int, default=0)
group.add('--tgt_words_min_frequency',
'-tgt_words_min_frequency', type=int, default=0)
group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add('--share_vocab', '-share_vocab', action='store_true',
help="Share source and target vocabulary")
# Truncation options, for text corpus
group = parser.add_argument_group('Pruning')
group.add('--src_seq_length', '-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
group.add('--src_seq_length_trunc', '-src_seq_length_trunc',
type=int, default=None,
help="Truncate source sequence length.")
group.add('--tgt_seq_length', '-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
group.add('--tgt_seq_length_trunc', '-tgt_seq_length_trunc',
type=int, default=None,
help="Truncate target sequence length.")
group.add('--lower', '-lower', action='store_true', help='lowercase data')
group.add('--filter_valid', '-filter_valid', action='store_true',
help='Filter validation data by src and/or tgt length')
# Data processing options
group = parser.add_argument_group('Random')
group.add('--shuffle', '-shuffle', type=int, default=0,
help="Shuffle data")
group.add('--seed', '-seed', type=int, default=3435,
help="Random seed")
group = parser.add_argument_group('Logging')
group.add('--report_every', '-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
# Options most relevant to speech
group = parser.add_argument_group('Speech')
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add('--window_stride', '-window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add('--window', '-window', default='hamming',
help="Window type for spectrogram generation.")
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3,
choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add('--data', '-data', required=True,
help='Path prefix to the ".train.pt" and '
'".valid.pt" file path from preprocess.py')
group.add('--data_ids', '-data_ids', nargs='+', default=[None],
help="In case there are several corpora.")
group.add('--data_weights', '-data_weights', type=int, nargs='+',
default=[1], help="""Weights of different corpora,
should follow the same order as in -data_ids.""")
group.add('--save_model', '-save_model', default='model',
help="Model filename (the model will be saved as "
"<save_model>_N.pt where N is the number "
"of steps")
group.add('--save_checkpoint_steps', '-save_checkpoint_steps',
type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add('--keep_checkpoint', '-keep_checkpoint', type=int, default=-1,
help="Keep X checkpoints (negative: keep all)")
# GPU
group.add('--gpuid', '-gpuid', default=[], nargs='*', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add('--gpu_ranks', '-gpu_ranks', default=[], nargs='*', type=int,
help="list of ranks of each process.")
group.add('--world_size', '-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add('--gpu_backend', '-gpu_backend',
default="nccl", type=str,
help="Type of torch distributed backend")
group.add('--gpu_verbose_level', '-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add('--master_ip', '-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add('--master_port', '-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add('--queue_size', '-queue_size', default=400, type=int,
help="Size of queue for each process in producer/consumer")
group.add('--seed', '-seed', type=int, default=-1,
help="Random seed used for the experiments "
"reproducibility.")
# Init options
group = parser.add_argument_group('Initialization')
group.add('--param_init', '-param_init', type=float, default=0.1,
help="Parameters are initialized over uniform distribution "
"with support (-param_init, param_init). "
"Use 0 to not use initialization")
group.add('--param_init_glorot', '-param_init_glorot', action='store_true',
help="Init parameters with xavier_uniform. "
"Required for transformer.")
group.add('--train_from', '-train_from', default='', type=str,
help="If training from a checkpoint then this is the "
"path to the pretrained model's state_dict.")
group.add('--reset_optim', '-reset_optim', default='none',
choices=['none', 'all', 'states', 'keep_states'],
help="Optimization resetter when train_from.")
# Pretrained word vectors
group.add('--pre_word_vecs_enc', '-pre_word_vecs_enc',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the encoder side. "
"See README for specific formatting instructions.")
group.add('--pre_word_vecs_dec', '-pre_word_vecs_dec',
help="If a valid path is specified, then this will load "
"pretrained word embeddings on the decoder side. "
"See README for specific formatting instructions.")
# Fixed word vectors
group.add('--fix_word_vecs_enc', '-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add('--fix_word_vecs_dec', '-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add('--batch_size', '-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add('--batch_type', '-batch_type', default='sents',
choices=["sents", "tokens"],
help="Batch grouping for batch_size. Standard "
"is sents. Tokens will do dynamic batching")
group.add('--pool_factor', '-pool_factor', type=int, default=8192,
help="""Factor used in data loading and batch creations.
It will load the equivalent of `pool_factor` batches,
sort them by the according `sort_key` to produce
homogeneous batches and reduce padding, and yield
the produced batches in a shuffled way.
Inspired by torchtext's pool mechanism.""")
group.add('--normalization', '-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add('--accum_count', '-accum_count', type=int, nargs='+',
default=[1],
help="Accumulate gradient this many times. "
"Approximately equivalent to updating "
"batch_size * accum_count batches at once. "
"Recommended for Transformer.")
group.add('--accum_steps', '-accum_steps', type=int, nargs='+',
default=[0], help="Steps at which accum_count values change")
group.add('--valid_steps', '-valid_steps', type=int, default=10000,
              help='Perform validation every X steps')
group.add('--valid_batch_size', '-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add('--max_generator_batches', '-max_generator_batches',
type=int, default=32,
help="Maximum batches of words in a sequence to run "
"the generator on in parallel. Higher is faster, but "
"uses more memory. Set to 0 to disable.")
group.add('--train_steps', '-train_steps', type=int, default=100000,
help='Number of training steps')
group.add('--single_pass', '-single_pass', action='store_true',
help="Make a single pass over the training dataset.")
group.add('--epochs', '-epochs', type=int, default=0,
help='Deprecated epochs see train_steps')
group.add('--early_stopping', '-early_stopping', type=int, default=0,
help='Number of validation steps without improving.')
group.add('--early_stopping_criteria', '-early_stopping_criteria',
nargs="*", default=None,
help='Criteria to use for early stopping.')
group.add('--optim', '-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam', 'adafactor', 'fusedadam'],
help="Optimization method.")
group.add('--adagrad_accumulator_init', '-adagrad_accumulator_init',
type=float, default=0,
help="Initializes the accumulator values in adagrad. "
"Mirrors the initial_accumulator_value option "
"in the tensorflow adagrad (use 0.1 for their default).")
group.add('--max_grad_norm', '-max_grad_norm', type=float, default=5,
help="If the norm of the gradient vector exceeds this, "
"renormalize it to have the norm equal to "
"max_grad_norm")
group.add('--dropout', '-dropout', type=float, default=[0.3], nargs='+',
help="Dropout probability; applied in LSTM stacks.")
group.add('--attention_dropout', '-attention_dropout', type=float,
default=[0.1], nargs='+',
help="Attention Dropout probability.")
group.add('--dropout_steps', '-dropout_steps', type=int, nargs='+',
default=[0], help="Steps at which dropout changes.")
group.add('--truncated_decoder', '-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add('--adam_beta1', '-adam_beta1', type=float, default=0.9,
help="The beta1 parameter used by Adam. "
"Almost without exception a value of 0.9 is used in "
"the literature, seemingly giving good results, "
"so we would discourage changing this value from "
"the default without due consideration.")
group.add('--adam_beta2', '-adam_beta2', type=float, default=0.999,
help='The beta2 parameter used by Adam. '
'Typically a value of 0.999 is recommended, as this is '
'the value suggested by the original paper describing '
'Adam, and is also the value adopted in other frameworks '
'such as Tensorflow and Keras, i.e. see: '
'https://www.tensorflow.org/api_docs/python/tf/train/Adam'
'Optimizer or https://keras.io/optimizers/ . '
'Whereas recently the paper "Attention is All You Need" '
'suggested a value of 0.98 for beta2, this parameter may '
'not work well for normal models / default '
'baselines.')
group.add('--label_smoothing', '-label_smoothing', type=float, default=0.0,
help="Label smoothing value epsilon. "
"Probabilities of all non-true labels "
"will be smoothed by epsilon / (vocab_size - 1). "
"Set to zero to turn off label smoothing. "
"For more detailed information, see: "
"https://arxiv.org/abs/1512.00567")
group.add('--average_decay', '-average_decay', type=float, default=0,
help="Moving average decay. "
"Set to other than 0 (e.g. 1e-4) to activate. "
"Similar to Marian NMT implementation: "
"http://www.aclweb.org/anthology/P18-4020 "
"For more detail on Exponential Moving Average: "
"https://en.wikipedia.org/wiki/Moving_average")
group.add('--average_every', '-average_every', type=int, default=1,
help="Step for moving average. "
"Default is every update, "
"if -average_decay is set.")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add('--learning_rate', '-learning_rate', type=float, default=1.0,
help="Starting learning rate. "
"Recommended settings: sgd = 1, adagrad = 0.1, "
"adadelta = 1, adam = 0.001")
group.add('--learning_rate_decay', '-learning_rate_decay',
type=float, default=0.5,
help="If update_learning_rate, decay learning rate by "
"this much if steps have gone past "
"start_decay_steps")
group.add('--start_decay_steps', '-start_decay_steps',
type=int, default=50000,
help="Start decaying every decay_steps after "
"start_decay_steps")
group.add('--decay_steps', '-decay_steps', type=int, default=10000,
help="Decay every decay_steps")
group.add('--decay_method', '-decay_method', type=str, default="none",
choices=['noam', 'noamwd', 'rsqrt', 'none'],
help="Use a custom decay rate.")
group.add('--warmup_steps', '-warmup_steps', type=int, default=4000,
help="Number of warmup steps for custom decay.")
group = parser.add_argument_group('Logging')
group.add('--report_every', '-report_every', type=int, default=50,
help="Print stats at this interval.")
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
group.add('--exp_host', '-exp_host', type=str, default="",
help="Send logs to this crayon server.")
group.add('--exp', '-exp', type=str, default="",
help="Name of the experiment for logging.")
# Use Tensorboard for visualization during training
group.add('--tensorboard', '-tensorboard', action="store_true",
help="Use tensorboard for visualization during training. "
"Must have the library tensorboard >= 1.14.")
group.add("--tensorboard_log_dir", "-tensorboard_log_dir",
type=str, default="runs/onmt",
help="Log directory for Tensorboard. "
"This is also the name of the run.")
group = parser.add_argument_group('Speech')
# Options most relevant to speech
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3, choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
def translate_opts(parser):
""" Translation / inference options """
group = parser.add_argument_group('Model')
group.add('--model', '-model', dest='models', metavar='MODEL',
nargs='+', type=str, default=[], required=True,
help="Path to model .pt file(s). "
"Multiple models can be specified, "
"for ensemble decoding.")
group.add('--fp32', '-fp32', action='store_true',
help="Force the model to be in FP32 "
"because FP16 is very slow on GTX1080(ti).")
group.add('--avg_raw_probs', '-avg_raw_probs', action='store_true',
help="If this is set, during ensembling scores from "
"different models will be combined by averaging their "
"raw probabilities and then taking the log. Otherwise, "
"the log probabilities will be averaged directly. "
"Necessary for models whose output layers can assign "
"zero probability.")
group = parser.add_argument_group('Data')
group.add('--data_type', '-data_type', default="text",
help="Type of the source input. Options: [text|img].")
group.add('--src', '-src', required=True,
help="Source sequence to decode (one line per "
"sequence)")
group.add('--src_dir', '-src_dir', default="",
help='Source directory for image or audio files')
group.add('--tgt', '-tgt',
help='True target sequence (optional)')
group.add('--shard_size', '-shard_size', type=int, default=10000,
help="Divide src and tgt (if applicable) into "
"smaller multiple src and tgt files, then "
"build shards, each shard will have "
"opt.shard_size samples except last shard. "
"shard_size=0 means no segmentation "
"shard_size>0 means segment dataset into multiple shards, "
"each shard has shard_size samples")
group.add('--output', '-output', default='pred.txt',
help="Path to output the predictions (each line will "
"be the decoded sequence")
group.add('--report_align', '-report_align', action='store_true',
help="Report alignment for each translation.")
group.add('--report_time', '-report_time', action='store_true',
help="Report some translation time metrics")
# Options most relevant to summarization.
group.add('--dynamic_dict', '-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add('--share_vocab', '-share_vocab', action='store_true',
help="Share source and target vocabulary")
group = parser.add_argument_group('Random Sampling')
group.add('--random_sampling_topk', '-random_sampling_topk',
default=1, type=int,
help="Set this to -1 to do random sampling from full "
"distribution. Set this to value k>1 to do random "
"sampling restricted to the k most likely next tokens. "
"Set this to 1 to use argmax or for doing beam "
"search.")
group.add('--random_sampling_temp', '-random_sampling_temp',
default=1., type=float,
help="If doing random sampling, divide the logits by "
"this before computing softmax during decoding.")
group.add('--seed', '-seed', type=int, default=829,
help="Random seed")
group = parser.add_argument_group('Beam')
group.add('--beam_size', '-beam_size', type=int, default=5,
help='Beam size')
group.add('--min_length', '-min_length', type=int, default=0,
help='Minimum prediction length')
group.add('--max_length', '-max_length', type=int, default=100,
help='Maximum prediction length.')
group.add('--max_sent_length', '-max_sent_length', action=DeprecateAction,
help="Deprecated, use `-max_length` instead")
# Alpha and Beta values for Google Length + Coverage penalty
# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
group.add('--stepwise_penalty', '-stepwise_penalty', action='store_true',
help="Apply penalty at every decoding step. "
"Helpful for summary penalty.")
group.add('--length_penalty', '-length_penalty', default='none',
choices=['none', 'wu', 'avg'],
help="Length Penalty to use.")
group.add('--ratio', '-ratio', type=float, default=-0.,
help="Ratio based beam stop condition")
group.add('--coverage_penalty', '-coverage_penalty', default='none',
choices=['none', 'wu', 'summary'],
help="Coverage Penalty to use.")
group.add('--alpha', '-alpha', type=float, default=0.,
help="Google NMT length penalty parameter "
"(higher = longer generation)")
group.add('--beta', '-beta', type=float, default=-0.,
help="Coverage penalty parameter")
group.add('--block_ngram_repeat', '-block_ngram_repeat',
type=int, default=0,
help='Block repetition of ngrams during decoding.')
group.add('--ignore_when_blocking', '-ignore_when_blocking',
nargs='+', type=str, default=[],
help="Ignore these strings when blocking repeats. "
"You want to block sentence delimiters.")
group.add('--replace_unk', '-replace_unk', action="store_true",
help="Replace the generated UNK tokens with the "
"source token that had highest attention weight. If "
"phrase_table is provided, it will look up the "
"identified source token and give the corresponding "
"target token. If it is not provided (or the identified "
"source token does not exist in the table), then it "
"will copy the source token.")
group.add('--phrase_table', '-phrase_table', type=str, default="",
help="If phrase_table is provided (with replace_unk), it will "
"look up the identified source token and give the "
"corresponding target token. If it is not provided "
"(or the identified source token does not exist in "
"the table), then it will copy the source token.")
group = parser.add_argument_group('Logging')
group.add('--verbose', '-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add('--log_file', '-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add('--log_file_level', '-log_file_level', type=str,
action=StoreLoggingLevelAction,
choices=StoreLoggingLevelAction.CHOICES,
default="0")
group.add('--attn_debug', '-attn_debug', action="store_true",
help='Print best attn for each word')
group.add('--dump_attn', '-dump_attn', action="store_true",
help="Dump attention score to this folder")
group.add('--align_debug', '-align_debug', action="store_true",
help='Print best align for each word')
group.add('--dump_beam', '-dump_beam', type=str, default="",
help='File to dump beam information to.')
group.add('--n_best', '-n_best', type=int, default=1,
help="If verbose is set, will output the n_best "
"decoded sentences")
group = parser.add_argument_group('Efficiency')
group.add('--batch_size', '-batch_size', type=int, default=30,
help='Batch size')
group.add('--batch_type', '-batch_type', default='sents',
choices=["sents", "tokens"],
help="Batch grouping for batch_size. Standard "
"is sents. Tokens will do dynamic batching")
group.add('--gpu', '-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to speech.
group = parser.add_argument_group('Speech')
group.add('--sample_rate', '-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add('--window_size', '-window_size', type=float, default=.02,
help='Window size for spectrogram in seconds')
group.add('--window_stride', '-window_stride', type=float, default=.01,
help='Window stride for spectrogram in seconds')
group.add('--window', '-window', default='hamming',
help='Window type for spectrogram generation')
# Option most relevant to image input
group.add('--image_channel_size', '-image_channel_size',
type=int, default=3, choices=[3, 1],
help="Using grayscale image can training "
"model faster and smaller")
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class StoreLoggingLevelAction(configargparse.Action):
""" Convert string to logging level """
import logging
LEVELS = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"NOTSET": logging.NOTSET
}
CHOICES = list(LEVELS.keys()) + [str(_) for _ in LEVELS.values()]
def __init__(self, option_strings, dest, help=None, **kwargs):
super(StoreLoggingLevelAction, self).__init__(
option_strings, dest, help=help, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
# Get the key 'value' in the dict, or just use 'value'
level = StoreLoggingLevelAction.LEVELS.get(value, value)
setattr(namespace, self.dest, level)
class DeprecateAction(configargparse.Action):
""" Deprecate action """
def __init__(self, option_strings, dest, help=None, **kwargs):
super(DeprecateAction, self).__init__(option_strings, dest, nargs=0,
help=help, **kwargs)
def __call__(self, parser, namespace, values, flag_name):
help = self.help if self.help is not None else ""
msg = "Flag '%s' is deprecated. %s" % (flag_name, help)
raise configargparse.ArgumentTypeError(msg)
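# Minimal usage sketch for the custom actions above (added for clarity, not
# part of the original file; the option names are hypothetical):
#   parser.add('--log_level', type=str, action=StoreLoggingLevelAction,
#              choices=StoreLoggingLevelAction.CHOICES, default="0")
#   parser.add('--old_flag', action=DeprecateAction,
#              help="use --new_flag instead")
#   # passing '--old_flag' on the command line then raises
#   # configargparse.ArgumentTypeError with the deprecation message.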
| 42,843 | 51.893827 | 118 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/train_single.py
|
#!/usr/bin/env python
"""Training on a single process."""
import os
import torch
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from onmt.model_builder import build_model
from onmt.utils.optimizers import Optimizer
from onmt.utils.misc import set_random_seed
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
from onmt.utils.parse import ArgumentParser
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return enc + dec, enc, dec
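def _example_tally():
    """Illustrative sketch only (added for clarity, not part of the original
    file): parameters whose names contain 'encoder' are counted on the encoder
    side, everything else on the decoder side."""
    import torch.nn as nn
    toy = nn.ModuleDict({'encoder': nn.Linear(4, 4), 'decoder': nn.Linear(4, 2)})
    n_params, enc, dec = _tally_parameters(toy)
    # With the toy module above: enc == 20, dec == 10, n_params == 30.
    return n_params, enc, dec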
def configure_process(opt, device_id):
if device_id >= 0:
torch.cuda.set_device(device_id)
set_random_seed(opt.seed, device_id >= 0)
def main(opt, device_id, batch_queue=None, semaphore=None):
# NOTE: It's important that ``opt`` has been validated and updated
# at this point.
configure_process(opt, device_id)
init_logger(opt.log_file)
assert len(opt.accum_count) == len(opt.accum_steps), \
'Number of accum_count values must match number of accum_steps'
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
checkpoint = None
model_opt = opt
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# Report src and tgt vocab sizes, including for features
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
# Build optimizer.
optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
# Build model saver
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(
opt, device_id, model, fields, optim, model_saver=model_saver)
if batch_queue is None:
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
else:
assert semaphore is not None, \
"Using batch_queue requires semaphore as well"
def _train_iter():
while True:
batch = batch_queue.get()
semaphore.release()
yield batch
train_iter = _train_iter()
valid_iter = build_dataset_iter(
"valid", fields, opt, is_train=False)
if len(opt.gpu_ranks):
logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
else:
logger.info('Starting training on CPU, could be very slow')
train_steps = opt.train_steps
if opt.single_pass and train_steps > 0:
logger.warning("Option single_pass is enabled, ignoring train_steps.")
train_steps = 0
trainer.train(
train_iter,
train_steps,
save_checkpoint_steps=opt.save_checkpoint_steps,
valid_iter=valid_iter,
valid_steps=opt.valid_steps)
if trainer.report_manager.tensorboard_writer is not None:
trainer.report_manager.tensorboard_writer.close()
| 4,977 | 32.863946 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/model_builder.py
|
"""
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.decoders import str2dec
from onmt.modules import Embeddings, CopyGenerator, TableEmbeddings
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
def build_embeddings(opt, text_field, for_encoder=True):
"""
Args:
opt: the option in current environment.
text_field(TextMultiField): word and feats field.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
if opt.model_type == 'table' and for_encoder:
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
# value field
field = text_field[0][1]
word_padding_idx = field.vocab.stoi[field.pad_token]
word_vocab_size = len(field.vocab)
# pos field
field = text_field[1][1]
feat_padding_idx = field.vocab.stoi[field.pad_token]
feat_vocab_size = len(field.vocab)
ent_idx = text_field.base_field.vocab.stoi['<ent>']
return TableEmbeddings(
word_vec_size=emb_dim,
word_vocab_size=word_vocab_size,
word_padding_idx=word_padding_idx,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
feat_vocab_size=feat_vocab_size,
feat_padding_idx=feat_padding_idx,
merge=opt.feat_merge,
merge_activation=opt.feat_merge_activation,
dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
ent_idx=ent_idx
)
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]
num_embs = [len(f.vocab) for _, f in text_field]
num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
else opt.fix_word_vecs_dec
return Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs
)
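# Note (added for clarity): for the 'table' model type the source side gets a
# TableEmbeddings (cell value plus column-name feature, with the index of the
# <ent> token passed along); every other case falls back to the standard
# OpenNMT Embeddings.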
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.model_type in ["text", "table"]:
enc_type = opt.encoder_type
else:
enc_type = opt.model_type
return str2enc[enc_type].from_opt(opt, embeddings)
def build_decoder(opt, embeddings, dims=None):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
dec_type = "ifrnn" if opt.decoder_type == "rnn" and opt.input_feed \
else opt.decoder_type
if dims is not None:
return str2dec[dec_type].from_opt(opt, embeddings, dims)
return str2dec[dec_type].from_opt(opt, embeddings)
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(
vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
)
else:
fields = vocab
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
opt.gpu)
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return fields, model, model_opt
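# Illustrative usage sketch (added for clarity, not part of the original file;
# the checkpoint path is hypothetical):
#   opt.models = ['experiments/model_step_10000.pt']
#   fields, model, model_opt = load_test_model(opt)
#   # `model` and its generator are already switched to eval mode.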
def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):
"""Build a model from opts.
Args:
model_opt: the option loaded from checkpoint. It's important that
the opts have been updated and validated. See
:class:`onmt.utils.parse.ArgumentParser`.
fields (dict[str, torchtext.data.Field]):
`Field` objects for the model.
gpu (bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed snapshot
model from a stopped training.
gpu_id (int or NoneType): Which GPU to use.
Returns:
the NMTModel.
"""
# Build embeddings.
if model_opt.model_type in ["text", 'table']:
src_field = fields["src"]
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
# Build encoder.
encoder = build_encoder(model_opt, src_emb)
if isinstance(encoder.embeddings, TableEmbeddings):
if getattr(model_opt, 'use_pos', True):
dims = (
encoder.embeddings.embedding_size,
encoder.embeddings.pos_embeddings.embedding_dim
)
else:
dims = encoder.embeddings.embedding_size
else:
dims = None
# Build decoder.
tgt_field = fields["tgt"]
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = build_decoder(model_opt, tgt_emb, dims)
# Build NMTModel(= encoder + decoder).
if gpu and gpu_id is not None:
device = torch.device("cuda", gpu_id)
elif gpu and not gpu_id:
device = torch.device("cuda")
elif not gpu:
device = torch.device("cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab)),
Cast(torch.float32),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
tgt_base_field = fields["tgt"].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
        # This preserves backward-compat for models using custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec)
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
return model
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
logger.info(model)
return model
| 9,581 | 34.227941 | 81 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/__init__.py
|
""" Main entry point of the ONMT library """
from __future__ import division, print_function
import onmt.inputters
import onmt.encoders
import onmt.decoders
import onmt.models
import onmt.utils
import onmt.modules
from onmt.trainer import Trainer
import sys
import onmt.utils.optimizers
onmt.utils.optimizers.Optim = onmt.utils.optimizers.Optimizer
sys.modules["onmt.Optim"] = onmt.utils.optimizers
ENT_SIZE = 24 # Used for hierarchical training on RotoWire
# For Flake
__all__ = [onmt.inputters, onmt.encoders, onmt.decoders, onmt.models,
onmt.utils, onmt.modules, "Trainer"]
__version__ = "1.0.0"
| 615 | 25.782609 | 69 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/trainer.py
|
"""
This is the loadable seq2seq trainer library that is
in charge of training details, loss compute, and statistics.
See train.py for a use case of this library.
Note: To make this a general library, we implement *only*
mechanism things here(i.e. what to do), and leave the strategy
things to users(i.e. how to do it). Also see train.py(one of the
users of this library) for the strategy things we do.
"""
import torch
import traceback
import onmt.utils
from onmt.utils.logging import logger
def build_trainer(opt, device_id, model, fields, optim, model_saver=None):
"""
    Simplify `Trainer` creation based on user `opt`s.
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
tgt_field = dict(fields)["tgt"].base_field
train_loss = onmt.utils.loss.build_loss_compute(model, tgt_field, opt)
valid_loss = onmt.utils.loss.build_loss_compute(
model, tgt_field, opt, train=False)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches if opt.model_dtype == 'fp32' else 0
norm_method = opt.normalization
accum_count = opt.accum_count
accum_steps = opt.accum_steps
n_gpu = opt.world_size
average_decay = opt.average_decay
average_every = opt.average_every
dropout = opt.dropout
dropout_steps = opt.dropout_steps
if device_id >= 0:
gpu_rank = opt.gpu_ranks[device_id]
else:
gpu_rank = 0
n_gpu = 0
gpu_verbose_level = opt.gpu_verbose_level
earlystopper = onmt.utils.EarlyStopping(
opt.early_stopping, scorers=onmt.utils.scorers_from_opts(opt)) \
if opt.early_stopping > 0 else None
report_manager = onmt.utils.build_report_manager(opt, gpu_rank)
trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,
shard_size, norm_method,
accum_count, accum_steps,
n_gpu, gpu_rank,
gpu_verbose_level, report_manager,
with_align=True if opt.lambda_align > 0 else False,
model_saver=model_saver if gpu_rank == 0 else None,
average_decay=average_decay,
average_every=average_every,
model_dtype=opt.model_dtype,
earlystopper=earlystopper,
dropout=dropout,
dropout_steps=dropout_steps)
return trainer
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.models.model.NMTModel`): translation model
to train
train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
            validation loss computation
optim(:obj:`onmt.utils.optimizers.Optimizer`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
accum_count(list): accumulate gradients this many times.
accum_steps(list): steps for accum gradients changes.
report_manager(:obj:`onmt.utils.ReportMgrBase`):
the object that creates reports, or None
model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
used to save a checkpoint.
Thus nothing will be saved if this parameter is None
"""
def __init__(self, model, train_loss, valid_loss, optim,
trunc_size=0, shard_size=32,
norm_method="sents", accum_count=[1],
accum_steps=[0],
n_gpu=1, gpu_rank=1, gpu_verbose_level=0,
report_manager=None, with_align=False, model_saver=None,
average_decay=0, average_every=1, model_dtype='fp32',
earlystopper=None, dropout=[0.3], dropout_steps=[0]):
# Basic attributes.
self.model = model
self.train_loss = train_loss
self.valid_loss = valid_loss
self.optim = optim
self.trunc_size = trunc_size
self.shard_size = shard_size
self.norm_method = norm_method
self.accum_count_l = accum_count
self.accum_count = accum_count[0]
self.accum_steps = accum_steps
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.gpu_verbose_level = gpu_verbose_level
self.report_manager = report_manager
self.with_align = with_align
self.model_saver = model_saver
self.average_decay = average_decay
self.moving_average = None
self.average_every = average_every
self.model_dtype = model_dtype
self.earlystopper = earlystopper
self.dropout = dropout
self.dropout_steps = dropout_steps
for i in range(len(self.accum_count_l)):
assert self.accum_count_l[i] > 0
if self.accum_count_l[i] > 1:
assert self.trunc_size == 0, \
"""To enable accumulated gradients,
you must disable target sequence truncating."""
# Set model in training mode.
self.model.train()
def _accum_count(self, step):
for i in range(len(self.accum_steps)):
if step > self.accum_steps[i]:
_accum = self.accum_count_l[i]
return _accum
def _maybe_update_dropout(self, step):
for i in range(len(self.dropout_steps)):
if step > 1 and step == self.dropout_steps[i] + 1:
self.model.update_dropout(self.dropout[i])
logger.info("Updated dropout to %f from step %d"
% (self.dropout[i], step))
def _accum_batches(self, iterator):
batches = []
normalization = 0
self.accum_count = self._accum_count(self.optim.training_step)
for batch in iterator:
batches.append(batch)
if self.norm_method == "tokens":
num_tokens = batch.tgt[1:, :, 0].ne(
self.train_loss.padding_idx).sum()
normalization += num_tokens.item()
else:
normalization += batch.batch_size
if len(batches) == self.accum_count:
yield batches, normalization
self.accum_count = self._accum_count(self.optim.training_step)
batches = []
normalization = 0
if batches:
yield batches, normalization
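    # Note (added for clarity): _accum_batches yields groups of `accum_count`
    # consecutive batches together with the normalization term used to scale
    # the loss: the number of non-padding target tokens when
    # norm_method == "tokens", otherwise the number of sentences.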
def _update_average(self, step):
if self.moving_average is None:
copy_params = [params.detach().float()
for params in self.model.parameters()]
self.moving_average = copy_params
else:
average_decay = max(self.average_decay,
1 - (step + 1)/(step + 10))
for (i, avg), cpt in zip(enumerate(self.moving_average),
self.model.parameters()):
self.moving_average[i] = \
(1 - average_decay) * avg + \
cpt.detach().float() * average_decay
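    # The decay above starts high (0.9 at step 0) and shrinks towards
    # ``average_decay`` as training proceeds, so the moving average first tracks
    # the raw weights closely and then updates more and more slowly.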
def train(self,
train_iter,
train_steps,
save_checkpoint_steps=5000,
valid_iter=None,
valid_steps=10000):
"""
The main training loop by iterating over `train_iter` and possibly
running validation on `valid_iter`.
Args:
train_iter: A generator that returns the next training batch.
train_steps: Run training for this many iterations.
save_checkpoint_steps: Save a checkpoint every this many
iterations.
valid_iter: A generator that returns the next validation batch.
valid_steps: Run evaluation every this many iterations.
Returns:
The gathered statistics.
"""
if valid_iter is None:
logger.info('Start training loop without validation...')
else:
logger.info('Start training loop and validate every %d steps...',
valid_steps)
total_stats = onmt.utils.Statistics()
report_stats = onmt.utils.Statistics()
self._start_report_manager(start_time=total_stats.start_time)
for i, (batches, normalization) in enumerate(
self._accum_batches(train_iter)):
step = self.optim.training_step
# UPDATE DROPOUT
self._maybe_update_dropout(step)
if self.gpu_verbose_level > 1:
logger.info("GpuRank %d: index: %d", self.gpu_rank, i)
if self.gpu_verbose_level > 0:
logger.info("GpuRank %d: reduce_counter: %d \
n_minibatch %d"
% (self.gpu_rank, i + 1, len(batches)))
if self.n_gpu > 1:
normalization = sum(onmt.utils.distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
batches, normalization, total_stats,
report_stats)
if self.average_decay > 0 and i % self.average_every == 0:
self._update_average(step)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate(),
report_stats)
if valid_iter is not None and step % valid_steps == 0:
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: validate step %d'
% (self.gpu_rank, step))
valid_stats = self.validate(
valid_iter, moving_average=self.moving_average)
if self.gpu_verbose_level > 0:
                    logger.info('GpuRank %d: gather valid stat step %d'
                                % (self.gpu_rank, step))
valid_stats = self._maybe_gather_stats(valid_stats)
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: report stat step %d'
% (self.gpu_rank, step))
self._report_step(self.optim.learning_rate(),
step, valid_stats=valid_stats)
# Run patience mechanism
if self.earlystopper is not None:
self.earlystopper(valid_stats, step)
# If the patience has reached the limit, stop training
if self.earlystopper.has_stopped():
break
if (self.model_saver is not None
and (save_checkpoint_steps != 0
and step % save_checkpoint_steps == 0)):
self.model_saver.save(step, moving_average=self.moving_average)
if train_steps > 0 and step >= train_steps:
break
if self.model_saver is not None:
self.model_saver.save(step, moving_average=self.moving_average)
return total_stats
def validate(self, valid_iter, moving_average=None):
""" Validate model.
valid_iter: validate data iterator
Returns:
            :obj:`onmt.utils.Statistics`: validation loss statistics
"""
valid_model = self.model
if moving_average:
# swap model params w/ moving average
# (and keep the original parameters)
model_params_data = []
for avg, param in zip(self.moving_average,
valid_model.parameters()):
model_params_data.append(param.data)
param.data = avg.data.half() if self.optim._fp16 == "legacy" \
else avg.data
# Set model in validating mode.
valid_model.eval()
with torch.no_grad():
stats = onmt.utils.Statistics()
for batch in valid_iter:
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
tgt = batch.tgt
# F-prop through the model.
outputs, attns = valid_model(src, tgt, src_lengths,
with_align=self.with_align)
# Compute loss.
_, batch_stats = self.valid_loss(batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
if moving_average:
for param_data, param in zip(model_params_data,
self.model.parameters()):
param.data = param_data
# Set model back to training mode.
valid_model.train()
return stats
def _gradient_accumulation(self, true_batches, normalization, total_stats,
report_stats):
if self.accum_count > 1:
self.optim.zero_grad()
for k, batch in enumerate(true_batches):
target_size = batch.tgt.size(0)
            # Truncated BPTT: reminder, this is not compatible with accum > 1
if self.trunc_size:
trunc_size = self.trunc_size
else:
trunc_size = target_size
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
if src_lengths is not None:
report_stats.n_src_words += src_lengths.sum().item()
tgt_outer = batch.tgt
bptt = False
for j in range(0, target_size-1, trunc_size):
# 1. Create truncated target.
tgt = tgt_outer[j: j + trunc_size]
# 2. F-prop all but generator.
if self.accum_count == 1:
self.optim.zero_grad()
outputs, attns = self.model(src, tgt, src_lengths, bptt=bptt,
with_align=self.with_align)
bptt = True
# 3. Compute loss.
try:
loss, batch_stats = self.train_loss(
batch,
outputs,
attns,
normalization=normalization,
shard_size=self.shard_size,
trunc_start=j,
trunc_size=trunc_size)
if loss is not None:
self.optim.backward(loss)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
except Exception:
traceback.print_exc()
logger.info("At step %d, we removed a batch - accum %d",
self.optim.training_step, k)
# 4. Update the parameters and statistics.
if self.accum_count == 1:
# Multi GPU gradient gather
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
onmt.utils.distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
# If truncated, don't backprop fully.
# TO CHECK
# if dec_state is not None:
# dec_state.detach()
if self.model.decoder.state is not None:
self.model.decoder.detach_state()
# in case of multi step gradient accumulation,
# update only after accum batches
if self.accum_count > 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
onmt.utils.distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return onmt.utils.Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
| 18,735 | 39.292473 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/text_dataset.py
|
# -*- coding: utf-8 -*-
from functools import partial
import six
import torch
from torchtext.data import Field, RawField
from onmt.inputters.datareader_base import DataReaderBase
class TextDataReader(DataReaderBase):
def read(self, sequences, side, _dir=None):
"""Read text data from disk.
Args:
sequences (str or Iterable[str]):
path to text file or iterable of the actual text data.
side (str): Prefix used in return dict. Usually
``"src"`` or ``"tgt"``.
_dir (NoneType): Leave as ``None``. This parameter exists to
conform with the :func:`DataReaderBase.read()` signature.
Yields:
dictionaries whose keys are the names of fields and whose
values are more or less the result of tokenizing with those
fields.
"""
assert _dir is None or _dir == "", \
"Cannot use _dir with TextDataReader."
if isinstance(sequences, str):
sequences = DataReaderBase._read_file(sequences)
for i, seq in enumerate(sequences):
if isinstance(seq, six.binary_type):
seq = seq.decode("utf-8")
yield {side: seq, "indices": i}
def text_sort_key(ex):
"""Sort using the number of tokens in the sequence."""
if hasattr(ex, "tgt"):
return len(ex.src[0]), len(ex.tgt[0])
return len(ex.src[0])
# mix this with partial
def _feature_tokenize(
string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
"""Split apart word features (like POS/NER tags) from the tokens.
Args:
string (str): A string with ``tok_delim`` joining tokens and
features joined by ``feat_delim``. For example,
``"hello|NOUN|'' Earth|NOUN|PLANET"``.
layer (int): Which feature to extract. (Not used if there are no
features, indicated by ``feat_delim is None``). In the
example above, layer 2 is ``'' PLANET``.
truncate (int or NoneType): Restrict sequences to this length of
tokens.
Returns:
List[str] of tokens.
"""
tokens = string.split(tok_delim)
if truncate is not None:
tokens = tokens[:truncate]
if feat_delim is not None:
tokens = [t.split(feat_delim)[layer] for t in tokens]
return tokens
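# Illustrative behaviour, using the "│" feature delimiter that text_fields()
# passes in when word-level features are present:
#   _feature_tokenize("We│PRON are│VERB", layer=0, feat_delim="│") -> ["We", "are"]
#   _feature_tokenize("We│PRON are│VERB", layer=1, feat_delim="│") -> ["PRON", "VERB"]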
class TextMultiField(RawField):
"""Container for subfields.
Text data might use POS/NER/etc labels in addition to tokens.
This class associates the "base" :class:`Field` with any subfields.
It also handles padding the data and stacking it.
Args:
base_name (str): Name for the base field.
base_field (Field): The token field.
feats_fields (Iterable[Tuple[str, Field]]): A list of name-field
pairs.
Attributes:
fields (Iterable[Tuple[str, Field]]): A list of name-field pairs.
The order is defined as the base field first, then
``feats_fields`` in alphabetical order.
"""
def __init__(self, base_name, base_field, feats_fields):
super(TextMultiField, self).__init__()
self.fields = [(base_name, base_field)]
for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):
self.fields.append((name, ff))
@property
def base_field(self):
return self.fields[0][1]
def process(self, batch, device=None):
"""Convert outputs of preprocess into Tensors.
Args:
batch (List[List[List[str]]]): A list of length batch size.
Each element is a list of the preprocess results for each
                field (which are lists of str "words" or feature tags).
device (torch.device or str): The device on which the tensor(s)
are built.
Returns:
torch.LongTensor or Tuple[LongTensor, LongTensor]:
A tensor of shape ``(seq_len, batch_size, len(self.fields))``
where the field features are ordered like ``self.fields``.
If the base field returns lengths, these are also returned
and have shape ``(batch_size,)``.
"""
# batch (list(list(list))): batch_size x len(self.fields) x seq_len
batch_by_feat = list(zip(*batch))
base_data = self.base_field.process(batch_by_feat[0], device=device)
if self.base_field.include_lengths:
# lengths: batch_size
base_data, lengths = base_data
feats = [ff.process(batch_by_feat[i], device=device)
for i, (_, ff) in enumerate(self.fields[1:], 1)]
levels = [base_data] + feats
# data: seq_len x batch_size x len(self.fields)
data = torch.stack(levels, 2)
if self.base_field.include_lengths:
return data, lengths
else:
return data
def preprocess(self, x):
"""Preprocess data.
Args:
x (str): A sentence string (words joined by whitespace).
Returns:
List[List[str]]: A list of length ``len(self.fields)`` containing
lists of tokens/feature tags for the sentence. The output
is ordered like ``self.fields``.
"""
return [f.preprocess(x) for _, f in self.fields]
def __getitem__(self, item):
return self.fields[item]
def text_fields(**kwargs):
"""Create text fields.
Args:
base_name (str): Name associated with the field.
n_feats (int): Number of word level feats (not counting the tokens)
include_lengths (bool): Optionally return the sequence lengths.
pad (str, optional): Defaults to ``"<blank>"``.
bos (str or NoneType, optional): Defaults to ``"<s>"``.
eos (str or NoneType, optional): Defaults to ``"</s>"``.
truncate (bool or NoneType, optional): Defaults to ``None``.
Returns:
TextMultiField
"""
n_feats = kwargs["n_feats"]
include_lengths = kwargs["include_lengths"]
base_name = kwargs["base_name"]
pad = kwargs.get("pad", "<blank>")
bos = kwargs.get("bos", "<s>")
eos = kwargs.get("eos", "</s>")
truncate = kwargs.get("truncate", None)
fields_ = []
feat_delim = u"│" if n_feats > 0 else None
for i in range(n_feats + 1):
name = base_name + "_feat_" + str(i - 1) if i > 0 else base_name
tokenize = partial(
_feature_tokenize,
layer=i,
truncate=truncate,
feat_delim=feat_delim)
use_len = i == 0 and include_lengths
feat = Field(
init_token=bos, eos_token=eos,
pad_token=pad, tokenize=tokenize,
include_lengths=use_len)
fields_.append((name, feat))
assert fields_[0][0] == base_name # sanity check
field = TextMultiField(fields_[0][0], fields_[0][1], fields_[1:])
return field
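# Illustrative usage (argument values assumed); this mirrors how get_fields()
# builds the source side for plain text without word features:
#
#     src_field = text_fields(base_name="src", n_feats=0, include_lengths=True,
#                             pad="<blank>", bos=None, eos=None, truncate=None)
#     src_field.preprocess("the quick brown fox")
#     # -> [['the', 'quick', 'brown', 'fox']]  (one token list per sub-field)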
| 6,904 | 34.410256 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/dataset_base.py
|
# coding: utf-8
from itertools import chain, starmap
from collections import Counter
import torch
from torchtext.data import Dataset as TorchtextDataset
from torchtext.data import Example
from torchtext.vocab import Vocab
def _join_dicts(*args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args]))
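# e.g. _join_dicts({"src": "a b c", "indices": 0}, {"tgt": "d e"})
#      -> {"src": "a b c", "indices": 0, "tgt": "d e"}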
def _dynamic_dict(example, src_field, tgt_field):
"""Create copy-vocab and numericalize with it.
In-place adds ``"src_map"`` to ``example``. That is the copy-vocab
numericalization of the tokenized ``example["src"]``. If ``example``
has a ``"tgt"`` key, adds ``"alignment"`` to example. That is the
copy-vocab numericalization of the tokenized ``example["tgt"]``. The
alignment has an initial and final UNK token to match the BOS and EOS
tokens.
Args:
example (dict): An example dictionary with a ``"src"`` key and
maybe a ``"tgt"`` key. (This argument changes in place!)
src_field (torchtext.data.Field): Field object.
tgt_field (torchtext.data.Field): Field object.
Returns:
torchtext.data.Vocab and ``example``, changed as described.
"""
src = src_field.tokenize(example["src"])
# make a small vocab containing just the tokens in the source sequence
unk = src_field.unk_token
pad = src_field.pad_token
src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])
unk_idx = src_ex_vocab.stoi[unk]
# Map source tokens to indices in the dynamic dict.
src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])
example["src_map"] = src_map
example["src_ex_vocab"] = src_ex_vocab
if "tgt" in example:
tgt = tgt_field.tokenize(example["tgt"])
mask = torch.LongTensor(
[unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])
example["alignment"] = mask
return src_ex_vocab, example
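# Illustrative behaviour: for a source sentence "the cat sat on the mat", the
# per-example copy vocab contains only <unk>, <pad> and the distinct source
# tokens; ``src_map`` holds one copy-vocab index per source position (repeated
# tokens such as "the" share an index), and ``alignment`` maps every target
# token to its copy-vocab index (or <unk>), with extra <unk> entries standing
# in for the BOS/EOS positions.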
class Dataset(TorchtextDataset):
"""Contain data and process it.
A dataset is an object that accepts sequences of raw data (sentence pairs
in the case of machine translation) and fields which describe how this
raw data should be processed to produce tensors. When a dataset is
instantiated, it applies the fields' preprocessing pipeline (but not
the bit that numericalizes it or turns it into batch tensors) to the raw
data, producing a list of :class:`torchtext.data.Example` objects.
torchtext's iterators then know how to use these examples to make batches.
Args:
fields (dict[str, Field]): a dict with the structure
returned by :func:`onmt.inputters.get_fields()`. Usually
that means the dataset side, ``"src"`` or ``"tgt"``. Keys match
the keys of items yielded by the ``readers``, while values
are lists of (name, Field) pairs. An attribute with this
name will be created for each :class:`torchtext.data.Example`
object and its value will be the result of applying the Field
to the data that matches the key. The advantage of having
sequences of fields for each piece of raw input is that it allows
the dataset to store multiple "views" of each input, which allows
for easy implementation of token-level features, mixed word-
and character-level models, and so on. (See also
:class:`onmt.inputters.TextMultiField`.)
readers (Iterable[onmt.inputters.DataReaderBase]): Reader objects
for disk-to-dict. The yielded dicts are then processed
according to ``fields``.
data (Iterable[Tuple[str, Any]]): (name, ``data_arg``) pairs
where ``data_arg`` is passed to the ``read()`` method of the
reader in ``readers`` at that position. (See the reader object for
details on the ``Any`` type.)
dirs (Iterable[str or NoneType]): A list of directories where
data is contained. See the reader object for more details.
sort_key (Callable[[torchtext.data.Example], Any]): A function
for determining the value on which data is sorted (i.e. length).
filter_pred (Callable[[torchtext.data.Example], bool]): A function
that accepts Example objects and returns a boolean value
indicating whether to include that example in the dataset.
Attributes:
src_vocabs (List[torchtext.data.Vocab]): Used with dynamic dict/copy
attention. There is a very short vocab for each src example.
It contains just the source words, e.g. so that the generator can
predict to copy them.
"""
def __init__(self, fields, readers, data, dirs, sort_key,
filter_pred=None):
self.sort_key = sort_key
can_copy = 'src_map' in fields and 'alignment' in fields
read_iters = [r.read(dat[1], dat[0], dir_) for r, dat, dir_
in zip(readers, data, dirs)]
# self.src_vocabs is used in collapse_copy_scores and Translator.py
self.src_vocabs = []
examples = []
for ex_dict in starmap(_join_dicts, zip(*read_iters)):
if can_copy:
src_field = fields['src']
tgt_field = fields['tgt']
# this assumes src_field and tgt_field are both text
src_ex_vocab, ex_dict = _dynamic_dict(
ex_dict, src_field.base_field, tgt_field.base_field)
self.src_vocabs.append(src_ex_vocab)
ex_fields = {k: [(k, v)] for k, v in fields.items() if
k in ex_dict}
ex = Example.fromdict(ex_dict, ex_fields)
examples.append(ex)
# fields needs to have only keys that examples have as attrs
fields = []
for _, nf_list in ex_fields.items():
assert len(nf_list) == 1
fields.append(nf_list[0])
super(Dataset, self).__init__(examples, fields, filter_pred)
def __getattr__(self, attr):
# avoid infinite recursion when fields isn't defined
if 'fields' not in vars(self):
raise AttributeError
if attr in self.fields:
return (getattr(x, attr) for x in self.examples)
else:
raise AttributeError
def save(self, path, remove_fields=True):
if remove_fields:
self.fields = []
torch.save(self, path)
@staticmethod
def config(fields):
readers, data, dirs = [], [], []
for name, field in fields:
if field["data"] is not None:
readers.append(field["reader"])
data.append((name, field["data"]))
dirs.append(field["dir"])
return readers, data, dirs
| 6,865 | 40.612121 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/datareader_base.py
|
# coding: utf-8
# several data readers need optional dependencies. There's no
# appropriate builtin exception
class MissingDependencyException(Exception):
pass
class DataReaderBase(object):
"""Read data from file system and yield as dicts.
Raises:
onmt.inputters.datareader_base.MissingDependencyException: A number
of DataReaders need specific additional packages.
If any are missing, this will be raised.
"""
@classmethod
def from_opt(cls, opt):
"""Alternative constructor.
Args:
opt (argparse.Namespace): The parsed arguments.
"""
return cls()
@classmethod
def _read_file(cls, path):
"""Line-by-line read a file as bytes."""
with open(path, "rb") as f:
for line in f:
yield line
@staticmethod
def _raise_missing_dep(*missing_deps):
"""Raise missing dep exception with standard error message."""
raise MissingDependencyException(
"Could not create reader. Be sure to install "
"the following dependencies: " + ", ".join(missing_deps))
def read(self, data, side, src_dir):
"""Read data from file system and yield as dicts."""
raise NotImplementedError()
| 1,286 | 26.978261 | 75 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/inputter.py
|
# -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field, RawField, LabelField
from torchtext.vocab import Vocab
from torchtext.data.utils import RandomShuffler
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.inputters.vec_dataset import vec_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
def make_src(data, vocab):
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
class AlignField(LabelField):
"""
Parse ['<src>-<tgt>', ...] into ['<src>','<tgt>', ...]
"""
def __init__(self, **kwargs):
kwargs['use_vocab'] = False
kwargs['preprocessing'] = parse_align_idx
super(AlignField, self).__init__(**kwargs)
def process(self, batch, device=None):
""" Turn a batch of align-idx to a sparse align idx Tensor"""
sparse_idx = []
for i, example in enumerate(batch):
for src, tgt in example:
# +1 for tgt side to keep coherent after "bos" padding,
# register ['N°_in_batch', 'tgt_id+1', 'src_id']
sparse_idx.append([i, tgt + 1, src])
align_idx = torch.tensor(sparse_idx, dtype=self.dtype, device=device)
return align_idx
def parse_align_idx(align_pharaoh):
"""
Parse Pharaoh alignment into [[<src>, <tgt>], ...]
"""
align_list = align_pharaoh.strip().split(' ')
flatten_align_idx = []
for align in align_list:
try:
src_idx, tgt_idx = align.split('-')
except ValueError:
logger.warning("{} in `{}`".format(align, align_pharaoh))
logger.warning("Bad alignement line exists. Please check file!")
raise
flatten_align_idx.append([int(src_idx), int(tgt_idx)])
return flatten_align_idx
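# e.g. parse_align_idx("0-0 1-2 2-1") -> [[0, 0], [1, 2], [2, 1]]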
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
pad='<blank>',
bos='<s>',
eos='</s>',
dynamic_dict=False,
with_align=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
with_align (bool): Whether or not to include word align.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio', 'vec'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields,
"vec": vec_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
src_ex_vocab = RawField()
fields["src_ex_vocab"] = src_ex_vocab
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
if with_align:
word_align = AlignField()
fields["align"] = word_align
return fields
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
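# Typical use as ``filter_pred`` (illustrative values):
#
#     from functools import partial
#     pred = partial(filter_example, use_src_len=True, use_tgt_len=True,
#                    max_src_len=200, max_tgt_len=200)
#     # pred(ex) is True only if 1 <= len(ex.src[0]) <= 200
#     # and 1 <= len(ex.tgt[0]) <= 200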
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
        return vocab
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
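# e.g. a vocab of 21,003 entries padded to a multiple of 8 receives 5 dummy
# tokens and ends up with 21,008 entries, keeping embedding and softmax sizes
# divisible by 8 as fp16 training prefers.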
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters, min_freq):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i + min_freq
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def _build_fields_vocab(fields, counters, data_type, share_vocab,
vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency):
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
counters = defaultdict(Counter)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# Load vocabulary
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters,
src_words_min_frequency)
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters,
tgt_words_min_frequency)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
        # Free this shard from memory, but keep the last one
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
fields = _build_fields_vocab(
fields, counters, data_type,
share_vocab, vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency)
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
if overflowed == len(minibatch):
logger.warning(
"An example was ignored, more tokens"
" than allowed by tokens batch_size")
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
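# e.g. with the default counting function,
#   list(batch_iter(list(range(10)), batch_size=4))
#   -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
# (the trailing partial chunk is still yielded).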
def _pool(data, batch_size, batch_size_fn, batch_size_multiple,
sort_key, random_shuffler, pool_factor):
for p in torchtext.data.batch(
data, batch_size * pool_factor,
batch_size_fn=batch_size_fn):
p_batch = list(batch_iter(
sorted(p, key=sort_key),
batch_size,
batch_size_fn=batch_size_fn,
batch_size_multiple=batch_size_multiple))
for b in random_shuffler(p_batch):
yield b
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
pool_factor=1,
batch_size_multiple=1,
yield_raw_example=False,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
self.yield_raw_example = yield_raw_example
self.dataset = dataset
self.pool_factor = pool_factor
def create_batches(self):
if self.train:
if self.yield_raw_example:
self.batches = batch_iter(
self.data(),
1,
batch_size_fn=None,
batch_size_multiple=1)
else:
self.batches = _pool(
self.data(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
def __iter__(self):
"""
Extended version of the definition in torchtext.data.Iterator.
Added yield_raw_example behaviour to yield a torchtext.data.Example
instead of a torchtext.data.Batch object.
"""
while True:
self.init_epoch()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
if self.sort_within_batch:
# NOTE: `rnn.pack_padded_sequence` requires that a
# minibatch be sorted by decreasing order, which
# requires reversing relative to typical sort keys
if self.sort:
minibatch.reverse()
else:
minibatch.sort(key=self.sort_key, reverse=True)
if self.yield_raw_example:
yield minibatch[0]
else:
yield torchtext.data.Batch(
minibatch,
self.dataset,
self.device)
if not self.repeat:
return
class MultipleDatasetIterator(object):
"""
This takes a list of iterable objects (DatasetLazyIter) and their
respective weights, and yields a batch in the wanted proportions.
"""
def __init__(self,
train_shards,
fields,
device,
opt):
self.index = -1
self.iterables = []
for shard in train_shards:
self.iterables.append(
build_dataset_iter(shard, fields, opt, multi=True))
self.init_iterators = True
self.weights = opt.data_weights
self.batch_size = opt.batch_size
self.batch_size_fn = max_tok_len \
if opt.batch_type == "tokens" else None
self.batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
self.device = device
# Temporarily load one shard to retrieve sort_key for data_type
temp_dataset = torch.load(self.iterables[0]._paths[0])
self.sort_key = temp_dataset.sort_key
self.random_shuffler = RandomShuffler()
self.pool_factor = opt.pool_factor
del temp_dataset
def _iter_datasets(self):
if self.init_iterators:
self.iterators = [iter(iterable) for iterable in self.iterables]
self.init_iterators = False
for weight in self.weights:
self.index = (self.index + 1) % len(self.iterators)
for i in range(weight):
yield self.iterators[self.index]
def _iter_examples(self):
for iterator in cycle(self._iter_datasets()):
yield next(iterator)
def __iter__(self):
while True:
for minibatch in _pool(
self._iter_examples(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor):
minibatch = sorted(minibatch, key=self.sort_key, reverse=True)
yield torchtext.data.Batch(minibatch,
self.iterables[0].dataset,
self.device)
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, pool_factor,
repeat=True, num_batches_multiple=1, yield_raw_example=False):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
self.yield_raw_example = yield_raw_example
self.pool_factor = pool_factor
def _iter_dataset(self, path):
logger.info('Loading dataset from %s' % path)
cur_dataset = torch.load(path)
logger.info('number of examples: %d' % len(cur_dataset))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
pool_factor=self.pool_factor,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False,
yield_raw_example=self.yield_raw_example
)
for batch in cur_iter:
self.dataset = cur_iter.dataset
yield batch
# NOTE: This is causing some issues for consumer/producer,
# as we may still have some of those examples in some queue
# cur_dataset.examples = None
# gc.collect()
# del cur_dataset
# gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
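# Worked example (numbers assumed): for the 3rd example of a batch whose longest
# source so far has 20 tokens, src_elements = 3 * (20 + 2) = 66, and the function
# returns the larger of the src and tgt estimates; batch_iter() closes the batch
# once that estimate of padded tokens reaches ``batch_size``.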
def build_dataset_iter(corpus_type, fields, opt, is_train=True, multi=False):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt')))
if not dataset_paths:
if is_train:
raise ValueError('Training data %s not found' % opt.data)
else:
return None
if multi:
batch_size = 1
batch_fn = None
batch_size_multiple = 1
else:
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len \
if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
opt.pool_factor,
repeat=not opt.single_pass,
num_batches_multiple=max(opt.accum_count) * opt.world_size,
yield_raw_example=multi)
def build_dataset_iter_multiple(train_shards, fields, opt):
return MultipleDatasetIterator(
train_shards, fields, "cuda" if opt.gpu_ranks else "cpu", opt)
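# Illustrative wiring (option values assumed), mirroring how a training script
# would typically consume these helpers:
#
#     train_iter = build_dataset_iter("train", fields, opt)
#     valid_iter = build_dataset_iter("valid", fields, opt, is_train=False)
#     for batch in train_iter:
#         src, lengths = batch.src   # tuple because the text src field sets
#         tgt = batch.tgt            # include_lengths=True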
| 31,503 | 35.590012 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/audio_dataset.py
|
# -*- coding: utf-8 -*-
import os
from tqdm import tqdm
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# imports of datatype-specific dependencies
try:
import torchaudio
import librosa
import numpy as np
except ImportError:
torchaudio, librosa, np = None, None, None
class AudioDataReader(DataReaderBase):
"""Read audio data from disk.
Args:
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation. See
:func:`librosa.stft()` ``window`` for more details.
normalize_audio (bool): subtract spectrogram by mean and divide
by std or not.
truncate (int or NoneType): maximum audio length
(0 or None for unlimited).
Raises:
onmt.inputters.datareader_base.MissingDependencyException: If
importing any of ``torchaudio``, ``librosa``, or ``numpy`` fail.
"""
def __init__(self, sample_rate=0, window_size=0, window_stride=0,
window=None, normalize_audio=True, truncate=None):
self._check_deps()
self.sample_rate = sample_rate
self.window_size = window_size
self.window_stride = window_stride
self.window = window
self.normalize_audio = normalize_audio
self.truncate = truncate
@classmethod
def from_opt(cls, opt):
return cls(sample_rate=opt.sample_rate, window_size=opt.window_size,
window_stride=opt.window_stride, window=opt.window)
@classmethod
def _check_deps(cls):
if any([torchaudio is None, librosa is None, np is None]):
cls._raise_missing_dep(
"torchaudio", "librosa", "numpy")
def extract_features(self, audio_path):
# torchaudio loading options recently changed. It's probably
# straightforward to rewrite the audio handling to make use of
# up-to-date torchaudio, but in the meantime there is a legacy
# method which uses the old defaults
sound, sample_rate_ = torchaudio.legacy.load(audio_path)
if self.truncate and self.truncate > 0:
if sound.size(0) > self.truncate:
sound = sound[:self.truncate]
assert sample_rate_ == self.sample_rate, \
'Sample rate of %s != -sample_rate (%d vs %d)' \
% (audio_path, sample_rate_, self.sample_rate)
sound = sound.numpy()
if len(sound.shape) > 1:
if sound.shape[1] == 1:
sound = sound.squeeze()
else:
sound = sound.mean(axis=1) # average multiple channels
n_fft = int(self.sample_rate * self.window_size)
win_length = n_fft
hop_length = int(self.sample_rate * self.window_stride)
# STFT
d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=self.window)
spect, _ = librosa.magphase(d)
spect = np.log1p(spect)
spect = torch.FloatTensor(spect)
if self.normalize_audio:
mean = spect.mean()
std = spect.std()
spect.add_(-mean)
spect.div_(std)
return spect
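    # The spectrogram returned above has shape (1 + n_fft // 2, n_frames):
    # a log-magnitude STFT, optionally normalised to zero mean / unit std.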
def read(self, data, side, src_dir=None):
"""Read data into dicts.
Args:
data (str or Iterable[str]): Sequence of audio paths or
path to file containing audio paths.
In either case, the filenames may be relative to ``src_dir``
(default behavior) or absolute.
side (str): Prefix used in return dict. Usually
``"src"`` or ``"tgt"``.
src_dir (str): Location of source audio files. See ``data``.
Yields:
A dictionary containing audio data for each line.
"""
assert src_dir is not None and os.path.exists(src_dir),\
"src_dir must be a valid directory if data_type is audio"
if isinstance(data, str):
data = DataReaderBase._read_file(data)
for i, line in enumerate(tqdm(data)):
line = line.decode("utf-8").strip()
audio_path = os.path.join(src_dir, line)
if not os.path.exists(audio_path):
audio_path = line
assert os.path.exists(audio_path), \
'audio path %s not found' % line
spect = self.extract_features(audio_path)
yield {side: spect, side + '_path': line, 'indices': i}
def audio_sort_key(ex):
"""Sort using duration time of the sound spectrogram."""
return ex.src.size(1)
class AudioSeqField(Field):
"""Defines an audio datatype and instructions for converting to Tensor.
See :class:`Fields` for attribute descriptions.
"""
def __init__(self, preprocessing=None, postprocessing=None,
include_lengths=False, batch_first=False, pad_index=0,
is_target=False):
super(AudioSeqField, self).__init__(
sequential=True, use_vocab=False, init_token=None,
eos_token=None, fix_length=False, dtype=torch.float,
preprocessing=preprocessing, postprocessing=postprocessing,
lower=False, tokenize=None, include_lengths=include_lengths,
batch_first=batch_first, pad_token=pad_index, unk_token=None,
pad_first=False, truncate_first=False, stop_words=None,
is_target=is_target
)
def pad(self, minibatch):
"""Pad a batch of examples to the length of the longest example.
Args:
minibatch (List[torch.FloatTensor]): A list of audio data,
each having shape 1 x n_feats x len where len is variable.
Returns:
torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
padded tensor of shape ``(batch_size, 1, n_feats, max_len)``.
and a list of the lengths if `self.include_lengths` is `True`
else just returns the padded tensor.
"""
assert not self.pad_first and not self.truncate_first \
and not self.fix_length and self.sequential
minibatch = list(minibatch)
lengths = [x.size(1) for x in minibatch]
max_len = max(lengths)
nfft = minibatch[0].size(0)
sounds = torch.full((len(minibatch), 1, nfft, max_len), self.pad_token)
for i, (spect, len_) in enumerate(zip(minibatch, lengths)):
sounds[i, :, :, 0:len_] = spect
if self.include_lengths:
return (sounds, lengths)
return sounds
def numericalize(self, arr, device=None):
"""Turn a batch of examples that use this field into a Variable.
If the field has ``include_lengths=True``, a tensor of lengths will be
included in the return value.
Args:
arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
List of tokenized and padded examples, or tuple of List of
tokenized and padded examples and List of lengths of each
example if self.include_lengths is True. Examples have shape
``(batch_size, 1, n_feats, max_len)`` if `self.batch_first`
else ``(max_len, batch_size, 1, n_feats)``.
device (str or torch.device): See `Field.numericalize`.
"""
assert self.use_vocab is False
if self.include_lengths and not isinstance(arr, tuple):
raise ValueError("Field has include_lengths set to True, but "
"input data is not a tuple of "
"(data batch, batch lengths).")
if isinstance(arr, tuple):
arr, lengths = arr
lengths = torch.tensor(lengths, dtype=torch.int, device=device)
if self.postprocessing is not None:
arr = self.postprocessing(arr, None)
if self.sequential and not self.batch_first:
arr = arr.permute(3, 0, 1, 2)
if self.sequential:
arr = arr.contiguous()
arr = arr.to(device)
if self.include_lengths:
return arr, lengths
return arr
def audio_fields(**kwargs):
audio = AudioSeqField(pad_index=0, batch_first=True, include_lengths=True)
return audio
| 8,459 | 36.93722 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/image_dataset.py
|
# -*- coding: utf-8 -*-
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# domain specific dependencies
try:
from PIL import Image
from torchvision import transforms
import cv2
except ImportError:
Image, transforms, cv2 = None, None, None
class ImageDataReader(DataReaderBase):
"""Read image data from disk.
Args:
truncate (tuple[int] or NoneType): maximum img size. Use
``(0,0)`` or ``None`` for unlimited.
channel_size (int): Number of channels per image.
Raises:
onmt.inputters.datareader_base.MissingDependencyException: If
importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
"""
def __init__(self, truncate=None, channel_size=3):
self._check_deps()
self.truncate = truncate
self.channel_size = channel_size
@classmethod
def from_opt(cls, opt):
return cls(channel_size=opt.image_channel_size)
@classmethod
def _check_deps(cls):
if any([Image is None, transforms is None, cv2 is None]):
cls._raise_missing_dep(
"PIL", "torchvision", "cv2")
def read(self, images, side, img_dir=None):
"""Read data into dicts.
Args:
images (str or Iterable[str]): Sequence of image paths or
                path to file containing image paths.
                In either case, the filenames may be relative to ``img_dir``
                (default behavior) or absolute.
side (str): Prefix used in return dict. Usually
``"src"`` or ``"tgt"``.
img_dir (str): Location of source image files. See ``images``.
Yields:
a dictionary containing image data, path and index for each line.
"""
if isinstance(images, str):
images = DataReaderBase._read_file(images)
for i, filename in enumerate(images):
filename = filename.decode("utf-8").strip()
img_path = os.path.join(img_dir, filename)
if not os.path.exists(img_path):
img_path = filename
assert os.path.exists(img_path), \
'img path %s not found' % filename
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
if self.truncate and self.truncate != (0, 0):
if not (img.size(1) <= self.truncate[0]
and img.size(2) <= self.truncate[1]):
continue
yield {side: img, side + '_path': filename, 'indices': i}
def img_sort_key(ex):
"""Sort using the size of the image: (width, height)."""
return ex.src.size(2), ex.src.size(1)
def batch_img(data, vocab):
"""Pad and batch a sequence of images."""
c = data[0].size(0)
h = max([t.size(1) for t in data])
w = max([t.size(2) for t in data])
imgs = torch.zeros(len(data), c, h, w).fill_(1)
for i, img in enumerate(data):
imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
return imgs
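# e.g. three RGB images of sizes (3, 20, 30), (3, 25, 10) and (3, 15, 35) are
# padded into a single (3, 3, 25, 35) batch tensor, with 1.0 as the pad value.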
def image_fields(**kwargs):
img = Field(
use_vocab=False, dtype=torch.float,
postprocessing=batch_img, sequential=False)
return img
| 3,378 | 30.579439 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/__init__.py
|
"""Module defining inputters.
Inputters implement the logic of transforming raw data to vectorized inputs,
e.g., from a line of text to a sequence of embeddings.
"""
from onmt.inputters.inputter import \
load_old_vocab, get_fields, OrderedIterator, \
build_vocab, old_style_vocab, filter_example
from onmt.inputters.dataset_base import Dataset
from onmt.inputters.text_dataset import text_sort_key, TextDataReader
from onmt.inputters.image_dataset import img_sort_key, ImageDataReader
from onmt.inputters.audio_dataset import audio_sort_key, AudioDataReader
from onmt.inputters.vec_dataset import vec_sort_key, VecDataReader
from onmt.inputters.datareader_base import DataReaderBase
str2reader = {
"text": TextDataReader, "img": ImageDataReader, "audio": AudioDataReader,
"vec": VecDataReader}
str2sortkey = {
'text': text_sort_key, 'img': img_sort_key, 'audio': audio_sort_key,
'vec': vec_sort_key}
__all__ = ['Dataset', 'load_old_vocab', 'get_fields', 'DataReaderBase',
'filter_example', 'old_style_vocab',
'build_vocab', 'OrderedIterator',
'text_sort_key', 'img_sort_key', 'audio_sort_key', 'vec_sort_key',
'TextDataReader', 'ImageDataReader', 'AudioDataReader',
'VecDataReader']
| 1,267 | 39.903226 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/inputters/vec_dataset.py
|
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
try:
import numpy as np
except ImportError:
np = None
class VecDataReader(DataReaderBase):
"""Read feature vector data from disk.
Raises:
onmt.inputters.datareader_base.MissingDependencyException: If
importing ``np`` fails.
"""
def __init__(self):
self._check_deps()
@classmethod
def _check_deps(cls):
if np is None:
cls._raise_missing_dep("np")
def read(self, vecs, side, vec_dir=None):
"""Read data into dicts.
Args:
vecs (str or Iterable[str]): Sequence of feature vector paths or
path to file containing feature vector paths.
In either case, the filenames may be relative to ``vec_dir``
(default behavior) or absolute.
side (str): Prefix used in return dict. Usually
``"src"`` or ``"tgt"``.
vec_dir (str): Location of source vectors. See ``vecs``.
Yields:
A dictionary containing feature vector data.
"""
if isinstance(vecs, str):
vecs = DataReaderBase._read_file(vecs)
for i, filename in enumerate(vecs):
filename = filename.decode("utf-8").strip()
vec_path = os.path.join(vec_dir, filename)
if not os.path.exists(vec_path):
vec_path = filename
assert os.path.exists(vec_path), \
'vec path %s not found' % filename
vec = np.load(vec_path)
yield {side: torch.from_numpy(vec),
side + "_path": filename, "indices": i}
def vec_sort_key(ex):
"""Sort using the length of the vector sequence."""
return ex.src.shape[0]
class VecSeqField(Field):
"""Defines an vector datatype and instructions for converting to Tensor.
See :class:`Fields` for attribute descriptions.
"""
def __init__(self, preprocessing=None, postprocessing=None,
include_lengths=False, batch_first=False, pad_index=0,
is_target=False):
super(VecSeqField, self).__init__(
sequential=True, use_vocab=False, init_token=None,
eos_token=None, fix_length=False, dtype=torch.float,
preprocessing=preprocessing, postprocessing=postprocessing,
lower=False, tokenize=None, include_lengths=include_lengths,
batch_first=batch_first, pad_token=pad_index, unk_token=None,
pad_first=False, truncate_first=False, stop_words=None,
is_target=is_target
)
def pad(self, minibatch):
"""Pad a batch of examples to the length of the longest example.
Args:
            minibatch (List[torch.FloatTensor]): A list of feature vector data,
each having shape ``(len, n_feats, feat_dim)``
where len is variable.
Returns:
torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
padded tensor of shape
                ``(batch_size, max_len, n_feats, feat_dim)``,
                plus a list of the lengths if ``self.include_lengths`` is
                ``True``; otherwise just the padded tensor.
"""
assert not self.pad_first and not self.truncate_first \
and not self.fix_length and self.sequential
minibatch = list(minibatch)
lengths = [x.size(0) for x in minibatch]
max_len = max(lengths)
nfeats = minibatch[0].size(1)
feat_dim = minibatch[0].size(2)
feats = torch.full((len(minibatch), max_len, nfeats, feat_dim),
self.pad_token)
for i, (feat, len_) in enumerate(zip(minibatch, lengths)):
feats[i, 0:len_, :, :] = feat
if self.include_lengths:
return (feats, lengths)
return feats
def numericalize(self, arr, device=None):
"""Turn a batch of examples that use this field into a Variable.
If the field has ``include_lengths=True``, a tensor of lengths will be
included in the return value.
Args:
arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
List of tokenized and padded examples, or tuple of List of
tokenized and padded examples and List of lengths of each
example if self.include_lengths is True.
device (str or torch.device): See `Field.numericalize`.
"""
assert self.use_vocab is False
if self.include_lengths and not isinstance(arr, tuple):
raise ValueError("Field has include_lengths set to True, but "
"input data is not a tuple of "
"(data batch, batch lengths).")
if isinstance(arr, tuple):
arr, lengths = arr
lengths = torch.tensor(lengths, dtype=torch.int, device=device)
arr = arr.to(device)
if self.postprocessing is not None:
arr = self.postprocessing(arr, None)
if self.sequential and not self.batch_first:
arr = arr.permute(1, 0, 2, 3)
if self.sequential:
arr = arr.contiguous()
if self.include_lengths:
return arr, lengths
return arr
def vec_fields(**kwargs):
vec = VecSeqField(pad_index=0, include_lengths=True)
return vec
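# Illustrative sketch, added for clarity (not part of the original file):
# VecSeqField.pad batches variable-length sequences of feature vectors, each
# of shape (len, n_feats, feat_dim). All sizes below are assumptions.
def _demo_vec_field_padding():
    field = vec_fields()                      # include_lengths=True, pad with 0
    minibatch = [torch.rand(5, 1, 4), torch.rand(3, 1, 4)]
    feats, lengths = field.pad(minibatch)
    assert feats.shape == (2, 5, 1, 4) and lengths == [5, 3]
    assert (feats[1, 3:] == 0).all()          # padded positions hold the pad value
    return feats, lengths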
| 5,447 | 35.32 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/sparse_losses.py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from onmt.modules.sparse_activations import _threshold_and_support
from onmt.utils.misc import aeq
class SparsemaxLossFunction(Function):
@staticmethod
def forward(ctx, input, target):
"""
input (FloatTensor): ``(n, num_classes)``.
target (LongTensor): ``(n,)``, the indices of the target classes
"""
input_batch, classes = input.size()
target_batch = target.size(0)
aeq(input_batch, target_batch)
z_k = input.gather(1, target.unsqueeze(1)).squeeze()
tau_z, support_size = _threshold_and_support(input, dim=1)
support = input > tau_z
x = torch.where(
support, input**2 - tau_z**2,
torch.tensor(0.0, device=input.device)
).sum(dim=1)
ctx.save_for_backward(input, target, tau_z)
# clamping necessary because of numerical errors: loss should be lower
# bounded by zero, but negative values near zero are possible without
# the clamp
return torch.clamp(x / 2 - z_k + 0.5, min=0.0)
@staticmethod
def backward(ctx, grad_output):
input, target, tau_z = ctx.saved_tensors
sparsemax_out = torch.clamp(input - tau_z, min=0)
delta = torch.zeros_like(sparsemax_out)
delta.scatter_(1, target.unsqueeze(1), 1)
return sparsemax_out - delta, None
sparsemax_loss = SparsemaxLossFunction.apply
class SparsemaxLoss(nn.Module):
"""
An implementation of sparsemax loss, first proposed in
:cite:`DBLP:journals/corr/MartinsA16`. If using
a sparse output layer, it is not possible to use negative log likelihood
because the loss is infinite in the case the target is assigned zero
probability. Inputs to SparsemaxLoss are arbitrary dense real-valued
vectors (like in nn.CrossEntropyLoss), not probability vectors (like in
nn.NLLLoss).
"""
def __init__(self, weight=None, ignore_index=-100,
reduction='elementwise_mean'):
assert reduction in ['elementwise_mean', 'sum', 'none']
self.reduction = reduction
self.weight = weight
self.ignore_index = ignore_index
super(SparsemaxLoss, self).__init__()
def forward(self, input, target):
loss = sparsemax_loss(input, target)
if self.ignore_index >= 0:
ignored_positions = target == self.ignore_index
size = float((target.size(0) - ignored_positions.sum()).item())
loss.masked_fill_(ignored_positions, 0.0)
else:
size = float(target.size(0))
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'elementwise_mean':
loss = loss.sum() / size
return loss
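# Illustrative sketch, added for clarity (not part of the original file):
# SparsemaxLoss consumes raw, unnormalized scores, just like
# nn.CrossEntropyLoss, rather than probabilities. Sizes are assumptions.
def _demo_sparsemax_loss():
    scores = torch.randn(4, 10)              # (batch, num_classes)
    target = torch.randint(0, 10, (4,))      # gold class indices
    criterion = SparsemaxLoss(reduction='sum')
    loss = criterion(scores, target)
    assert loss.item() >= 0                  # the loss is lower-bounded by zero
    return loss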
| 2,804 | 35.428571 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/sparse_activations.py
|
"""
An implementation of sparsemax (Martins & Astudillo, 2016). See
:cite:`DBLP:journals/corr/MartinsA16` for detailed description.
By Ben Peters and Vlad Niculae
"""
import torch
from torch.autograd import Function
import torch.nn as nn
def _make_ix_like(input, dim=0):
d = input.size(dim)
rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
view = [1] * input.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _threshold_and_support(input, dim=0):
"""Sparsemax building block: compute the threshold
Args:
input: any dimension
dim: dimension along which to apply the sparsemax
Returns:
        the threshold value ``tau`` and the size of the support
"""
input_srt, _ = torch.sort(input, descending=True, dim=dim)
input_cumsum = input_srt.cumsum(dim) - 1
rhos = _make_ix_like(input, dim)
support = rhos * input_srt > input_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = input_cumsum.gather(dim, support_size - 1)
tau /= support_size.to(input.dtype)
return tau, support_size
class SparsemaxFunction(Function):
@staticmethod
def forward(ctx, input, dim=0):
"""sparsemax: normalizing sparse transform (a la softmax)
Parameters:
input (Tensor): any shape
dim: dimension along which to apply sparsemax
Returns:
output (Tensor): same shape as input
"""
ctx.dim = dim
max_val, _ = input.max(dim=dim, keepdim=True)
input -= max_val # same numerical stability trick as for softmax
tau, supp_size = _threshold_and_support(input, dim=dim)
output = torch.clamp(input - tau, min=0)
ctx.save_for_backward(supp_size, output)
return output
@staticmethod
def backward(ctx, grad_output):
supp_size, output = ctx.saved_tensors
dim = ctx.dim
grad_input = grad_output.clone()
grad_input[output == 0] = 0
v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze()
v_hat = v_hat.unsqueeze(dim)
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None
sparsemax = SparsemaxFunction.apply
class Sparsemax(nn.Module):
def __init__(self, dim=0):
self.dim = dim
super(Sparsemax, self).__init__()
def forward(self, input):
return sparsemax(input, self.dim)
class LogSparsemax(nn.Module):
def __init__(self, dim=0):
self.dim = dim
super(LogSparsemax, self).__init__()
def forward(self, input):
return torch.log(sparsemax(input, self.dim))
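# Illustrative sketch, added for clarity (not part of the original file):
# unlike softmax, sparsemax can assign exactly zero probability to low-scoring
# classes while the output still sums to one. The input values are assumptions.
def _demo_sparsemax():
    logits = torch.tensor([[2.0, 1.9, -1.0, -2.0]])
    probs = Sparsemax(dim=1)(logits)
    assert torch.allclose(probs.sum(dim=1), torch.ones(1))
    assert (probs == 0).any()                # low-scoring classes are zeroed out
    return probs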
| 2,649 | 26.040816 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/structured_attention.py
|
import torch.nn as nn
import torch
import torch.cuda
class MatrixTree(nn.Module):
"""Implementation of the matrix-tree theorem for computing marginals
of non-projective dependency parsing. This attention layer is used
in the paper "Learning Structured Text Representations"
:cite:`DBLP:journals/corr/LiuL17d`.
"""
def __init__(self, eps=1e-5):
self.eps = eps
super(MatrixTree, self).__init__()
def forward(self, input):
laplacian = input.exp() + self.eps
output = input.clone()
for b in range(input.size(0)):
lap = laplacian[b].masked_fill(
torch.eye(input.size(1), device=input.device).ne(0), 0)
lap = -lap + torch.diag(lap.sum(0))
# store roots on diagonal
lap[0] = input[b].diag().exp()
inv_laplacian = lap.inverse()
factor = inv_laplacian.diag().unsqueeze(1)\
.expand_as(input[b]).transpose(0, 1)
term1 = input[b].exp().mul(factor).clone()
term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
term1[:, 0] = 0
term2[0] = 0
output[b] = term1 - term2
roots_output = input[b].diag().exp().mul(
inv_laplacian.transpose(0, 1)[0])
output[b] = output[b] + torch.diag(roots_output)
return output
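# Illustrative sketch, added for clarity (not part of the original file):
# MatrixTree maps a batch of raw arc scores to marginals over dependency arcs.
# The random score tensor below is an assumption.
def _demo_matrix_tree():
    scores = torch.randn(2, 5, 5)            # (batch, n_words, n_words)
    marginals = MatrixTree()(scores)
    assert marginals.shape == scores.shape
    return marginals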
| 1,414 | 35.282051 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/util_class.py
|
""" Misc classes """
import torch
import torch.nn as nn
# At the moment this class is only used by embeddings.Embeddings look-up tables
class Elementwise(nn.ModuleList):
"""
A simple network container.
Parameters are a list of modules.
Inputs are a 3d Tensor whose last dimension is the same length
as the list.
Outputs are the result of applying modules to inputs elementwise.
An optional merge parameter allows the outputs to be reduced to a
single Tensor.
"""
def __init__(self, merge=None, *args):
assert merge in [None, 'first', 'concat', 'sum', 'mlp']
self.merge = merge
super(Elementwise, self).__init__(*args)
def forward(self, inputs):
inputs_ = [feat.squeeze(2) for feat in inputs.split(1, dim=2)]
assert len(self) == len(inputs_)
outputs = [f(x) for f, x in zip(self, inputs_)]
if self.merge == 'first':
return outputs[0]
elif self.merge == 'concat' or self.merge == 'mlp':
return torch.cat(outputs, 2)
elif self.merge == 'sum':
return sum(outputs)
else:
return outputs
class Cast(nn.Module):
"""
Basic layer that casts its input to a specific data type. The same tensor
is returned if the data type is already correct.
"""
def __init__(self, dtype):
super(Cast, self).__init__()
self._dtype = dtype
def forward(self, x):
return x.to(self._dtype)
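# Illustrative sketch, added for clarity (not part of the original file):
# Elementwise applies one module per slice of the last dimension, here two
# embedding tables over a (len, batch, n_features) index tensor. The table
# sizes are assumptions.
def _demo_elementwise():
    luts = Elementwise('concat', [nn.Embedding(20, 4), nn.Embedding(5, 3)])
    indices = torch.randint(0, 5, (7, 2, 2))
    out = luts(indices)
    assert out.shape == (7, 2, 4 + 3)        # per-feature embeddings concatenated
    return out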
| 1,486 | 29.346939 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/hierarchical_attention.py
|
from ..utils.misc import aeq
from .sparse_activations import sparsemax
from torch.nn.utils.rnn import pad_sequence
import torch
import onmt
class ContainsNaN(Exception):
pass
def _check_for_nan(tensor, msg=''):
if (tensor!=tensor).any():
raise ContainsNaN(msg)
def _check_sizes(tensor, *sizes):
for dim, (s, _s) in enumerate(zip(tensor.shape, sizes)):
        assert s == _s, f'sizes do not match at dim {dim}'
class AttentionScorer(torch.nn.Module):
"""
dim_query is dim of the decoder
dim_key is dim of the encoder output
"""
def __init__(self, dim, attn_type):
super().__init__()
if isinstance(dim, tuple):
assert len(dim) == 2
assert isinstance(dim[0], int)
assert isinstance(dim[1], int)
assert attn_type != 'dot'
self.dim_query = dim[0]
self.dim_key = dim[1]
elif isinstance(dim, int):
self.dim_query = dim
self.dim_key = dim
else:
            raise ValueError('dim should be one or two ints')
self.attn_type = attn_type
if self.attn_type == "general":
self.linear_in = torch.nn.Linear(self.dim_query,
self.dim_key,
bias=False)
elif self.attn_type == "mlp":
self.linear_context = torch.nn.Linear(self.dim_key,
self.dim_key,
bias=False)
self.linear_query = torch.nn.Linear(self.dim_query,
self.dim_key,
bias=True)
self.v = torch.nn.Linear(self.dim_key, 1, bias=False)
def forward(self, h_t, h_s):
"""
Args:
h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``
Returns:
FloatTensor: raw attention scores (unnormalized) for each src index
``(batch, tgt_len, src_len)``
"""
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, self.dim_key)
aeq(tgt_dim, self.dim_query)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, src_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
# where d is self.dim_key
return torch.bmm(h_t, h_s_)
else:
wq = self.linear_query(h_t.view(-1, tgt_dim))
wq = wq.view(tgt_batch, tgt_len, 1, src_dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, src_dim)
uh = self.linear_context(h_s.contiguous().view(-1, src_dim))
uh = uh.view(src_batch, 1, src_len, src_dim)
uh = uh.expand(src_batch, tgt_len, src_len, src_dim)
# (batch, t_len, s_len, d)
wquh = torch.tanh(wq + uh)
            return self.v(wquh.view(-1, src_dim)).view(tgt_batch, tgt_len, src_len)
class HierarchicalAttention(torch.nn.Module):
def __init__(self, dim, coverage=False, attn_type="dot",
attn_func="softmax", use_pos=True):
super().__init__()
assert not coverage
self.ent_size = onmt.ENT_SIZE
self.use_pos = use_pos
# dims shenanigans. memory_bank should be dim[0]
if isinstance(dim, tuple):
assert len(dim) == 2
assert isinstance(dim[0], int)
assert isinstance(dim[1], int)
self.chunks_dim = dim[0]
self.units_dim = dim[1]
elif isinstance(dim, int):
self.chunks_dim = dim
self.units_dim = dim
else:
raise ValueError('dim should be one or two ints')
if attn_func == 'softmax':
self.attn_func = torch.nn.functional.softmax
elif attn_func == 'sparsemax':
self.attn_func = sparsemax
else:
raise ValueError("Please select a valid attention function.")
assert attn_type in ["dot", "general", "mlp"], (
"Please select a valid attention type (got {:s}).".format(
attn_type))
self.attn_type = attn_type
self.unit_scorer = AttentionScorer((self.chunks_dim, self.units_dim),
attn_type)
self.chunk_scorer = AttentionScorer(self.chunks_dim, attn_type)
# mlp wants it with bias, others no
self.linear_out = torch.nn.Linear(self.chunks_dim * 2,
self.chunks_dim,
bias=(attn_type=="mlp"))
def forward(self, source, memory_bank):
"""
Args:
source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
Returns:
(FloatTensor, FloatTensor):
* Computed vector ``(tgt_len, batch, dim)``
            * Attention distributions for each query
``(tgt_len, batch, src_len)``
        In this setup, tgt_len will always be equal to one, due to input feeding.
"""
# assert one step input
assert source.dim() == 2
source = source.unsqueeze(1)
# Unpacking memory_bank (we reassign memory_bank to optimize memory
        # and minimize errors when copy/pasting existing code)
# we transpose the batch_dim for the scoring compute
chunks, memory_bank, pos_embs, units_mask, chunk_mask = memory_bank
chunks = chunks.transpose(0, 1)
memory_bank = memory_bank.transpose(0, 1)
pos_embs = pos_embs.transpose(0, 1)
units_mask = units_mask.transpose(0, 1)
chunk_mask = chunk_mask.transpose(0, 1)
# _check_for_nan(chunks)
# _check_for_nan(memory_bank)
# _check_for_nan(pos_embs)
# Checks and balances
batch_size, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
aeq(batch_size, batch_)
aeq(dim, dim_)
aeq(self.chunks_dim, dim)
# compute attention scores, as in Luong et al.
# align_units is [batch_size, src_len]
# align_chunks is [batch_size, 1, n_ents]
if self.use_pos:
align_units = self.unit_scorer(source, pos_embs).squeeze(1)
else:
align_units = self.unit_scorer(source, memory_bank).squeeze(1)
align_chunks = self.chunk_scorer(source, chunks)
# we compute the softmax first on the unit level
# - we reshape so that each row is an entity
# - we mask the padding and the <ent> token
# - we softmax
# - we flatten the scores again
_check_for_nan(align_units, 'align units scores') # sanity check (1)
align_units = align_units.view(batch_size, -1, self.ent_size)
align_units = align_units.masked_fill(units_mask, float('-inf'))
_check_for_nan(align_units, 'align units scores filled with -inf') # sanity check (2)
# tricky block
# we softmax on the last dim, ie: separatly on each entity
# However, some entity might be full <pad>, meaning full -inf
# giving NaN when softmax is computed (dividing by zero)
# We find those nan and remove them
align_units = self.attn_func(align_units, -1) # softmax
nan_mask = (align_units != align_units).sum(dim=2).ne(0) # nan != nan
if nan_mask.sum().item():
align_units = align_units.masked_fill(nan_mask.unsqueeze(-1), 0)
_check_for_nan(align_units, 'align units after attn_func') # sanity check (3)
# we flatten the scores again
align_units = align_units.view(batch_size, 1, -1)
# Now the second level of attention, on the <ent> tokens
align_chunks.masked_fill_(chunk_mask, float('-inf'))
align_chunks = self.attn_func(align_chunks, -1)
# align_chunks = sparsemax(align_chunks, -1)
_check_for_nan(align_chunks, 'align_chunks after attn_func')
        # To compute the final scores, we weight each unit score by the score
        # of the chunk to which it belongs. We inflate the chunk scores
        # and simply elementwise multiply.
# It's easy to see that it remains a proba distribution (ie, sums to 1)
align_chunks_inflated = align_chunks.repeat_interleave(repeats=self.ent_size, dim=-1)
align_vectors = align_chunks_inflated * align_units
#print(align_vectors.sum())
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, memory_bank)
# concatenate
concat_c = torch.cat([c, source], 2).view(batch_size*target_l, dim*2)
attn_h = self.linear_out(concat_c).view(batch_size, target_l, dim)
if self.attn_type in ["general", "dot"]:
attn_h = torch.tanh(attn_h)
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch_size, batch_)
aeq(dim, dim_)
batch_, source_l_ = align_vectors.size()
aeq(batch_size, batch_)
aeq(source_l, source_l_)
ret = {
'': align_vectors,
'_align_chunks': align_chunks.squeeze(1),
'_align_units':align_units.squeeze(1)
}
return attn_h, ret
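# Illustrative sketch, added for clarity (not part of the original file):
# the two attention levels combine by repeating each chunk weight ent_size
# times and multiplying it with the unit weights of that entity; the result
# is still a probability distribution. ent_size and the scores are assumptions.
def _demo_hierarchical_score_combination():
    ent_size = 3
    # one softmax per entity (2 entities of 3 units each), then flattened
    align_units = torch.softmax(torch.randn(1, 2, ent_size), dim=-1).view(1, 1, -1)
    # one softmax over the 2 entity (<ent>) tokens
    align_chunks = torch.softmax(torch.randn(1, 1, 2), dim=-1)
    inflated = align_chunks.repeat_interleave(repeats=ent_size, dim=-1)
    align_vectors = inflated * align_units
    assert torch.allclose(align_vectors.sum(), torch.tensor(1.0))
    return align_vectors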
| 10,134 | 36.537037 | 94 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/conv_multi_step_attention.py
|
""" Multi Step Attention for CNN """
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.utils.misc import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def seq_linear(linear, x):
""" linear transform for 3-d tensor """
batch, hidden_size, length, _ = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view(
batch * length, hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)
class ConvMultiStepAttention(nn.Module):
"""
Conv attention takes a key matrix, a value matrix and a query vector.
    Attention weights are calculated from the key matrix and the query vector,
    and used to take a weighted sum over the value matrix. The same operation
    is applied in each decoder conv layer.
"""
def __init__(self, input_size):
super(ConvMultiStepAttention, self).__init__()
self.linear_in = nn.Linear(input_size, input_size)
self.mask = None
def apply_mask(self, mask):
""" Apply mask """
self.mask = mask
def forward(self, base_target_emb, input_from_dec, encoder_out_top,
encoder_out_combine):
"""
Args:
base_target_emb: target emb tensor
input_from_dec: output of decode conv
            encoder_out_top: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode
"""
# checks
# batch, channel, height, width = base_target_emb.size()
batch, _, height, _ = base_target_emb.size()
# batch_, channel_, height_, width_ = input_from_dec.size()
batch_, _, height_, _ = input_from_dec.size()
aeq(batch, batch_)
aeq(height, height_)
# enc_batch, enc_channel, enc_height = encoder_out_top.size()
enc_batch, _, enc_height = encoder_out_top.size()
# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
enc_batch_, _, enc_height_ = encoder_out_combine.size()
aeq(enc_batch, enc_batch_)
aeq(enc_height, enc_height_)
preatt = seq_linear(self.linear_in, input_from_dec)
target = (base_target_emb + preatt) * SCALE_WEIGHT
target = torch.squeeze(target, 3)
target = torch.transpose(target, 1, 2)
pre_attn = torch.bmm(target, encoder_out_top)
if self.mask is not None:
pre_attn.data.masked_fill_(self.mask, -float('inf'))
attn = F.softmax(pre_attn, dim=2)
context_output = torch.bmm(
attn, torch.transpose(encoder_out_combine, 1, 2))
context_output = torch.transpose(
torch.unsqueeze(context_output, 3), 1, 2)
return context_output, attn
| 2,865 | 34.382716 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/average_attn.py
|
# -*- coding: utf-8 -*-
"""Average Attention module."""
import torch
import torch.nn as nn
from onmt.modules.position_ffn import PositionwiseFeedForward
class AverageAttention(nn.Module):
"""
Average Attention module from
"Accelerating Neural Transformer via an Average Attention Network"
:cite:`DBLP:journals/corr/abs-1805-00631`.
Args:
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
"""
def __init__(self, model_dim, dropout=0.1, aan_useffn=False):
self.model_dim = model_dim
self.aan_useffn = aan_useffn
super(AverageAttention, self).__init__()
if aan_useffn:
self.average_layer = PositionwiseFeedForward(model_dim, model_dim,
dropout)
self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)
def cumulative_average_mask(self, batch_size, inputs_len, device):
"""
Builds the mask to compute the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(FloatTensor):
* A Tensor of shape ``(batch_size, input_len, input_len)``
"""
triangle = torch.tril(torch.ones(inputs_len, inputs_len,
dtype=torch.float, device=device))
weights = torch.ones(1, inputs_len, dtype=torch.float, device=device) \
/ torch.arange(1, inputs_len + 1, dtype=torch.float, device=device)
mask = triangle * weights.transpose(0, 1)
return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)
def cumulative_average(self, inputs, mask_or_step,
layer_cache=None, step=None):
"""
Computes the cumulative average as described in
:cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6)
Args:
inputs (FloatTensor): sequence to average
``(batch_size, input_len, dimension)``
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
layer_cache: a dictionary containing the cumulative average
of the previous step.
Returns:
a tensor of the same shape and type as ``inputs``.
"""
if layer_cache is not None:
step = mask_or_step
average_attention = (inputs + step *
layer_cache["prev_g"]) / (step + 1)
layer_cache["prev_g"] = average_attention
return average_attention
else:
mask = mask_or_step
return torch.matmul(mask.to(inputs.dtype), inputs)
def forward(self, inputs, mask=None, layer_cache=None, step=None):
"""
Args:
inputs (FloatTensor): ``(batch_size, input_len, model_dim)``
Returns:
(FloatTensor, FloatTensor):
* gating_outputs ``(batch_size, input_len, model_dim)``
* average_outputs average attention
``(batch_size, input_len, model_dim)``
"""
batch_size = inputs.size(0)
inputs_len = inputs.size(1)
average_outputs = self.cumulative_average(
inputs, self.cumulative_average_mask(batch_size,
inputs_len, inputs.device)
if layer_cache is None else step, layer_cache=layer_cache)
if self.aan_useffn:
average_outputs = self.average_layer(average_outputs)
gating_outputs = self.gating_layer(torch.cat((inputs,
average_outputs), -1))
input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
gating_outputs = torch.sigmoid(input_gate) * inputs + \
torch.sigmoid(forget_gate) * average_outputs
return gating_outputs, average_outputs
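# Illustrative sketch, added for clarity (not part of the original file):
# row i of the cumulative average mask holds 1/(i+1) in its first i+1 columns,
# so a matmul with it produces running averages of the inputs. model_dim below
# is an assumption.
def _demo_cumulative_average_mask():
    aan = AverageAttention(model_dim=4)
    mask = aan.cumulative_average_mask(1, 3, torch.device('cpu'))
    expected = torch.tensor([[[1.,     0.,     0.],
                              [1 / 2., 1 / 2., 0.],
                              [1 / 3., 1 / 3., 1 / 3.]]])
    assert torch.allclose(mask, expected)
    return mask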
| 4,227 | 36.75 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/copy_generator.py
|
import torch
import torch.nn as nn
from onmt.utils.misc import aeq
from onmt.utils.loss import NMTLossCompute
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs=None,
batch_dim=1, batch_offset=None):
"""
    Given scores from an expanded dictionary
    corresponding to a batch, sums each copy score into the matching
    target-dictionary word when the copied word is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(scores.size(batch_dim)):
blank = []
fill = []
if src_vocabs is None:
src_vocab = batch.src_ex_vocab[b]
else:
batch_id = batch_offset[b] if batch_offset is not None else b
index = batch.indices.data[batch_id]
src_vocab = src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
score = scores[:, b] if batch_dim == 1 else scores[b]
score.index_add_(1, fill, score.index_select(1, blank))
score.index_fill_(1, blank, 1e-10)
return scores
class CopyGenerator(nn.Module):
"""An implementation of pointer-generator networks
:cite:`DBLP:journals/corr/SeeLM17`.
These networks consider copying words
directly from the source sequence.
The copy generator is an extended version of the standard
generator that computes three values.
* :math:`p_{softmax}` the standard softmax over `tgt_dict`
* :math:`p(z)` the probability of copying a word from
the source
    * :math:`p_{copy}` the probability of copying a particular word,
      taken from the attention distribution directly.
    The model returns a distribution over the extended dictionary,
computed as
:math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
.. mermaid::
graph BT
A[input]
S[src_map]
B[softmax]
BB[switch]
C[attn]
D[copy]
O[output]
A --> B
A --> BB
S --> D
C --> D
D --> O
B --> O
BB --> O
Args:
input_size (int): size of input representation
output_size (int): size of output vocabulary
pad_idx (int)
"""
def __init__(self, input_size, output_size, pad_idx):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(input_size, output_size)
self.linear_copy = nn.Linear(input_size, 1)
self.pad_idx = pad_idx
def forward(self, hidden, attn, src_map):
"""
Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
            attn (FloatTensor): attn for each ``(batch x tlen, slen)``
src_map (FloatTensor):
A sparse indicator matrix mapping each source word to
its index in the "extended" vocab containing.
``(src_len, batch, extra_words)``
"""
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.pad_idx] = -float('inf')
prob = torch.softmax(logits, 1)
# Probability of copying p(z=1) batch.
p_copy = torch.sigmoid(self.linear_copy(hidden))
# Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - p_copy)
mul_attn = torch.mul(attn, p_copy)
copy_prob = torch.bmm(
mul_attn.view(-1, batch, slen).transpose(0, 1),
src_map.transpose(0, 1)
).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
return torch.cat([out_prob, copy_prob], 1)
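# Illustrative sketch, added for clarity (not part of the original file):
# the generator returns a single distribution over the target vocabulary
# extended with the per-batch copyable source words. All sizes are assumptions.
def _demo_copy_generator():
    vocab_size, extra_words, slen, batch, tlen, hid = 7, 4, 5, 2, 3, 16
    generator = CopyGenerator(input_size=hid, output_size=vocab_size, pad_idx=1)
    hidden = torch.rand(batch * tlen, hid)
    attn = torch.softmax(torch.rand(batch * tlen, slen), dim=-1)
    src_map = torch.zeros(slen, batch, extra_words)
    probs = generator(hidden, attn, src_map)
    assert probs.shape == (batch * tlen, vocab_size + extra_words)
    return probs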
class CopyGeneratorLoss(nn.Module):
"""Copy generator criterion."""
def __init__(self, vocab_size, force_copy, unk_index=0,
ignore_index=-100, eps=1e-20):
super(CopyGeneratorLoss, self).__init__()
self.force_copy = force_copy
self.eps = eps
self.vocab_size = vocab_size
self.ignore_index = ignore_index
self.unk_index = unk_index
def forward(self, scores, align, target):
"""
Args:
scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size
whose sum along dim 1 is less than or equal to 1, i.e. cols
softmaxed.
align (LongTensor): ``(batch_size x tgt_len)``
target (LongTensor): ``(batch_size x tgt_len)``
"""
# probabilities assigned by the model to the gold targets
vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
# probability of tokens copied from source
copy_ix = align.unsqueeze(1) + self.vocab_size
copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
# Set scores for unk to 0 and add eps
copy_tok_probs[align == self.unk_index] = 0
copy_tok_probs += self.eps # to avoid -inf logs
# find the indices in which you do not use the copy mechanism
non_copy = align == self.unk_index
if not self.force_copy:
non_copy = non_copy | (target != self.unk_index)
probs = torch.where(
non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
)
loss = -probs.log() # just NLLLoss; can the module be incorporated?
# Drop padding.
loss[target == self.ignore_index] = 0
return loss
class CopyGeneratorLossCompute(NMTLossCompute):
"""Copy Generator Loss Computation."""
def __init__(self, criterion, generator, tgt_vocab, normalize_by_length,
lambda_coverage=0.0):
super(CopyGeneratorLossCompute, self).__init__(
criterion, generator, lambda_coverage=lambda_coverage)
self.tgt_vocab = tgt_vocab
self.normalize_by_length = normalize_by_length
def _make_shard_state(self, batch, output, range_, attns):
"""See base class for args description."""
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
shard_state = super(CopyGeneratorLossCompute, self)._make_shard_state(
batch, output, range_, attns)
shard_state.update({
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
})
return shard_state
def _compute_loss(self, batch, output, target, copy_attn, align,
std_attn=None, coverage_attn=None):
"""Compute the loss.
The args must match :func:`self._make_shard_state()`.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(
self._bottle(output), self._bottle(copy_attn), batch.src_map
)
loss = self.criterion(scores, align, target)
if self.lambda_coverage != 0.0:
coverage_loss = self._compute_coverage_loss(std_attn,
coverage_attn)
loss += coverage_loss
# this block does not depend on the loss value computed above
# and is used only for stats
scores_data = collapse_copy_scores(
self._unbottle(scores.clone(), batch.batch_size),
batch, self.tgt_vocab, None)
scores_data = self._bottle(scores_data)
# this block does not depend on the loss value computed above
# and is used only for stats
# Correct target copy token instead of <unk>
# tgt[i] = align[i] + len(tgt_vocab)
# for i such that tgt[i] == 0 and align[i] != 0
target_data = target.clone()
unk = self.criterion.unk_index
correct_mask = (target_data == unk) & (align != unk)
offset_align = align[correct_mask] + len(self.tgt_vocab)
target_data[correct_mask] += offset_align
# Compute sum of perplexities for stats
stats = self._stats(loss.sum().clone(), scores_data, target_data)
# this part looks like it belongs in CopyGeneratorLoss
if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:
loss = loss.sum()
return loss, stats
| 9,415 | 34.938931 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/self_attention.py
|
"""
Custom reimplementation of torch.nn.MultiheadAttention
It's actually the same module, with more or less flexibility at times,
and a more flexible use of the mask (different mask per element of the batch)
"""
from torch._jit_internal import weak_module, weak_script_method
from torch.nn.init import constant_
from torch.nn.parameter import Parameter
from torch.nn.init import xavier_uniform_
from torch.nn import functional as F
from onmt.utils.misc import tile
from onmt.modules import GatedLinear
import torch
@weak_module
class MultiHeadSelfAttention(torch.nn.Module):
"""
if glu_depth is not zero, we use GatedLinear layers instead of regular layers.
"""
def __init__(self, embed_dim, num_heads, dropout=0., glu_depth=0, bias=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
msg = "embed_dim must be divisible by num_heads, got {} and {}"
assert self.head_dim * num_heads == self.embed_dim, msg.format(embed_dim, num_heads)
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=bias)
# Gated Linear Unit
self._use_glu = isinstance(glu_depth, int) and glu_depth > 0
if self._use_glu:
if not self.head_dim % pow(2, glu_depth) == 0:
raise ValueError('When using GLU you need to use a head_dim that is '
'a multiple of two to the power glu_depth. '
f'Got {self.head_dim} % 2^{glu_depth} != 0')
glu_out_dim = self.head_dim // pow(2, glu_depth)
self.key_glu = GatedLinear(self.head_dim, glu_out_dim, glu_depth)
self.query_glu = GatedLinear(self.head_dim, glu_out_dim, glu_depth)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self.in_proj_weight[:self.embed_dim, :])
xavier_uniform_(self.in_proj_weight[self.embed_dim:(self.embed_dim * 2), :])
xavier_uniform_(self.in_proj_weight[(self.embed_dim * 2):, :])
xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
@weak_script_method
def forward(self, input, attn_mask=None):
"""
Inputs of forward function
input: [target length, batch size, embed dim]
attn_mask [(batch size), sequence_length, sequence_length]
Outputs of forward function
attn_output: [target length, batch size, embed dim]
attn_output_weights: [batch size, target length, sequence length]
"""
seq_len, bsz, embed_dim = input.size()
assert embed_dim == self.embed_dim
# self-attention
q, k, v = F.linear(input, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
q *= self.scaling
# Cut q, k, v in num_heads part
q = q.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
# Gated Linear Unit
if self._use_glu:
q = self.query_glu(q)
k = self.key_glu(k)
        # batch matrix multiply query against key
# attn_output_weights is [bsz * num_heads, seq_len, seq_len]
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * self.num_heads, seq_len, seq_len]
if attn_mask is not None:
if attn_mask.dim() == 2:
# We use the same mask for each item in the batch
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
# Each item in the batch has its own mask.
# We need to inflate the mask to go with all heads
attn_mask = tile(attn_mask, count=self.num_heads, dim=0)
else:
                # Don't know what we would be doing here...
raise RuntimeError(f'Wrong mask dim: {attn_mask.dim()}')
# The mask should be either 0 of -inf to go with softmax
attn_output_weights += attn_mask
attn_output_weights = F.softmax(
attn_output_weights.float(), dim=-1,
dtype=torch.float32 if attn_output_weights.dtype == torch.float16 else attn_output_weights.dtype)
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * self.num_heads, seq_len, self.head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(seq_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, self.num_heads, seq_len, seq_len)
attn_output_weights = attn_output_weights.sum(dim=1) / self.num_heads
return attn_output, attn_output_weights
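# Illustrative sketch, added for clarity (not part of the original file):
# the additive mask uses 0 for visible positions and -inf for hidden ones, and
# a 3-d mask gives each batch element its own pattern. Sizes are assumptions.
def _demo_multi_head_self_attention():
    seq_len, bsz, dim = 4, 2, 8
    attn = MultiHeadSelfAttention(embed_dim=dim, num_heads=2)
    x = torch.rand(seq_len, bsz, dim)
    mask = torch.zeros(bsz, seq_len, seq_len)
    mask[:, :, -1] = float('-inf')           # hide the last position everywhere
    out, weights = attn(x, attn_mask=mask)
    assert out.shape == (seq_len, bsz, dim)
    assert torch.allclose(weights[:, :, -1], torch.zeros(bsz, seq_len))
    return out, weights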
| 5,556 | 43.103175 | 109 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/embeddings.py
|
""" Embeddings module """
import math
import warnings
import torch
import torch.nn as nn
from onmt.modules.util_class import Elementwise
class PositionalEncoding(nn.Module):
"""Sinusoidal positional encoding for non-recurrent neural networks.
Implementation based on "Attention Is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`
Args:
dropout (float): dropout parameter
dim (int): embedding size
"""
def __init__(self, dropout, dim, max_len=5000):
if dim % 2 != 0:
raise ValueError("Cannot use sin/cos positional encoding with "
"odd dim (got dim={:d})".format(dim))
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
-(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(1)
super(PositionalEncoding, self).__init__()
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb, step=None):
"""Embed inputs.
Args:
emb (FloatTensor): Sequence of word vectors
``(seq_len, batch_size, self.dim)``
step (int or NoneType): If stepwise (``seq_len = 1``), use
the encoding for this position.
"""
emb = emb * math.sqrt(self.dim)
if step is None:
emb = emb + self.pe[:emb.size(0)]
else:
emb = emb + self.pe[step]
emb = self.dropout(emb)
return emb
class VecEmbedding(nn.Module):
def __init__(self, vec_size,
emb_dim,
position_encoding=False,
dropout=0):
super(VecEmbedding, self).__init__()
self.embedding_size = emb_dim
self.proj = nn.Linear(vec_size, emb_dim, bias=False)
self.word_padding_idx = 0 # vector seqs are zero-padded
self.position_encoding = position_encoding
if self.position_encoding:
self.pe = PositionalEncoding(dropout, self.embedding_size)
def forward(self, x, step=None):
"""
Args:
x (FloatTensor): input, ``(len, batch, 1, vec_feats)``.
Returns:
FloatTensor: embedded vecs ``(len, batch, embedding_size)``.
"""
x = self.proj(x).squeeze(2)
if self.position_encoding:
x = self.pe(x, step=step)
return x
def load_pretrained_vectors(self, file):
assert not file
class Embeddings(nn.Module):
"""Words embeddings for encoder/decoder.
Additionally includes ability to add sparse input features
based on "Linguistic Input Features Improve Neural Machine Translation"
:cite:`sennrich2016linguistic`.
.. mermaid::
graph LR
A[Input]
C[Feature 1 Lookup]
A-->B[Word Lookup]
A-->C
A-->D[Feature N Lookup]
B-->E[MLP/Concat]
C-->E
D-->E
E-->F[Output]
Args:
word_vec_size (int): size of the dictionary of embeddings.
word_padding_idx (int): padding index for words in the embeddings.
feat_padding_idx (List[int]): padding index for a list of features
in the embeddings.
word_vocab_size (int): size of dictionary of embeddings for words.
feat_vocab_sizes (List[int], optional): list of size of dictionary
of embeddings for each feature.
position_encoding (bool): see :class:`~onmt.modules.PositionalEncoding`
feat_merge (string): merge action for the features embeddings:
concat, sum or mlp.
feat_vec_exponent (float): when using `-feat_merge concat`, feature
embedding size is N^feat_dim_exponent, where N is the
number of values the feature takes.
feat_vec_size (int): embedding dimension for features when using
`-feat_merge mlp`
dropout (float): dropout probability.
"""
def __init__(self, word_vec_size,
word_vocab_size,
word_padding_idx,
position_encoding=False,
feat_merge="concat",
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=0,
sparse=False,
fix_word_vecs=False):
self._validate_args(feat_merge, feat_vocab_sizes, feat_vec_exponent,
feat_vec_size, feat_padding_idx)
if feat_padding_idx is None:
feat_padding_idx = []
self.word_padding_idx = word_padding_idx
self.word_vec_size = word_vec_size
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == 'sum':
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab ** feat_vec_exponent)
for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)
for vocab, dim, pad in emb_params]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'
else word_vec_size)
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module('emb_luts', emb_luts)
if feat_merge == 'mlp' and len(feat_vocab_sizes) > 0:
in_dim = sum(emb_dims)
mlp = nn.Sequential(nn.Linear(in_dim, word_vec_size), nn.ReLU())
self.make_embedding.add_module('mlp', mlp)
self.position_encoding = position_encoding
if self.position_encoding:
pe = PositionalEncoding(dropout, self.embedding_size)
self.make_embedding.add_module('pe', pe)
if fix_word_vecs:
self.word_lut.weight.requires_grad = False
def _validate_args(self, feat_merge, feat_vocab_sizes, feat_vec_exponent,
feat_vec_size, feat_padding_idx):
if feat_merge == "sum":
# features must use word_vec_size
if feat_vec_exponent != 0.7:
warnings.warn("Merging with sum, but got non-default "
"feat_vec_exponent. It will be unused.")
if feat_vec_size != -1:
warnings.warn("Merging with sum, but got non-default "
"feat_vec_size. It will be unused.")
elif feat_vec_size > 0:
# features will use feat_vec_size
if feat_vec_exponent != -1:
warnings.warn("Not merging with sum and positive "
"feat_vec_size, but got non-default "
"feat_vec_exponent. It will be unused.")
else:
if feat_vec_exponent <= 0:
raise ValueError("Using feat_vec_exponent to determine "
"feature vec size, but got feat_vec_exponent "
"less than or equal to 0.")
n_feats = len(feat_vocab_sizes)
if n_feats != len(feat_padding_idx):
raise ValueError("Got unequal number of feat_vocab_sizes and "
"feat_padding_idx ({:d} != {:d})".format(
n_feats, len(feat_padding_idx)))
@property
def word_lut(self):
"""Word look-up table."""
return self.make_embedding[0][0]
@property
def emb_luts(self):
"""Embedding look-up table."""
return self.make_embedding[0]
def load_pretrained_vectors(self, emb_file):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data \
.copy_(pretrained[:, :self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
def forward(self, source, step=None):
"""Computes the embeddings for words and features.
Args:
source (LongTensor): index tensor ``(len, batch, nfeat)``
Returns:
FloatTensor: Word embeddings ``(len, batch, embedding_size)``
"""
if self.position_encoding:
for i, module in enumerate(self.make_embedding._modules.values()):
if i == len(self.make_embedding._modules.values()) - 1:
source = module(source, step=step)
else:
source = module(source)
else:
source = self.make_embedding(source)
return source
def update_dropout(self, dropout):
if self.position_encoding:
self._modules['make_embedding'][1].dropout.p = dropout
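# Illustrative sketch, added for clarity (not part of the original file):
# word indices plus one extra feature column are looked up and merged. The
# vocabulary sizes, padding indices and dimensions below are assumptions.
def _demo_embeddings():
    emb = Embeddings(word_vec_size=16,
                     word_vocab_size=100,
                     word_padding_idx=1,
                     feat_merge='concat',
                     feat_vocab_sizes=[10],
                     feat_padding_idx=[1])
    source = torch.randint(0, 10, (5, 2, 2))   # (len, batch, 1 word + 1 feat)
    out = emb(source)
    # with 'concat', the feature dim (int(10 ** 0.7) == 5) is appended to 16
    assert out.shape == (5, 2, emb.embedding_size) and emb.embedding_size == 21
    return out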
| 10,689 | 36.640845 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/global_attention.py
|
"""Global attention modules (Luong / Bahdanau)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.sparse_activations import sparsemax
from onmt.utils.misc import aeq, sequence_mask
# This class is mainly used by decoder.py for RNNs but also
# by the CNN / transformer decoder when copy attention is used
# CNN has its own attention mechanism ConvMultiStepAttention
# Transformer has its own MultiHeadedAttention
class GlobalAttention(nn.Module):
r"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \sum_{j=1}^{\text{SeqLength}} a_j H_j` where
:math:`a_j` is the softmax of a score function.
    Then they apply a projection layer to [q, c].
However they
differ on how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`\text{score}(H_j,q) = H_j^T q`
* general: :math:`\text{score}(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`\text{score}(H_j, q) = v_a^T \text{tanh}(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
attn_func (str): attention function to use, options [softmax,sparsemax]
"""
def __init__(self, dim, coverage=False, attn_type="dot",
attn_func="softmax"):
super(GlobalAttention, self).__init__()
self.dim = dim
assert attn_type in ["dot", "general", "mlp"], (
"Please select a valid attention type (got {:s}).".format(
attn_type))
self.attn_type = attn_type
assert attn_func in ["softmax", "sparsemax"], (
"Please select a valid attention function.")
self.attn_func = attn_func
if self.attn_type == "general":
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == "mlp":
self.linear_context = nn.Linear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = nn.Linear(dim, 1, bias=False)
# mlp wants it with bias
out_bias = self.attn_type == "mlp"
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
Args:
h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``
Returns:
FloatTensor: raw attention scores (unnormalized) for each src index
``(batch, tgt_len, src_len)``
"""
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# (batch, t_len, s_len, d)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
memory_lengths (LongTensor): the source context lengths ``(batch,)``
coverage (FloatTensor): None (not supported yet)
Returns:
(FloatTensor, FloatTensor):
* Computed vector ``(tgt_len, batch, dim)``
            * Attention distributions for each query
``(tgt_len, batch, src_len)``
"""
# one step input
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
batch, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, source_l_ = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank += self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
# compute attention scores, as in Luong et al.
align = self.score(source, memory_bank)
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1) # Make it broadcastable.
align.masked_fill_(~mask, -float('inf'))
# Softmax or sparsemax to normalize attention weights
if self.attn_func == "softmax":
align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)
else:
align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, memory_bank)
# concatenate
concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ["general", "dot"]:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, source_l_ = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
# Check output sizes
target_l_, batch_, dim_ = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
target_l_, batch_, source_l_ = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
return attn_h, align_vectors
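# Illustrative sketch, added for clarity (not part of the original file): one
# decoding step of Luong-style "general" attention over a random memory bank,
# with per-example source lengths. Sizes are assumptions.
def _demo_global_attention():
    batch, src_len, dim = 2, 6, 8
    attn = GlobalAttention(dim, attn_type="general")
    query = torch.rand(batch, dim)             # one-step decoder state
    memory_bank = torch.rand(batch, src_len, dim)
    lengths = torch.tensor([6, 4])
    attn_h, align = attn(query, memory_bank, memory_lengths=lengths)
    assert attn_h.shape == (batch, dim) and align.shape == (batch, src_len)
    assert torch.allclose(align[1, 4:], torch.zeros(2))  # padding gets no weight
    return attn_h, align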
| 7,827 | 33.333333 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/glu.py
|
"""Comes directly from fairseq"""
import torch, math
class Downsample(torch.nn.Module):
"""
    Selects every nth element along the last dim, where n is ``step``
"""
def __init__(self, in_dim, step):
super().__init__()
self._step = step
self._in_dim = in_dim
if in_dim % step != 0:
raise ValueError('in_dim should be a multiple of step. '
f'Got {in_dim} and {step}.')
self.index = torch.LongTensor(range(0, in_dim, step))
def forward(self, input):
return input.index_select(dim=-1, index=self.index.to(input.device))
def extra_repr(self):
return f'{self._in_dim}, {self._in_dim//self._step}'
def Linear(in_features, out_features, dropout=0., bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = torch.nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return torch.nn.utils.weight_norm(m)
class GatedLinear(torch.nn.Module):
def __init__(self, in_features, out_features, depth=2,
downsample=0, dropout=0., bias=True):
"""
Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units.
GLU units split the input in half to use one as values and one as gates:
glu([a; b]) = a * sigmoid(b)
"""
super().__init__()
self._num_layers = depth
self._bias = bias
self._dropout = dropout
self._downsample = isinstance(downsample, int) and downsample > 0
self.glu = torch.nn.GLU(dim=-1)
# In order to halve the dims at each step and end on out_features
# we need to start with out_feature * 2^depth and decrease the power
# of 2 at each depth.
if self._downsample:
self.linear_in = torch.nn.Sequential(
Downsample(in_features, downsample),
Linear(in_features//downsample, out_features * pow(2, depth), dropout, bias)
)
else:
if in_features != out_features * pow(2, depth):
raise ValueError('When not using downsampling, in_features should be '
'equal to out_feature * 2^depth. '
f'Got {in_features} != {out_features} * 2^{depth}')
self.linear_layers = torch.nn.ModuleList([
Linear(out_features * pow(2, depth - k),
out_features * pow(2, depth - k),
dropout, bias)
for k in range(1, depth+1)
])
def forward(self, input):
output = self.linear_in(input) if self._downsample else input
for linear in self.linear_layers:
output = linear(self.glu(output))
return output
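# Illustrative sketch, added for clarity (not part of the original file):
# without downsampling, in_features must equal out_features * 2**depth since
# every GLU halves the last dimension. The sizes below are assumptions.
def _demo_gated_linear():
    layer = GatedLinear(in_features=32, out_features=8, depth=2)   # 8 * 2**2 == 32
    x = torch.rand(4, 10, 32)                # (batch, time, channels)
    y = layer(x)
    assert y.shape == (4, 10, 8)
    return y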
| 2,916 | 37.893333 | 92 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/__init__.py
|
""" Attention and normalization modules """
from onmt.modules.util_class import Elementwise
from onmt.modules.gate import context_gate_factory, ContextGate
from onmt.modules.global_attention import GlobalAttention
from onmt.modules.hierarchical_attention import HierarchicalAttention
from onmt.modules.conv_multi_step_attention import ConvMultiStepAttention
from onmt.modules.copy_generator import CopyGenerator, CopyGeneratorLoss, \
CopyGeneratorLossCompute
from onmt.modules.multi_headed_attn import MultiHeadedAttention
#from onmt.modules.self_attention import MultiHeadSelfAttention
from onmt.modules.embeddings import Embeddings, PositionalEncoding, \
VecEmbedding
from onmt.modules.table_embeddings import TableEmbeddings
from onmt.modules.weight_norm import WeightNormConv2d
from onmt.modules.average_attn import AverageAttention
from onmt.modules.glu import GatedLinear
__all__ = ["Elementwise", "context_gate_factory", "ContextGate",
"GlobalAttention", "ConvMultiStepAttention", "CopyGenerator",
"CopyGeneratorLoss", "CopyGeneratorLossCompute",
"MultiHeadedAttention", "Embeddings", "PositionalEncoding",
"WeightNormConv2d", "AverageAttention", "VecEmbedding",
"GatedLinear", "HierarchicalAttention", "TableEmbeddings"]
| 1,297 | 50.92 | 75 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/gate.py
|
""" ContextGate module """
import torch
import torch.nn as nn
def context_gate_factory(gate_type, embeddings_size, decoder_size,
attention_size, output_size):
"""Returns the correct ContextGate class"""
gate_types = {'source': SourceContextGate,
'target': TargetContextGate,
'both': BothContextGate}
assert gate_type in gate_types, "Not valid ContextGate type: {0}".format(
gate_type)
return gate_types[gate_type](embeddings_size, decoder_size, attention_size,
output_size)
class ContextGate(nn.Module):
"""
Context gate is a decoder module that takes as input the previous word
embedding, the current decoder state and the attention state, and
produces a gate.
The gate can be used to select the input from the target side context
(decoder state), from the source context (attention state) or both.
"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(ContextGate, self).__init__()
input_size = embeddings_size + decoder_size + attention_size
self.gate = nn.Linear(input_size, output_size, bias=True)
self.sig = nn.Sigmoid()
self.source_proj = nn.Linear(attention_size, output_size)
self.target_proj = nn.Linear(embeddings_size + decoder_size,
output_size)
def forward(self, prev_emb, dec_state, attn_state):
input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1)
z = self.sig(self.gate(input_tensor))
proj_source = self.source_proj(attn_state)
proj_target = self.target_proj(
torch.cat((prev_emb, dec_state), dim=1))
return z, proj_source, proj_target
class SourceContextGate(nn.Module):
"""Apply the context gate only to the source context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(SourceContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(
prev_emb, dec_state, attn_state)
return self.tanh(target + z * source)
class TargetContextGate(nn.Module):
"""Apply the context gate only to the target context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(TargetContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh(z * target + source)
class BothContextGate(nn.Module):
"""Apply the context gate to both contexts"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(BothContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh((1. - z) * target + z * source)
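# Minimal usage sketch (not part of the original module). Sizes are
# illustrative: a batch of 4 with 8-dimensional embeddings, decoder states
# and attention states.
if __name__ == "__main__":
    gate = context_gate_factory('both', 8, 8, 8, 8)
    prev_emb = torch.randn(4, 8)
    dec_state = torch.randn(4, 8)
    attn_state = torch.randn(4, 8)
    print(gate(prev_emb, dec_state, attn_state).shape)  # torch.Size([4, 8])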
| 3,635 | 38.521739 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/weight_norm.py
|
""" Weights normalization modules """
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
""" utility for retrieving polyak averaged params
Update average
"""
v = getattr(namespace, var_name)
v_avg = getattr(namespace, var_name + '_avg')
v_avg -= (1 - polyak_decay) * (v_avg - v.data)
if training:
return v
else:
return v_avg
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
""" utility for retrieving polyak averaged params """
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(
namespace, vn, training, polyak_decay))
return vars
class WeightNormLinear(nn.Linear):
"""
Implementation of "Weight Normalization: A Simple Reparameterization
to Accelerate Training of Deep Neural Networks"
:cite:`DBLP:journals/corr/SalimansK16`
    As a reparameterization method, weight normalization serves the same
    purpose as BatchNormalization, but it doesn't depend on the minibatch.
NOTE: This is used nowhere in the code at this stage
Vincent Nguyen 05/18/2018
"""
def __init__(self, in_features, out_features,
init_scale=1., polyak_decay=0.9995):
super(WeightNormLinear, self).__init__(
in_features, out_features, bias=True)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_features))
self.b = self.bias
self.register_buffer(
'V_avg', torch.zeros(out_features, in_features))
self.register_buffer('g_avg', torch.zeros(out_features))
self.register_buffer('b_avg', torch.zeros(out_features))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_features * in_features
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
# norm is out_features * 1
v_norm = self.V.data / \
self.V.data.norm(2, 1).expand_as(self.V.data)
# batch_size * out_features
x_init = F.linear(x, v_norm).data
# out_features
m_init, v_init = x_init.mean(0).squeeze(
0), x_init.var(0).squeeze(0)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
x_init = scale_init.view(1, -1).expand_as(x_init) \
* (x_init - m_init.view(1, -1).expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return x_init
else:
v, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
self.training,
polyak_decay=self.polyak_decay)
# batch_size * out_features
x = F.linear(x, v)
scalar = g / torch.norm(v, 2, 1).squeeze(1)
x = scalar.view(1, -1).expand_as(x) * x + \
b.view(1, -1).expand_as(x)
return x
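# Usage note (not part of the original code): the weight-normalized layers in
# this file follow a two-pass protocol -- call forward(x, init=True) once on a
# representative batch to run the data-dependent initialization of g and b,
# then call forward(x) for regular training / inference steps.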
class WeightNormConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding,
dilation, groups)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_channels, in_channels // groups, * kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()
).type_as(self.V.data) * 0.05)
v_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
.norm(2, 1).view(self.out_channels, *(
[1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
x_init = F.conv2d(x, v_norm, None, self.stride,
self.padding, self.dilation, self.groups).data
t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(
x_init) * (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return x_init
else:
v, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = torch.norm(v.view(self.out_channels, -1), 2, 1)
if len(scalar.size()) == 2:
scalar = g / scalar.squeeze(1)
else:
scalar = g / scalar
w = scalar.view(self.out_channels, *
([1] * (len(v.size()) - 1))).expand_as(v) * v
x = F.conv2d(x, w, b, self.stride,
self.padding, self.dilation, self.groups)
return x
# This is used nowhere in the code at the moment (Vincent Nguyen 05/18/2018)
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConvTranspose2d, self).__init__(
in_channels, out_channels,
kernel_size, stride,
padding, output_padding,
groups)
# in_channels, out_channels, *kernel_size
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# in_channels, out_channels, *kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
.view(self.out_channels, -1).norm(2, 1).view(
self.in_channels, self.out_channels,
*([1] * len(self.kernel_size))).expand_as(self.V.data)
x_init = F.conv_transpose2d(
x, v_norm, None, self.stride,
self.padding, self.output_padding, self.groups).data
# self.out_channels, 1
            t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
# out_features
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(x_init)\
* (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return x_init
else:
v, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = g / \
torch.norm(v.transpose(0, 1).contiguous().view(
self.out_channels, -1), 2, 1).squeeze(1)
w = scalar.view(self.in_channels, self.out_channels,
*([1] * (len(v.size()) - 2))).expand_as(v) * v
x = F.conv_transpose2d(x, w, b, self.stride,
self.padding, self.output_padding,
self.groups)
return x
| 9,775 | 38.578947 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/position_ffn.py
|
"""Position feed-forward network from "Attention is All You Need"."""
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FNN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""Layer definition.
Args:
x: ``(batch_size, input_len, model_dim)``
Returns:
(FloatTensor): Output ``(batch_size, input_len, model_dim)``.
"""
inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def update_dropout(self, dropout):
self.dropout_1.p = dropout
self.dropout_2.p = dropout
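# Minimal usage sketch (not part of the original module). Sizes are
# illustrative: model_dim=16, hidden size d_ff=64, batch of 2, length 5.
if __name__ == "__main__":
    import torch
    ffn = PositionwiseFeedForward(d_model=16, d_ff=64, dropout=0.1)
    x = torch.randn(2, 5, 16)
    print(ffn(x).shape)  # torch.Size([2, 5, 16])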
| 1,308 | 30.166667 | 73 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/multi_headed_attn.py
|
""" Multi-Head Attention module """
import math
import torch
import torch.nn as nn
from onmt.utils.misc import generate_relative_positions_matrix,\
relative_matmul
# from onmt.utils.misc import aeq
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention module from "Attention is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.
Similar to standard `dot` attention but uses
    multiple attention distributions simultaneously
to select relevant items.
.. mermaid::
graph BT
A[key]
B[value]
C[query]
O[output]
subgraph Attn
D[Attn 1]
E[Attn 2]
F[Attn N]
end
A --> D
C --> D
A --> E
C --> E
A --> F
C --> F
D --> O
E --> O
F --> O
B --> O
Also includes several additional tricks.
Args:
head_count (int): number of parallel heads
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
"""
def __init__(self, head_count, model_dim, dropout=0.1,
max_relative_positions=0):
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
self.head_count = head_count
self.linear_keys = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.linear_values = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.linear_query = nn.Linear(model_dim,
head_count * self.dim_per_head)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.final_linear = nn.Linear(model_dim, model_dim)
self.max_relative_positions = max_relative_positions
if max_relative_positions > 0:
vocab_size = max_relative_positions * 2 + 1
self.relative_positions_embeddings = nn.Embedding(
vocab_size, self.dim_per_head)
def forward(self, key, value, query, mask=None,
layer_cache=None, attn_type=None):
"""
Compute the context vector and the attention vectors.
Args:
key (FloatTensor): set of `key_len`
key vectors ``(batch, key_len, dim)``
value (FloatTensor): set of `key_len`
value vectors ``(batch, key_len, dim)``
query (FloatTensor): set of `query_len`
query vectors ``(batch, query_len, dim)``
mask: binary mask 1/0 indicating which keys have
zero / non-zero attention ``(batch, query_len, key_len)``
Returns:
(FloatTensor, FloatTensor):
* output context vectors ``(batch, query_len, dim)``
* Attention vector in heads ``(batch, head, query_len, key_len)``.
"""
# CHECKS
# batch, k_len, d = key.size()
# batch_, k_len_, d_ = value.size()
# aeq(batch, batch_)
# aeq(k_len, k_len_)
# aeq(d, d_)
# batch_, q_len, d_ = query.size()
# aeq(batch, batch_)
# aeq(d, d_)
# aeq(self.model_dim % 8, 0)
# if mask is not None:
# batch_, q_len_, k_len_ = mask.size()
# aeq(batch_, batch)
# aeq(k_len_, k_len)
# aeq(q_len_ == q_len)
# END CHECKS
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
key_len = key.size(1)
query_len = query.size(1)
def shape(x):
"""Projection."""
return x.view(batch_size, -1, head_count, dim_per_head) \
.transpose(1, 2)
def unshape(x):
"""Compute context."""
return x.transpose(1, 2).contiguous() \
.view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if attn_type == "self":
query, key, value = self.linear_query(query),\
self.linear_keys(query),\
self.linear_values(query)
key = shape(key)
value = shape(value)
if layer_cache["self_keys"] is not None:
key = torch.cat(
(layer_cache["self_keys"], key),
dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat(
(layer_cache["self_values"], value),
dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif attn_type == "context":
query = self.linear_query(query)
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = layer_cache["memory_keys"],\
layer_cache["memory_values"]
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
if self.max_relative_positions > 0 and attn_type == "self":
key_len = key.size(2)
# 1 or key_len x key_len
relative_positions_matrix = generate_relative_positions_matrix(
key_len, self.max_relative_positions,
cache=True if layer_cache is not None else False)
# 1 or key_len x key_len x dim_per_head
relations_keys = self.relative_positions_embeddings(
relative_positions_matrix.to(key.device))
# 1 or key_len x key_len x dim_per_head
relations_values = self.relative_positions_embeddings(
relative_positions_matrix.to(key.device))
query = shape(query)
key_len = key.size(2)
query_len = query.size(2)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
# batch x num_heads x query_len x key_len
query_key = torch.matmul(query, key.transpose(2, 3))
if self.max_relative_positions > 0 and attn_type == "self":
scores = query_key + relative_matmul(query, relations_keys, True)
else:
scores = query_key
scores = scores.float()
if mask is not None:
mask = mask.unsqueeze(1) # [B, 1, 1, T_values]
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores).to(query.dtype)
drop_attn = self.dropout(attn)
context_original = torch.matmul(drop_attn, value)
if self.max_relative_positions > 0 and attn_type == "self":
context = unshape(context_original
+ relative_matmul(drop_attn,
relations_values,
False))
else:
context = unshape(context_original)
output = self.final_linear(context)
# CHECK
# batch_, q_len_, d_ = output.size()
# aeq(q_len, q_len_)
# aeq(batch, batch_)
# aeq(d, d_)
# Return multi-head attn
attns = attn \
.view(batch_size, head_count,
query_len, key_len)
return output, attns
def update_dropout(self, dropout):
self.dropout.p = dropout
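# Minimal self-attention sketch (not part of the original module). Sizes are
# illustrative: 4 heads over a 16-dimensional model, batch of 2, length 5.
if __name__ == "__main__":
    attn = MultiHeadedAttention(head_count=4, model_dim=16)
    x = torch.randn(2, 5, 16)
    out, scores = attn(x, x, x)
    print(out.shape)     # torch.Size([2, 5, 16])
    print(scores.shape)  # torch.Size([2, 4, 5, 5])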
| 8,133 | 34.212121 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/modules/table_embeddings.py
|
import torch
class TableEmbeddings(torch.nn.Module):
"""
    Now that I think about it, we could do this more efficiently than rewriting
    the onmt module. I will in the future, but for now this code works as is,
    so I won't risk breaking it!
    These embeddings follow the table structure: a table is an unordered set
    of tuples (pos, value), where pos can be viewed as a column name. As
such, TableEmbeddings' forward returns embeddings for pos and value.
Furthermore, the value embedding can be merged with the pos embedding.
Most argument names are not very fitting but stay the same
as onmt.modules.Embeddings
"""
def __init__(self,
word_vec_size, # dim of the value embeddings
word_vocab_size, # size of the value vocabulary
word_padding_idx, # idx of <pad>
feat_vec_size, # dim of the pos embeddings
feat_vec_exponent, # instead of feat_vec_size
feat_vocab_size, # size of the pos vocabulary
feat_padding_idx, # idx of <pad>
merge="concat", # decide to merge the pos and value
merge_activation='ReLU', # used if merge is mlp
dropout=0,
ent_idx=None):
super().__init__()
assert ent_idx is not None
self.ent_idx = ent_idx
self.word_padding_idx = word_padding_idx
self.word_vec_size = word_vec_size
if feat_vec_size < 0:
if not 0 < feat_vec_exponent <= 1:
raise ValueError('feat_vec_exponent should be between 0 and 1')
            feat_vec_size = int(feat_vocab_size ** feat_vec_exponent)
        # keep the (possibly recomputed) pos embedding size; it is needed
        # below when merge is 'concat' or 'sum'
        self.feat_vec_size = feat_vec_size
self.value_embeddings = torch.nn.Embedding(word_vocab_size,
word_vec_size, padding_idx=word_padding_idx)
self.pos_embeddings = torch.nn.Embedding(feat_vocab_size,
feat_vec_size, padding_idx=feat_padding_idx)
self._merge = merge
if merge is None:
self.embedding_size = self.word_vec_size
elif merge == 'concat':
self.embedding_size = self.word_vec_size + self.feat_vec_size
elif merge == 'sum':
assert self.word_vec_size == self.feat_vec_size
self.embedding_size = self.word_vec_size
elif merge == 'mlp':
self.embedding_size = self.word_vec_size
val_dim = self.value_embeddings.embedding_dim
pos_dim = self.pos_embeddings.embedding_dim
in_dim = val_dim + pos_dim
self.merge = torch.nn.Linear(in_dim, val_dim)
if merge_activation is None:
self.activation = None
elif merge_activation == 'ReLU':
self.activation = torch.nn.ReLU()
elif merge_activation == 'Tanh':
self.activation = torch.nn.Tanh()
else:
raise ValueError(f'Unknown activation {merge_activation}')
else:
raise ValueError('merge should be one of [None|concat|sum|mlp]')
@property
def word_lut(self):
"""Word look-up table."""
return self.value_embeddings
def load_pretrained_vectors(self, emb_file):
"""
        Placeholder for onmt compatibility.
"""
if emb_file:
raise NotImplementedError
def forward(self, inputs):
# unpack the inputs as cell values and pos (column name)
values, pos = [item.squeeze(2) for item in inputs.split(1, dim=2)]
        # embed them separately and maybe merge them
values = self.value_embeddings(values)
pos = self.pos_embeddings(pos)
if self._merge is None:
return values, pos
if self._merge == 'sum':
values = values + pos
return values, pos
values = torch.cat((values, pos), 2)
if self._merge == 'concat':
return values, pos
if self._merge == 'mlp':
values = self.merge(values)
if self.activation:
values = self.activation(values)
return values, pos
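# Minimal usage sketch (not part of the original module). Vocabulary sizes,
# dimensions and the ent_idx value are made-up assumptions.
if __name__ == "__main__":
    emb = TableEmbeddings(word_vec_size=8, word_vocab_size=100,
                          word_padding_idx=1, feat_vec_size=8,
                          feat_vec_exponent=0.7, feat_vocab_size=20,
                          feat_padding_idx=1, merge='mlp',
                          merge_activation='ReLU', ent_idx=4)
    cell_values = torch.randint(0, 100, (10, 2, 1))  # (seq_len, batch, 1)
    cell_pos = torch.randint(0, 20, (10, 2, 1))      # column-name indices
    values, pos = emb(torch.cat([cell_values, cell_pos], dim=2))
    print(values.shape, pos.shape)  # both torch.Size([10, 2, 8])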
| 4,278 | 37.54955 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/models/stacked_rnn.py
|
""" Implementation of ONMT RNN for Input Feeding Decoding """
import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
"""
Our own implementation of stacked LSTM.
Needed for the decoder, because we do input feeding.
"""
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(nn.LSTMCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input_feed, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i, layer in enumerate(self.layers):
h_1_i, c_1_i = layer(input_feed, (h_0[i], c_0[i]))
input_feed = h_1_i
if i + 1 != self.num_layers:
input_feed = self.dropout(input_feed)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input_feed, (h_1, c_1)
class StackedGRU(nn.Module):
"""
Our own implementation of stacked GRU.
Needed for the decoder, because we do input feeding.
"""
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRU, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(nn.GRUCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input_feed, hidden):
h_1 = []
for i, layer in enumerate(self.layers):
h_1_i = layer(input_feed, hidden[0][i])
input_feed = h_1_i
if i + 1 != self.num_layers:
input_feed = self.dropout(input_feed)
h_1 += [h_1_i]
h_1 = torch.stack(h_1)
return input_feed, (h_1,)
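# Minimal input-feeding sketch (not part of the original module). Sizes are
# illustrative: 2 layers, 16-dim input, 32-dim hidden state, batch of 4.
if __name__ == "__main__":
    rnn = StackedLSTM(num_layers=2, input_size=16, rnn_size=32, dropout=0.1)
    h0, c0 = torch.zeros(2, 4, 32), torch.zeros(2, 4, 32)
    out, (h1, c1) = rnn(torch.randn(4, 16), (h0, c0))
    print(out.shape, h1.shape)  # torch.Size([4, 32]) torch.Size([2, 4, 32])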
| 1,994 | 29.227273 | 66 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/models/model.py
|
""" Onmt NMT Model base class definition """
import torch.nn as nn
class NMTModel(nn.Module):
"""
Core trainable object in OpenNMT. Implements a trainable interface
for a simple, generic encoder + decoder model.
Args:
encoder (onmt.encoders.EncoderBase): an encoder object
decoder (onmt.decoders.DecoderBase): a decoder object
"""
def __init__(self, encoder, decoder):
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, tgt, lengths, bptt=False, with_align=False):
"""Forward propagate a `src` and `tgt` pair for training.
        Possibly initialized with a beginning decoder state.
Args:
src (Tensor): A source sequence passed to encoder.
typically for inputs this will be a padded `LongTensor`
of size ``(len, batch, features)``. However, may be an
image or other generic input depending on encoder.
tgt (LongTensor): A target sequence passed to decoder.
Size ``(tgt_len, batch, features)``.
lengths(LongTensor): The src lengths, pre-padding ``(batch,)``.
            bptt (Boolean): A flag indicating if truncated bptt is set.
                If False, the decoder state is re-initialized from the
                encoder output.
            with_align (Boolean): A flag indicating whether to also output
                alignments. Only valid for the transformer decoder.
Returns:
(FloatTensor, dict[str, FloatTensor]):
* decoder output ``(tgt_len, batch, hidden)``
* dictionary attention dists of ``(tgt_len, batch, src_len)``
"""
dec_in = tgt[:-1] # exclude last target from inputs
enc_state, memory_bank, lengths = self.encoder(src, lengths)
if bptt is False:
self.decoder.init_state(src, memory_bank, enc_state)
dec_out, attns = self.decoder(dec_in, memory_bank,
memory_lengths=lengths,
with_align=with_align)
return dec_out, attns
def update_dropout(self, dropout):
self.encoder.update_dropout(dropout)
self.decoder.update_dropout(dropout)
| 2,218 | 37.929825 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/models/__init__.py
|
"""Module defining models."""
from onmt.models.model_saver import build_model_saver, ModelSaver
from onmt.models.model import NMTModel
__all__ = ["build_model_saver", "ModelSaver", "NMTModel"]
| 194 | 31.5 | 65 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/models/model_saver.py
|
import os
import torch
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
model_saver = ModelSaver(opt.save_model,
model,
model_opt,
fields,
optim,
opt.keep_checkpoint)
return model_saver
class ModelSaverBase(object):
"""Base class for model saving operations
Inherited classes must implement private methods:
* `_save`
        * `_rm_checkpoint`
"""
def __init__(self, base_path, model, model_opt, fields, optim,
keep_checkpoint=-1):
self.base_path = base_path
self.model = model
self.model_opt = model_opt
self.fields = fields
self.optim = optim
self.last_saved_step = None
self.keep_checkpoint = keep_checkpoint
if keep_checkpoint > 0:
self.checkpoint_queue = deque([], maxlen=keep_checkpoint)
def save(self, step, moving_average=None):
"""Main entry point for model saver
        It wraps the `_save` method with checks and applies the
        `keep_checkpoint`-related logic
"""
if self.keep_checkpoint == 0 or step == self.last_saved_step:
return
save_model = self.model
if moving_average:
model_params_data = []
for avg, param in zip(moving_average, save_model.parameters()):
model_params_data.append(param.data)
param.data = avg.data
chkpt, chkpt_name = self._save(step, save_model)
self.last_saved_step = step
if moving_average:
for param_data, param in zip(model_params_data,
save_model.parameters()):
param.data = param_data
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name)
    def _save(self, step, model):
        """Save a resumable checkpoint.
        Args:
            step (int): step number
            model (nn.Module): the model to serialize
Returns:
(object, str):
* checkpoint: the saved object
* checkpoint_name: name (or path) of the saved checkpoint
"""
raise NotImplementedError()
def _rm_checkpoint(self, name):
"""Remove a checkpoint
Args:
            name(str): name that identifies the checkpoint
(it may be a filepath)
"""
raise NotImplementedError()
class ModelSaver(ModelSaverBase):
"""Simple model saver to filesystem"""
def _save(self, step, model):
model_state_dict = model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = model.generator.state_dict()
# NOTE: We need to trim the vocab to remove any unk tokens that
# were not originally here.
vocab = deepcopy(self.fields)
for side in ["src", "tgt"]:
keys_to_pop = []
if hasattr(vocab[side], "fields"):
unk_token = vocab[side].fields[0][1].vocab.itos[0]
for key, value in vocab[side].fields[0][1].vocab.stoi.items():
if value == 0 and key != unk_token:
keys_to_pop.append(key)
for key in keys_to_pop:
vocab[side].fields[0][1].vocab.stoi.pop(key, None)
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': vocab,
'opt': self.model_opt,
'optim': self.optim.state_dict(),
}
logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _rm_checkpoint(self, name):
os.remove(name)
| 4,230 | 30.340741 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/models/sru.py
|
""" SRU Implementation """
# flake8: noqa
import subprocess
import platform
import os
import re
import configargparse
import torch
import torch.nn as nn
from torch.autograd import Function
from collections import namedtuple
# For command-line option parsing
class CheckSRU(configargparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(CheckSRU, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values == 'SRU':
check_sru_requirement(abort=True)
            # Check passed; set the args.
setattr(namespace, self.dest, values)
# This SRU version implements its own cuda-level optimization,
# so it requires that:
# 1. `cupy` and `pynvrtc` python package installed.
# 2. pytorch is built with cuda support.
# 3. library path set: export LD_LIBRARY_PATH=<cuda lib path>.
def check_sru_requirement(abort=False):
"""
    Return True if the check passes; if the check fails and abort is True,
    raise an Exception, otherwise return False.
"""
# Check 1.
try:
if platform.system() == 'Windows':
subprocess.check_output('pip freeze | findstr cupy', shell=True)
subprocess.check_output('pip freeze | findstr pynvrtc',
shell=True)
else: # Unix-like systems
subprocess.check_output('pip freeze | grep -w cupy', shell=True)
subprocess.check_output('pip freeze | grep -w pynvrtc',
shell=True)
except subprocess.CalledProcessError:
if not abort:
return False
raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
"python packages installed.")
# Check 2.
if torch.cuda.is_available() is False:
if not abort:
return False
raise AssertionError("Using SRU requires pytorch built with cuda.")
# Check 3.
pattern = re.compile(".*cuda/lib.*")
ld_path = os.getenv('LD_LIBRARY_PATH', "")
if re.match(pattern, ld_path) is None:
if not abort:
return False
raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
return True
SRU_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
}
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
}
__global__ void sru_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len,
const int batch, const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len, const int batch,
const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1)?(*(cp-ncols_)):(*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
}
"""
SRU_FWD_FUNC, SRU_BWD_FUNC = None, None
SRU_BiFWD_FUNC, SRU_BiBWD_FUNC = None, None
SRU_STREAM = None
def load_sru_mod():
global SRU_FWD_FUNC, SRU_BWD_FUNC, SRU_BiFWD_FUNC, SRU_BiBWD_FUNC
global SRU_STREAM
if check_sru_requirement():
from cupy.cuda import function
from pynvrtc.compiler import Program
# This sets up device to use.
device = torch.device("cuda")
tmp_ = torch.rand(1, 1).to(device)
sru_prog = Program(SRU_CODE.encode('utf-8'),
'sru_prog.cu'.encode('utf-8'))
sru_ptx = sru_prog.compile()
sru_mod = function.Module()
sru_mod.load(bytes(sru_ptx.encode()))
SRU_FWD_FUNC = sru_mod.get_function('sru_fwd')
SRU_BWD_FUNC = sru_mod.get_function('sru_bwd')
SRU_BiFWD_FUNC = sru_mod.get_function('sru_bi_fwd')
SRU_BiBWD_FUNC = sru_mod.get_function('sru_bi_bwd')
stream = namedtuple('Stream', ['ptr'])
SRU_STREAM = stream(ptr=torch.cuda.current_stream().cuda_stream)
class SRU_Compute(Function):
def __init__(self, activation_type, d_out, bidirectional=False):
SRU_Compute.maybe_load_sru_mod()
super(SRU_Compute, self).__init__()
self.activation_type = activation_type
self.d_out = d_out
self.bidirectional = bidirectional
@staticmethod
def maybe_load_sru_mod():
global SRU_FWD_FUNC
if SRU_FWD_FUNC is None:
load_sru_mod()
def forward(self, u, x, bias, init=None, mask_h=None):
bidir = 2 if self.bidirectional else 1
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k // 2 if self.bidirectional else k
ncols = batch * d * bidir
thread_per_block = min(512, ncols)
num_block = (ncols - 1) // thread_per_block + 1
init_ = x.new(ncols).zero_() if init is None else init
size = (length, batch, d * bidir) if x.dim() == 3 else (batch, d * bidir)
c = x.new(*size)
h = x.new(*size)
FUNC = SRU_FWD_FUNC if not self.bidirectional else SRU_BiFWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
length,
batch,
d,
k_,
h.data_ptr(),
c.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
self.save_for_backward(u, x, bias, init, mask_h)
self.intermediate = c
if x.dim() == 2:
last_hidden = c
elif self.bidirectional:
# -> directions x batch x dim
last_hidden = torch.stack((c[-1, :, :d], c[0, :, d:]))
else:
last_hidden = c[-1]
return h, last_hidden
def backward(self, grad_h, grad_last):
if self.bidirectional:
grad_last = torch.cat((grad_last[0], grad_last[1]), 1)
bidir = 2 if self.bidirectional else 1
u, x, bias, init, mask_h = self.saved_tensors
c = self.intermediate
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k // 2 if self.bidirectional else k
ncols = batch * d * bidir
thread_per_block = min(512, ncols)
num_block = (ncols - 1) // thread_per_block + 1
init_ = x.new(ncols).zero_() if init is None else init
grad_u = u.new(*u.size())
grad_bias = x.new(2, batch, d * bidir)
grad_init = x.new(batch, d * bidir)
# For DEBUG
# size = (length, batch, x.size(-1)) \
# if x.dim() == 3 else (batch, x.size(-1))
# grad_x = x.new(*x.size()) if k_ == 3 else x.new(*size).zero_()
# Normal use
grad_x = x.new(*x.size()) if k_ == 3 else None
FUNC = SRU_BWD_FUNC if not self.bidirectional else SRU_BiBWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
c.data_ptr(),
grad_h.contiguous().data_ptr(),
grad_last.contiguous().data_ptr(),
length,
batch,
d,
k_,
grad_u.data_ptr(),
grad_x.data_ptr() if k_ == 3 else 0,
grad_bias.data_ptr(),
grad_init.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
return grad_u, grad_x, grad_bias.sum(1).view(-1), grad_init, None
class SRUCell(nn.Module):
def __init__(self, n_in, n_out, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
super(SRUCell, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.rnn_dropout = rnn_dropout
self.dropout = dropout
self.bidirectional = bidirectional
self.activation_type = 2 if use_relu else (1 if use_tanh else 0)
out_size = n_out * 2 if bidirectional else n_out
k = 4 if n_in != out_size else 3
self.size_per_dir = n_out * k
self.weight = nn.Parameter(torch.Tensor(
n_in,
self.size_per_dir * 2 if bidirectional else self.size_per_dir
))
self.bias = nn.Parameter(torch.Tensor(
n_out * 4 if bidirectional else n_out * 2
))
self.init_weight()
def init_weight(self):
val_range = (3.0 / self.n_in)**0.5
self.weight.data.uniform_(-val_range, val_range)
self.bias.data.zero_()
def set_bias(self, bias_val=0):
n_out = self.n_out
if self.bidirectional:
self.bias.data[n_out * 2:].zero_().add_(bias_val)
else:
self.bias.data[n_out:].zero_().add_(bias_val)
def forward(self, input, c0=None):
assert input.dim() == 2 or input.dim() == 3
n_in, n_out = self.n_in, self.n_out
batch = input.size(-2)
if c0 is None:
c0 = input.data.new(
batch, n_out if not self.bidirectional else n_out * 2
).zero_()
if self.training and (self.rnn_dropout > 0):
mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
x = input * mask.expand_as(input)
else:
x = input
x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
u = x_2d.mm(self.weight)
if self.training and (self.dropout > 0):
bidir = 2 if self.bidirectional else 1
mask_h = self.get_dropout_mask_(
(batch, n_out * bidir), self.dropout)
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0, mask_h
)
else:
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0
)
return h, c
def get_dropout_mask_(self, size, p):
w = self.weight.data
return w.new(*size).bernoulli_(1 - p).div_(1 - p)
class SRU(nn.Module):
"""
Implementation of "Training RNNs as Fast as CNNs"
:cite:`DBLP:journals/corr/abs-1709-02755`
    TODO: switch to pytorch's implementation when it is available.
    This implementation is adapted from the author of the paper:
https://github.com/taolei87/sru/blob/master/cuda_functional.py.
Args:
input_size (int): input to model
hidden_size (int): hidden dimension
num_layers (int): number of layers
dropout (float): dropout to use (stacked)
rnn_dropout (float): dropout to use (recurrent)
bidirectional (bool): bidirectional
use_tanh (bool): activation
use_relu (bool): activation
"""
def __init__(self, input_size, hidden_size,
num_layers=2, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
        # An entry check here will catch it on both the train side and the
        # translate side if the requirements are not satisfied.
check_sru_requirement(abort=True)
super(SRU, self).__init__()
self.n_in = input_size
self.n_out = hidden_size
self.depth = num_layers
self.dropout = dropout
self.rnn_dropout = rnn_dropout
self.rnn_lst = nn.ModuleList()
self.bidirectional = bidirectional
self.out_size = hidden_size * 2 if bidirectional else hidden_size
for i in range(num_layers):
sru_cell = SRUCell(
n_in=self.n_in if i == 0 else self.out_size,
n_out=self.n_out,
dropout=dropout if i + 1 != num_layers else 0,
rnn_dropout=rnn_dropout,
bidirectional=bidirectional,
use_tanh=use_tanh,
use_relu=use_relu,
)
self.rnn_lst.append(sru_cell)
def set_bias(self, bias_val=0):
for l in self.rnn_lst:
l.set_bias(bias_val)
def forward(self, input, c0=None, return_hidden=True):
assert input.dim() == 3 # (len, batch, n_in)
dir_ = 2 if self.bidirectional else 1
if c0 is None:
zeros = input.data.new(
input.size(1), self.n_out * dir_
).zero_()
c0 = [zeros for i in range(self.depth)]
else:
if isinstance(c0, tuple):
# RNNDecoderState wraps hidden as a tuple.
c0 = c0[0]
assert c0.dim() == 3 # (depth, batch, dir_*n_out)
c0 = [h.squeeze(0) for h in c0.chunk(self.depth, 0)]
prevx = input
lstc = []
for i, rnn in enumerate(self.rnn_lst):
h, c = rnn(prevx, c0[i])
prevx = h
lstc.append(c)
if self.bidirectional:
# fh -> (layers*directions) x batch x dim
fh = torch.cat(lstc)
else:
fh = torch.stack(lstc)
if return_hidden:
return prevx, fh
else:
return prevx
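# Usage sketch (not part of the original module). It needs the cupy/pynvrtc and
# CUDA setup verified by check_sru_requirement; sizes are illustrative:
#
#   rnn = SRU(input_size=256, hidden_size=256, num_layers=2).cuda()
#   x = torch.randn(35, 20, 256, device="cuda")  # (len, batch, n_in)
#   output, hidden = rnn(x)                      # (35, 20, 256), (2, 20, 256)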
| 24,302 | 36.27454 | 81 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/average_models.py
|
#!/usr/bin/env python
import argparse
import torch
def average_models(model_files, fp32=False):
vocab = None
opt = None
avg_model = None
avg_generator = None
for i, model_file in enumerate(model_files):
m = torch.load(model_file, map_location='cpu')
model_weights = m['model']
generator_weights = m['generator']
if fp32:
for k, v in model_weights.items():
model_weights[k] = v.float()
for k, v in generator_weights.items():
generator_weights[k] = v.float()
if i == 0:
vocab, opt = m['vocab'], m['opt']
avg_model = model_weights
avg_generator = generator_weights
else:
for (k, v) in avg_model.items():
avg_model[k].mul_(i).add_(model_weights[k]).div_(i + 1)
for (k, v) in avg_generator.items():
avg_generator[k].mul_(i).add_(generator_weights[k]).div_(i + 1)
final = {"vocab": vocab, "opt": opt, "optim": None,
"generator": avg_generator, "model": avg_model}
return final
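# Example invocation (file names are illustrative):
#   python average_models.py -m model_step_10000.pt model_step_20000.pt \
#       -o model_avg.pt [-fp32]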
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("-models", "-m", nargs="+", required=True,
help="List of models")
parser.add_argument("-output", "-o", required=True,
help="Output file")
parser.add_argument("-fp32", "-f", action="store_true",
help="Cast params to float32")
opt = parser.parse_args()
final = average_models(opt.models, opt.fp32)
torch.save(final, opt.output)
if __name__ == "__main__":
main()
| 1,665 | 29.290909 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/server.py
|
#!/usr/bin/env python
import configargparse
from flask import Flask, jsonify, request
from onmt.translate import TranslationServer, ServerModelError
STATUS_OK = "ok"
STATUS_ERROR = "error"
def start(config_file,
url_root="./translator",
host="0.0.0.0",
port=5000,
debug=True):
def prefix_route(route_function, prefix='', mask='{0}{1}'):
def newroute(route, *args, **kwargs):
return route_function(mask.format(prefix, route), *args, **kwargs)
return newroute
app = Flask(__name__)
app.route = prefix_route(app.route, url_root)
translation_server = TranslationServer()
translation_server.start(config_file)
@app.route('/models', methods=['GET'])
def get_models():
out = translation_server.list_models()
return jsonify(out)
@app.route('/health', methods=['GET'])
def health():
out = {}
out['status'] = STATUS_OK
return jsonify(out)
@app.route('/clone_model/<int:model_id>', methods=['POST'])
def clone_model(model_id):
out = {}
data = request.get_json(force=True)
timeout = -1
if 'timeout' in data:
timeout = data['timeout']
del data['timeout']
opt = data.get('opt', None)
try:
model_id, load_time = translation_server.clone_model(
model_id, opt, timeout)
except ServerModelError as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
else:
out['status'] = STATUS_OK
out['model_id'] = model_id
out['load_time'] = load_time
return jsonify(out)
@app.route('/unload_model/<int:model_id>', methods=['GET'])
def unload_model(model_id):
out = {"model_id": model_id}
try:
translation_server.unload_model(model_id)
out['status'] = STATUS_OK
except Exception as e:
out['status'] = STATUS_ERROR
out['error'] = str(e)
return jsonify(out)
@app.route('/translate', methods=['POST'])
def translate():
inputs = request.get_json(force=True)
out = {}
try:
trans, scores, n_best, _, aligns = translation_server.run(inputs)
assert len(trans) == len(inputs) * n_best
assert len(scores) == len(inputs) * n_best
assert len(aligns) == len(inputs) * n_best
out = [[] for _ in range(n_best)]
for i in range(len(trans)):
response = {"src": inputs[i // n_best]['src'], "tgt": trans[i],
"n_best": n_best, "pred_score": scores[i]}
if aligns[i] is not None:
response["align"] = aligns[i]
out[i % n_best].append(response)
except ServerModelError as e:
out['error'] = str(e)
out['status'] = STATUS_ERROR
return jsonify(out)
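    # Example exchange (illustrative): POSTing a JSON list such as
    #   [{"src": "some source sentence", "id": 100}]
    # to <url_root>/translate returns, for each of the n_best hypotheses, a
    # list of {"src", "tgt", "n_best", "pred_score"} dicts (plus "align" when
    # alignments are available). The "id" key selecting the model is an
    # assumption about the loaded server configuration.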
@app.route('/to_cpu/<int:model_id>', methods=['GET'])
def to_cpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_cpu()
out['status'] = STATUS_OK
return jsonify(out)
@app.route('/to_gpu/<int:model_id>', methods=['GET'])
def to_gpu(model_id):
out = {'model_id': model_id}
translation_server.models[model_id].to_gpu()
out['status'] = STATUS_OK
return jsonify(out)
app.run(debug=debug, host=host, port=port, use_reloader=False,
threaded=True)
def _get_parser():
parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
description="OpenNMT-py REST Server")
parser.add_argument("--ip", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int, default="5000")
parser.add_argument("--url_root", type=str, default="/translator")
parser.add_argument("--debug", "-d", action="store_true")
parser.add_argument("--config", "-c", type=str,
default="./available_models/conf.json")
return parser
def main():
parser = _get_parser()
args = parser.parse_args()
start(args.config, url_root=args.url_root, host=args.ip, port=args.port,
debug=args.debug)
if __name__ == "__main__":
main()
| 4,328 | 30.369565 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/translate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
def translate(opt):
ArgumentParser.validate_translate_opts(opt)
logger = init_logger(opt.log_file)
translator = build_translator(opt, report_score=True)
src_shards = split_corpus(opt.src, opt.shard_size)
tgt_shards = split_corpus(opt.tgt, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards)
for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
logger.info("Translating shard %d." % i)
translator.translate(
src=src_shard,
tgt=tgt_shard,
src_dir=opt.src_dir,
batch_size=opt.batch_size,
batch_type=opt.batch_type,
attn_debug=opt.attn_debug,
align_debug=opt.align_debug
)
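# Typical invocation (paths and flags are illustrative):
#   python translate.py -model model_step_100000.pt -src test.src.txt \
#       -output predictions.txt -batch_size 32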
def _get_parser():
parser = ArgumentParser(description='translate.py')
opts.config_opts(parser)
opts.translate_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
translate(opt)
if __name__ == "__main__":
main()
| 1,308 | 23.698113 | 60 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/__init__.py
|
| 0 | 0 | 0 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
def train(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
set_random_seed(opt.seed, False)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
queues = []
mp = torch.multiprocessing.get_context('spawn')
semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
q = mp.Queue(opt.queue_size)
queues += [q]
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, q, semaphore), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
producer = mp.Process(target=batch_producer,
args=(train_iter, queues, semaphore, opt,),
daemon=True)
producer.start()
error_handler.add_child(producer.pid)
for p in procs:
p.join()
producer.terminate()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
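# Typical invocations (paths and flags are illustrative):
#   CPU / single GPU: python train.py -data data/demo -save_model demo-model
#   multi-GPU:        python train.py -data data/demo -save_model demo-model \
#                         -world_size 2 -gpu_ranks 0 1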
def batch_producer(generator_to_serve, queues, semaphore, opt):
init_logger(opt.log_file)
set_random_seed(opt.seed, False)
# generator_to_serve = iter(generator_to_serve)
def pred(x):
"""
Filters batches that belong only
to gpu_ranks of current node
"""
for rank in opt.gpu_ranks:
if x[0] % opt.world_size == rank:
return True
generator_to_serve = filter(
pred, enumerate(generator_to_serve))
def next_batch(device_id):
new_batch = next(generator_to_serve)
semaphore.acquire()
return new_batch[1]
b = next_batch(0)
for device_id, q in cycle(enumerate(queues)):
b.dataset = None
if isinstance(b.src, tuple):
b.src = tuple([_.to(torch.device(device_id))
for _ in b.src])
else:
b.src = b.src.to(torch.device(device_id))
b.tgt = b.tgt.to(torch.device(device_id))
b.indices = b.indices.to(torch.device(device_id))
b.alignment = b.alignment.to(torch.device(device_id)) \
if hasattr(b, 'alignment') else None
b.src_map = b.src_map.to(torch.device(device_id)) \
if hasattr(b, 'src_map') else None
b.align = b.align.to(torch.device(device_id)) \
if hasattr(b, 'align') else None
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
q.put(b)
b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id, batch_queue, semaphore)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def _get_parser():
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
train(opt)
if __name__ == "__main__":
main()
| 6,849 | 31.77512 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/bin/preprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pre-process Data / features files and build vocabulary
"""
import codecs
import glob
import gc
import torch
from collections import Counter, defaultdict
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import split_corpus
import onmt.inputters as inputters
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import _build_fields_vocab,\
_load_vocab
from functools import partial
from multiprocessing import Pool
def check_existing_pt_files(opt, corpus_type, ids, existing_fields):
""" Check if there are existing .pt files to avoid overwriting them """
existing_shards = []
for maybe_id in ids:
if maybe_id:
shard_base = corpus_type + "_" + maybe_id
else:
shard_base = corpus_type
pattern = opt.save_data + '.{}.*.pt'.format(shard_base)
if glob.glob(pattern):
if opt.overwrite:
maybe_overwrite = ("will be overwritten because "
"`-overwrite` option is set.")
else:
maybe_overwrite = ("won't be overwritten, pass the "
"`-overwrite` option if you want to.")
logger.warning("Shards for corpus {} already exist, {}"
.format(shard_base, maybe_overwrite))
existing_shards += [maybe_id]
return existing_shards
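# Shard files are named "<save_data>.<corpus_type>[_<id>].<shard_index>.pt".
# For example (hypothetical values), with save_data="data/demo",
# corpus_type="train" and maybe_id="cnn", the glob pattern above matches
# "data/demo.train_cnn.0.pt", "data/demo.train_cnn.1.pt", and so on.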
def process_one_shard(corpus_params, params):
corpus_type, fields, src_reader, tgt_reader, align_reader, opt,\
existing_fields, src_vocab, tgt_vocab = corpus_params
i, (src_shard, tgt_shard, align_shard, maybe_id, filter_pred) = params
# create one counter per shard
sub_sub_counter = defaultdict(Counter)
assert len(src_shard) == len(tgt_shard)
logger.info("Building shard %d." % i)
src_data = {"reader": src_reader, "data": src_shard, "dir": opt.src_dir}
tgt_data = {"reader": tgt_reader, "data": tgt_shard, "dir": None}
align_data = {"reader": align_reader, "data": align_shard, "dir": None}
_readers, _data, _dir = inputters.Dataset.config(
[('src', src_data), ('tgt', tgt_data), ('align', align_data)])
dataset = inputters.Dataset(
fields, readers=_readers, data=_data, dirs=_dir,
sort_key=inputters.str2sortkey[opt.data_type],
filter_pred=filter_pred
)
if corpus_type == "train" and existing_fields is None:
for ex in dataset.examples:
for name, field in fields.items():
if ((opt.data_type == "audio") and (name == "src")):
continue
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and
src_vocab is not None) or \
(sub_n == 'tgt' and
tgt_vocab is not None)
if (hasattr(sub_f, 'sequential')
and sub_f.sequential and not has_vocab):
val = fd
sub_sub_counter[sub_n].update(val)
if maybe_id:
shard_base = corpus_type + "_" + maybe_id
else:
shard_base = corpus_type
data_path = "{:s}.{:s}.{:d}.pt".\
format(opt.save_data, shard_base, i)
logger.info(" * saving %sth %s data shard to %s."
% (i, shard_base, data_path))
dataset.save(data_path)
del dataset.examples
gc.collect()
del dataset
gc.collect()
return sub_sub_counter
def maybe_load_vocab(corpus_type, counters, opt):
src_vocab = None
tgt_vocab = None
existing_fields = None
if corpus_type == "train":
if opt.src_vocab != "":
try:
logger.info("Using existing vocabulary...")
existing_fields = torch.load(opt.src_vocab)
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
src_vocab, src_vocab_size = _load_vocab(
opt.src_vocab, "src", counters,
opt.src_words_min_frequency)
if opt.tgt_vocab != "":
tgt_vocab, tgt_vocab_size = _load_vocab(
opt.tgt_vocab, "tgt", counters,
opt.tgt_words_min_frequency)
return src_vocab, tgt_vocab, existing_fields
def build_save_dataset(corpus_type, fields, src_reader, tgt_reader,
align_reader, opt):
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
counters = defaultdict(Counter)
srcs = opt.train_src
tgts = opt.train_tgt
ids = opt.train_ids
aligns = opt.train_align
elif corpus_type == 'valid':
counters = None
srcs = [opt.valid_src]
tgts = [opt.valid_tgt]
ids = [None]
aligns = [opt.valid_align]
src_vocab, tgt_vocab, existing_fields = maybe_load_vocab(
corpus_type, counters, opt)
existing_shards = check_existing_pt_files(
opt, corpus_type, ids, existing_fields)
    # every corpus already has shards and none need to be rebuilt
if existing_shards == ids and not opt.overwrite:
return
def shard_iterator(srcs, tgts, ids, aligns, existing_shards,
existing_fields, corpus_type, opt):
"""
Builds a single iterator yielding every shard of every corpus.
"""
for src, tgt, maybe_id, maybe_align in zip(srcs, tgts, ids, aligns):
if maybe_id in existing_shards:
if opt.overwrite:
logger.warning("Overwrite shards for corpus {}"
.format(maybe_id))
else:
if corpus_type == "train":
assert existing_fields is not None,\
("A 'vocab.pt' file should be passed to "
"`-src_vocab` when adding a corpus to "
"a set of already existing shards.")
logger.warning("Ignore corpus {} because "
"shards already exist"
.format(maybe_id))
continue
if ((corpus_type == "train" or opt.filter_valid)
and tgt is not None):
filter_pred = partial(
inputters.filter_example,
use_src_len=opt.data_type == "text",
max_src_len=opt.src_seq_length,
max_tgt_len=opt.tgt_seq_length)
else:
filter_pred = None
src_shards = split_corpus(src, opt.shard_size)
tgt_shards = split_corpus(tgt, opt.shard_size)
align_shards = split_corpus(maybe_align, opt.shard_size)
for i, (ss, ts, a_s) in enumerate(
zip(src_shards, tgt_shards, align_shards)):
yield (i, (ss, ts, a_s, maybe_id, filter_pred))
shard_iter = shard_iterator(srcs, tgts, ids, aligns, existing_shards,
existing_fields, corpus_type, opt)
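    # `partial` binds the parameters shared by every shard so the worker
    # function takes a single per-shard argument, as `imap` requires;
    # `imap` then yields the per-shard counters back in shard order.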
with Pool(opt.num_threads) as p:
dataset_params = (corpus_type, fields, src_reader, tgt_reader,
align_reader, opt, existing_fields,
src_vocab, tgt_vocab)
func = partial(process_one_shard, dataset_params)
for sub_counter in p.imap(func, shard_iter):
if sub_counter is not None:
for key, value in sub_counter.items():
counters[key].update(value)
if corpus_type == "train":
vocab_path = opt.save_data + '.vocab.pt'
if existing_fields is None:
fields = _build_fields_vocab(
fields, counters, opt.data_type,
opt.share_vocab, opt.vocab_size_multiple,
opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab_size, opt.tgt_words_min_frequency)
else:
fields = existing_fields
torch.save(fields, vocab_path)
def build_save_vocab(train_dataset, fields, opt):
fields = inputters.build_vocab(
train_dataset, fields, opt.data_type, opt.share_vocab,
opt.src_vocab, opt.src_vocab_size, opt.src_words_min_frequency,
opt.tgt_vocab, opt.tgt_vocab_size, opt.tgt_words_min_frequency,
vocab_size_multiple=opt.vocab_size_multiple
)
vocab_path = opt.save_data + '.vocab.pt'
torch.save(fields, vocab_path)
def count_features(path):
"""
path: location of a corpus file with whitespace-delimited tokens and
│-delimited features within the token
returns: the number of features in the dataset
"""
with codecs.open(path, "r", "utf-8") as f:
first_tok = f.readline().split(None, 1)[0]
return len(first_tok.split(u"│")) - 1
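# For example, if the first token of the file is "Madrid│LOC│B-ENT" (a value
# followed by two │-delimited features), count_features returns 2.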
def preprocess(opt):
ArgumentParser.validate_preprocess_args(opt)
torch.manual_seed(opt.seed)
init_logger(opt.log_file)
logger.info("Extracting features...")
src_nfeats = 0
tgt_nfeats = 0
for src, tgt in zip(opt.train_src, opt.train_tgt):
src_nfeats += count_features(src) if opt.data_type == 'text' \
else 0
tgt_nfeats += count_features(tgt) # tgt always text so far
logger.info(" * number of source features: %d." % src_nfeats)
logger.info(" * number of target features: %d." % tgt_nfeats)
logger.info("Building `Fields` object...")
fields = inputters.get_fields(
opt.data_type,
src_nfeats,
tgt_nfeats,
dynamic_dict=opt.dynamic_dict,
with_align=opt.train_align[0] is not None,
src_truncate=opt.src_seq_length_trunc,
tgt_truncate=opt.tgt_seq_length_trunc)
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
align_reader = inputters.str2reader["text"].from_opt(opt)
logger.info("Building & saving training data...")
build_save_dataset(
'train', fields, src_reader, tgt_reader, align_reader, opt)
if opt.valid_src and opt.valid_tgt:
logger.info("Building & saving validation data...")
build_save_dataset(
'valid', fields, src_reader, tgt_reader, align_reader, opt)
def _get_parser():
parser = ArgumentParser(description='preprocess.py')
opts.config_opts(parser)
opts.preprocess_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
preprocess(opt)
if __name__ == "__main__":
main()
| 11,018 | 35.97651 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/transformer.py
|
"""
Implementation of "Attention is All You Need"
"""
import torch
import torch.nn as nn
from onmt.decoders.decoder import DecoderBase
from onmt.modules import MultiHeadedAttention, AverageAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
from onmt.utils.misc import sequence_mask
class TransformerDecoderLayer(nn.Module):
"""
Args:
d_model (int): the dimension of keys/values/queries in
:class:`MultiHeadedAttention`, also the input size of
the first-layer of the :class:`PositionwiseFeedForward`.
heads (int): the number of heads for MultiHeadedAttention.
d_ff (int): the second-layer of the :class:`PositionwiseFeedForward`.
dropout (float): dropout probability.
        self_attn_type (string): type of self-attention, "scaled-dot" or "average".
"""
def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
self_attn_type="scaled-dot", max_relative_positions=0,
aan_useffn=False, full_context_alignment=False,
alignment_heads=None):
super(TransformerDecoderLayer, self).__init__()
if self_attn_type == "scaled-dot":
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout,
max_relative_positions=max_relative_positions)
elif self_attn_type == "average":
self.self_attn = AverageAttention(d_model,
dropout=attention_dropout,
aan_useffn=aan_useffn)
self.context_attn = MultiHeadedAttention(
heads, d_model, dropout=attention_dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
self.drop = nn.Dropout(dropout)
self.full_context_alignment = full_context_alignment
self.alignment_heads = alignment_heads
def forward(self, *args, **kwargs):
""" Extend _forward for (possibly) multiple decoder pass:
1. Always a default (future masked) decoder forward pass,
2. Possibly a second future aware decoder pass for joint learn
full context alignement.
Args:
* All arguments of _forward.
with_align (bool): whether return alignment attention.
Returns:
(FloatTensor, FloatTensor, FloatTensor or None):
* output ``(batch_size, 1, model_dim)``
* top_attn ``(batch_size, 1, src_len)``
* attn_align ``(batch_size, 1, src_len)`` or None
"""
with_align = kwargs.pop('with_align', False)
output, attns = self._forward(*args, **kwargs)
top_attn = attns[:, 0, :, :].contiguous()
attn_align = None
if with_align:
if self.full_context_alignment:
# return _, (B, Q_len, K_len)
_, attns = self._forward(*args, **kwargs, future=True)
if self.alignment_heads is not None:
attns = attns[:, :self.alignment_heads, :, :].contiguous()
# layer average attention across heads, get ``(B, Q, K)``
# Case 1: no full_context, no align heads -> layer avg baseline
# Case 2: no full_context, 1 align heads -> guided align
# Case 3: full_context, 1 align heads -> full cte guided align
attn_align = attns.mean(dim=1)
return output, top_attn, attn_align
def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
layer_cache=None, step=None, future=False):
""" A naive forward pass for transformer decoder.
# TODO: change 1 to T as T could be 1 or tgt_len
Args:
inputs (FloatTensor): ``(batch_size, 1, model_dim)``
memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``
Returns:
(FloatTensor, FloatTensor):
* output ``(batch_size, 1, model_dim)``
* attns ``(batch_size, head, 1, src_len)``
"""
dec_mask = None
if step is None:
tgt_len = tgt_pad_mask.size(-1)
if not future: # apply future_mask, result mask in (B, T, T)
future_mask = torch.ones(
[tgt_len, tgt_len],
device=tgt_pad_mask.device,
dtype=torch.uint8)
future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
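                # For illustration, with tgt_len = 3, triu_(1) keeps only the
                # strictly upper triangle, so future_mask[0] is
                #   [[0, 1, 1],
                #    [0, 0, 1],
                #    [0, 0, 0]]
                # i.e. once merged with the padding mask below, position i
                # cannot attend to positions j > i.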
# BoolTensor was introduced in pytorch 1.2
try:
future_mask = future_mask.bool()
except AttributeError:
pass
dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)
else: # only mask padding, result mask in (B, 1, T)
dec_mask = tgt_pad_mask
input_norm = self.layer_norm_1(inputs)
if isinstance(self.self_attn, MultiHeadedAttention):
query, _ = self.self_attn(input_norm, input_norm, input_norm,
mask=dec_mask,
layer_cache=layer_cache,
attn_type="self")
elif isinstance(self.self_attn, AverageAttention):
query, _ = self.self_attn(input_norm, mask=dec_mask,
layer_cache=layer_cache, step=step)
query = self.drop(query) + inputs
query_norm = self.layer_norm_2(query)
mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,
mask=src_pad_mask,
layer_cache=layer_cache,
attn_type="context")
output = self.feed_forward(self.drop(mid) + query)
return output, attns
def update_dropout(self, dropout, attention_dropout):
self.self_attn.update_dropout(attention_dropout)
self.context_attn.update_dropout(attention_dropout)
self.feed_forward.update_dropout(dropout)
self.drop.p = dropout
class TransformerDecoder(DecoderBase):
"""The Transformer decoder from "Attention is All You Need".
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`
.. mermaid::
graph BT
A[input]
B[multi-head self-attn]
BB[multi-head src-attn]
C[feed forward]
O[output]
A --> B
B --> BB
BB --> C
C --> O
Args:
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
copy_attn (bool): if using a separate copy attention
        self_attn_type (str): type of self-attention, "scaled-dot" or "average".
dropout (float): dropout parameters
embeddings (onmt.modules.Embeddings):
embeddings to use, should have positional encodings
"""
def __init__(self, num_layers, d_model, heads, d_ff,
copy_attn, self_attn_type, dropout, attention_dropout,
embeddings, max_relative_positions, aan_useffn,
full_context_alignment, alignment_layer,
alignment_heads=None):
super(TransformerDecoder, self).__init__()
self.embeddings = embeddings
# Decoder State
self.state = {}
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout,
attention_dropout, self_attn_type=self_attn_type,
max_relative_positions=max_relative_positions,
aan_useffn=aan_useffn,
full_context_alignment=full_context_alignment,
alignment_heads=alignment_heads)
for i in range(num_layers)])
# previously, there was a GlobalAttention module here for copy
# attention. But it was never actually used -- the "copy" attention
# just reuses the context attention.
self._copy = copy_attn
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.alignment_layer = alignment_layer
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.dec_layers,
opt.dec_rnn_size,
opt.heads,
opt.transformer_ff,
opt.copy_attn,
opt.self_attn_type,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
opt.attention_dropout[0] if type(opt.attention_dropout)
is list else opt.dropout,
embeddings,
opt.max_relative_positions,
opt.aan_useffn,
opt.full_context_alignment,
opt.alignment_layer,
alignment_heads=opt.alignment_heads)
def init_state(self, src, memory_bank, enc_hidden):
"""Initialize decoder state."""
self.state["src"] = src
self.state["cache"] = None
def map_state(self, fn):
def _recursive_map(struct, batch_dim=0):
for k, v in struct.items():
if v is not None:
if isinstance(v, dict):
_recursive_map(v)
else:
struct[k] = fn(v, batch_dim)
self.state["src"] = fn(self.state["src"], 1)
if self.state["cache"] is not None:
_recursive_map(self.state["cache"])
def detach_state(self):
self.state["src"] = self.state["src"].detach()
def forward(self, tgt, memory_bank, step=None, **kwargs):
"""Decode, possibly stepwise."""
if step == 0:
self._init_cache(memory_bank)
tgt_words = tgt[:, :, 0].transpose(0, 1)
emb = self.embeddings(tgt, step=step)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_memory_bank = memory_bank.transpose(0, 1).contiguous()
pad_idx = self.embeddings.word_padding_idx
src_lens = kwargs["memory_lengths"]
src_max_len = self.state["src"].shape[0]
src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)
tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1) # [B, 1, T_tgt]
with_align = kwargs.pop('with_align', False)
attn_aligns = []
for i, layer in enumerate(self.transformer_layers):
layer_cache = self.state["cache"]["layer_{}".format(i)] \
if step is not None else None
output, attn, attn_align = layer(
output,
src_memory_bank,
src_pad_mask,
tgt_pad_mask,
layer_cache=layer_cache,
step=step,
with_align=with_align)
if attn_align is not None:
attn_aligns.append(attn_align)
output = self.layer_norm(output)
dec_outs = output.transpose(0, 1).contiguous()
attn = attn.transpose(0, 1).contiguous()
attns = {"std": attn}
if self._copy:
attns["copy"] = attn
if with_align:
attns["align"] = attn_aligns[self.alignment_layer] # `(B, Q, K)`
# attns["align"] = torch.stack(attn_aligns, 0).mean(0) # All avg
# TODO change the way attns is returned dict => list or tuple (onnx)
return dec_outs, attns
def _init_cache(self, memory_bank):
self.state["cache"] = {}
batch_size = memory_bank.size(1)
depth = memory_bank.size(-1)
for i, layer in enumerate(self.transformer_layers):
layer_cache = {"memory_keys": None, "memory_values": None}
if isinstance(layer.self_attn, AverageAttention):
layer_cache["prev_g"] = torch.zeros((batch_size, 1, depth),
device=memory_bank.device)
else:
layer_cache["self_keys"] = None
layer_cache["self_values"] = None
self.state["cache"]["layer_{}".format(i)] = layer_cache
def update_dropout(self, dropout, attention_dropout):
self.embeddings.update_dropout(dropout)
for layer in self.transformer_layers:
layer.update_dropout(dropout, attention_dropout)
| 12,530 | 38.282132 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/decoder.py
|
import torch
import torch.nn as nn
from onmt.models.stacked_rnn import StackedLSTM, StackedGRU
from onmt.modules import context_gate_factory, GlobalAttention
from onmt.utils.rnn_factory import rnn_factory
from onmt.utils.misc import aeq
class DecoderBase(nn.Module):
"""Abstract class for decoders.
Args:
attentional (bool): The decoder returns non-empty attention.
"""
def __init__(self, attentional=True):
super(DecoderBase, self).__init__()
self.attentional = attentional
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor.
Subclasses should override this method.
"""
raise NotImplementedError
class RNNDecoderBase(DecoderBase):
"""Base recurrent attention-based decoder class.
Specifies the interface used by different decoder types
and required by :class:`~onmt.models.NMTModel`.
.. mermaid::
graph BT
A[Input]
subgraph RNN
C[Pos 1]
D[Pos 2]
E[Pos N]
end
G[Decoder State]
H[Decoder State]
I[Outputs]
F[memory_bank]
A--emb-->C
A--emb-->D
A--emb-->E
H-->C
C-- attn --- F
D-- attn --- F
E-- attn --- F
C-->I
D-->I
E-->I
E-->G
F---I
Args:
rnn_type (str):
style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
bidirectional_encoder (bool) : use with a bidirectional encoder
num_layers (int) : number of stacked layers
hidden_size (int) : hidden size of each layer
attn_type (str) : see :class:`~onmt.modules.GlobalAttention`
attn_func (str) : see :class:`~onmt.modules.GlobalAttention`
coverage_attn (str): see :class:`~onmt.modules.GlobalAttention`
context_gate (str): see :class:`~onmt.modules.ContextGate`
copy_attn (bool): setup a separate copy attention mechanism
dropout (float) : dropout value for :class:`torch.nn.Dropout`
embeddings (onmt.modules.Embeddings): embedding module to use
reuse_copy_attn (bool): reuse the attention for copying
copy_attn_type (str): The copy attention style. See
:class:`~onmt.modules.GlobalAttention`.
"""
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
hidden_size, attn_type="general", attn_func="softmax",
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=0.0, embeddings=None,
reuse_copy_attn=False, copy_attn_type="general"):
super(RNNDecoderBase, self).__init__(
attentional=attn_type != "none" and attn_type is not None)
self.bidirectional_encoder = bidirectional_encoder
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embeddings = embeddings
self.dropout = nn.Dropout(dropout)
# Decoder state
self.state = {}
# Build the RNN.
self.rnn = self._build_rnn(rnn_type,
input_size=self._input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout)
# Set up the context gate.
self.context_gate = None
if context_gate is not None:
self.context_gate = context_gate_factory(
context_gate, self._input_size,
hidden_size, hidden_size, hidden_size
)
# Set up the standard attention.
self._coverage = coverage_attn
if not self.attentional:
if self._coverage:
raise ValueError("Cannot use coverage term with no attention.")
self.attn = None
else:
self.attn = GlobalAttention(
hidden_size, coverage=coverage_attn,
attn_type=attn_type, attn_func=attn_func
)
if copy_attn and not reuse_copy_attn:
if copy_attn_type == "none" or copy_attn_type is None:
raise ValueError(
"Cannot use copy_attn with copy_attn_type none")
self.copy_attn = GlobalAttention(
hidden_size, attn_type=copy_attn_type, attn_func=attn_func
)
else:
self.copy_attn = None
self._reuse_copy_attn = reuse_copy_attn and copy_attn
if self._reuse_copy_attn and not self.attentional:
raise ValueError("Cannot reuse copy attention with no attention.")
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.rnn_type,
opt.brnn,
opt.dec_layers,
opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout[0] if type(opt.dropout) is list
else opt.dropout,
embeddings,
opt.reuse_copy_attn,
opt.copy_attn_type)
def init_state(self, src, memory_bank, encoder_final):
"""Initialize decoder state with last state of the encoder."""
def _fix_enc_hidden(hidden):
# The encoder hidden is (layers*directions) x batch x dim.
# We need to convert it to layers x batch x (directions*dim).
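            # For example, a 2-layer bidirectional encoder yields a hidden
            # state of shape (4, batch, dim) ordered
            # [layer0_fwd, layer0_bwd, layer1_fwd, layer1_bwd]; concatenating
            # the even and odd slices along the last dim gives
            # (2, batch, 2 * dim).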
if self.bidirectional_encoder:
hidden = torch.cat([hidden[0:hidden.size(0):2],
hidden[1:hidden.size(0):2]], 2)
return hidden
if isinstance(encoder_final, tuple): # LSTM
self.state["hidden"] = tuple(_fix_enc_hidden(enc_hid)
for enc_hid in encoder_final)
else: # GRU
self.state["hidden"] = (_fix_enc_hidden(encoder_final), )
# Init the input feed.
batch_size = self.state["hidden"][0].size(1)
h_size = (batch_size, self.hidden_size)
self.state["input_feed"] = \
self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
self.state["coverage"] = None
def map_state(self, fn):
self.state["hidden"] = tuple(fn(h, 1) for h in self.state["hidden"])
self.state["input_feed"] = fn(self.state["input_feed"], 1)
if self._coverage and self.state["coverage"] is not None:
self.state["coverage"] = fn(self.state["coverage"], 1)
def detach_state(self):
self.state["hidden"] = tuple(h.detach() for h in self.state["hidden"])
self.state["input_feed"] = self.state["input_feed"].detach()
def forward(self, tgt, memory_bank, memory_lengths=None, step=None,
**kwargs):
"""
Args:
tgt (LongTensor): sequences of padded tokens
``(tgt_len, batch, nfeats)``.
memory_bank (FloatTensor): vectors from the encoder
``(src_len, batch, hidden)``.
memory_lengths (LongTensor): the padded source lengths
``(batch,)``.
Returns:
(FloatTensor, dict[str, FloatTensor]):
* dec_outs: output from the decoder (after attn)
``(tgt_len, batch, hidden)``.
* attns: distribution over src at each tgt
``(tgt_len, batch, src_len)``.
"""
dec_state, dec_outs, attns = self._run_forward_pass(
tgt, memory_bank, memory_lengths=memory_lengths)
# Update the state with the result.
if not isinstance(dec_state, tuple):
dec_state = (dec_state,)
self.state["hidden"] = dec_state
self.state["input_feed"] = dec_outs[-1].unsqueeze(0)
self.state["coverage"] = None
if "coverage" in attns:
self.state["coverage"] = attns["coverage"][-1].unsqueeze(0)
# Concatenates sequence of tensors along a new dimension.
# NOTE: v0.3 to 0.4: dec_outs / attns[*] may not be list
# (in particular in case of SRU) it was not raising error in 0.3
# since stack(Variable) was allowed.
        #       In 0.4, SRU returns a tensor that shouldn't be stacked
if type(dec_outs) == list:
dec_outs = torch.stack(dec_outs)
for k in attns:
if type(attns[k]) == list:
attns[k] = torch.stack(attns[k])
return dec_outs, attns
def update_dropout(self, dropout):
self.dropout.p = dropout
self.embeddings.update_dropout(dropout)
class StdRNNDecoder(RNNDecoderBase):
"""Standard fully batched RNN decoder with attention.
    Faster implementation that uses CuDNN under the hood.
See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.
Based around the approach from
"Neural Machine Translation By Jointly Learning To Align and Translate"
:cite:`Bahdanau2015`
Implemented without input_feeding and currently with no `coverage_attn`
or `copy_attn` support.
"""
def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
"""
Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
``(len, batch, nfeats)``.
memory_bank (FloatTensor): output(tensor sequence) from the
encoder RNN of size ``(src_len, batch, hidden_size)``.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
(Tensor, List[FloatTensor], Dict[str, List[FloatTensor]):
* dec_state: final hidden state from the decoder.
* dec_outs: an array of output of every time
step from the decoder.
* attns: a dictionary of different
type of attention Tensor array of every time
step from the decoder.
"""
assert self.copy_attn is None # TODO, no support yet.
assert not self._coverage # TODO, no support yet.
attns = {}
emb = self.embeddings(tgt)
if isinstance(self.rnn, nn.GRU):
rnn_output, dec_state = self.rnn(emb, self.state["hidden"][0])
else:
rnn_output, dec_state = self.rnn(emb, self.state["hidden"])
# Check
tgt_len, tgt_batch, _ = tgt.size()
output_len, output_batch, _ = rnn_output.size()
aeq(tgt_len, output_len)
aeq(tgt_batch, output_batch)
# Calculate the attention.
if not self.attentional:
dec_outs = rnn_output
else:
dec_outs, p_attn = self.attn(
rnn_output.transpose(0, 1).contiguous(),
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths
)
attns["std"] = p_attn
# Calculate the context gate.
if self.context_gate is not None:
dec_outs = self.context_gate(
emb.view(-1, emb.size(2)),
rnn_output.view(-1, rnn_output.size(2)),
dec_outs.view(-1, dec_outs.size(2))
)
dec_outs = dec_outs.view(tgt_len, tgt_batch, self.hidden_size)
dec_outs = self.dropout(dec_outs)
return dec_state, dec_outs, attns
def _build_rnn(self, rnn_type, **kwargs):
rnn, _ = rnn_factory(rnn_type, **kwargs)
return rnn
@property
def _input_size(self):
return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
"""Input feeding based decoder.
See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.
Based around the input feeding approach from
"Effective Approaches to Attention-based Neural Machine Translation"
:cite:`Luong2015`
.. mermaid::
graph BT
A[Input n-1]
AB[Input n]
subgraph RNN
E[Pos n-1]
F[Pos n]
E --> F
end
G[Encoder]
H[memory_bank n-1]
A --> E
AB --> F
E --> H
G --> H
"""
def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
"""
See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
"""
# Additional args check.
input_feed = self.state["input_feed"].squeeze(0)
input_feed_batch, _ = input_feed.size()
_, tgt_batch, _ = tgt.size()
aeq(tgt_batch, input_feed_batch)
# END Additional args check.
dec_outs = []
attns = {}
if self.attn is not None:
attns["std"] = []
if self.copy_attn is not None or self._reuse_copy_attn:
attns["copy"] = []
if self._coverage:
attns["coverage"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
dec_state = self.state["hidden"]
coverage = self.state["coverage"].squeeze(0) \
if self.state["coverage"] is not None else None
# Input feed concatenates hidden state with
# input at every time step.
for emb_t in emb.split(1):
decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
rnn_output, dec_state = self.rnn(decoder_input, dec_state)
if self.attentional:
decoder_output, p_attn = self.attn(
rnn_output,
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths)
attns["std"].append(p_attn)
else:
decoder_output = rnn_output
if self.context_gate is not None:
# TODO: context gate should be employed
# instead of second RNN transform.
decoder_output = self.context_gate(
decoder_input, rnn_output, decoder_output
)
decoder_output = self.dropout(decoder_output)
input_feed = decoder_output
dec_outs += [decoder_output]
# Update the coverage attention.
if self._coverage:
coverage = p_attn if coverage is None else p_attn + coverage
attns["coverage"] += [coverage]
if self.copy_attn is not None:
_, copy_attn = self.copy_attn(
decoder_output, memory_bank.transpose(0, 1))
attns["copy"] += [copy_attn]
elif self._reuse_copy_attn:
attns["copy"] = attns["std"]
return dec_state, dec_outs, attns
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert rnn_type != "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
return stacked_cell(num_layers, input_size, hidden_size, dropout)
@property
def _input_size(self):
"""Using input feed by concatenating input with attention vectors."""
return self.embeddings.embedding_size + self.hidden_size
def update_dropout(self, dropout):
self.dropout.p = dropout
self.rnn.dropout.p = dropout
self.embeddings.update_dropout(dropout)
| 15,510 | 34.172336 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/__init__.py
|
"""Module defining decoders."""
from onmt.decoders.decoder import DecoderBase, InputFeedRNNDecoder, \
StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.decoders.hierarchical_decoder import HierarchicalRNNDecoder
str2dec = {"rnn": StdRNNDecoder, "ifrnn": InputFeedRNNDecoder,
"cnn": CNNDecoder, "transformer": TransformerDecoder,
"hrnn": HierarchicalRNNDecoder}
__all__ = ["DecoderBase", "TransformerDecoder", "StdRNNDecoder", "CNNDecoder",
"InputFeedRNNDecoder", "str2dec", "HierarchicalRNNDecoder"]
| 620 | 40.4 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/ensemble.py
|
"""Ensemble decoding.
Decodes using multiple models simultaneously,
combining their prediction distributions by averaging.
All models in the ensemble must share a target vocabulary.
"""
import torch
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.decoders.decoder import DecoderBase
from onmt.models import NMTModel
import onmt.model_builder
class EnsembleDecoderOutput(object):
"""Wrapper around multiple decoder final hidden states."""
def __init__(self, model_dec_outs):
self.model_dec_outs = tuple(model_dec_outs)
def squeeze(self, dim=None):
"""Delegate squeeze to avoid modifying
:func:`onmt.translate.translator.Translator.translate_batch()`
"""
return EnsembleDecoderOutput([
x.squeeze(dim) for x in self.model_dec_outs])
def __getitem__(self, index):
return self.model_dec_outs[index]
class EnsembleEncoder(EncoderBase):
"""Dummy Encoder that delegates to individual real Encoders."""
def __init__(self, model_encoders):
super(EnsembleEncoder, self).__init__()
self.model_encoders = nn.ModuleList(model_encoders)
def forward(self, src, lengths=None):
enc_hidden, memory_bank, _ = zip(*[
model_encoder(src, lengths)
for model_encoder in self.model_encoders])
return enc_hidden, memory_bank, lengths
class EnsembleDecoder(DecoderBase):
"""Dummy Decoder that delegates to individual real Decoders."""
def __init__(self, model_decoders):
model_decoders = nn.ModuleList(model_decoders)
attentional = any([dec.attentional for dec in model_decoders])
super(EnsembleDecoder, self).__init__(attentional)
self.model_decoders = model_decoders
def forward(self, tgt, memory_bank, memory_lengths=None, step=None,
**kwargs):
"""See :func:`onmt.decoders.decoder.DecoderBase.forward()`."""
# Memory_lengths is a single tensor shared between all models.
# This assumption will not hold if Translator is modified
# to calculate memory_lengths as something other than the length
# of the input.
dec_outs, attns = zip(*[
model_decoder(
tgt, memory_bank[i],
memory_lengths=memory_lengths, step=step)
for i, model_decoder in enumerate(self.model_decoders)])
mean_attns = self.combine_attns(attns)
return EnsembleDecoderOutput(dec_outs), mean_attns
def combine_attns(self, attns):
result = {}
for key in attns[0].keys():
result[key] = torch.stack(
[attn[key] for attn in attns if attn[key] is not None]).mean(0)
return result
def init_state(self, src, memory_bank, enc_hidden):
""" See :obj:`RNNDecoderBase.init_state()` """
for i, model_decoder in enumerate(self.model_decoders):
model_decoder.init_state(src, memory_bank[i], enc_hidden[i])
def map_state(self, fn):
for model_decoder in self.model_decoders:
model_decoder.map_state(fn)
class EnsembleGenerator(nn.Module):
"""
Dummy Generator that delegates to individual real Generators,
and then averages the resulting target distributions.
"""
def __init__(self, model_generators, raw_probs=False):
super(EnsembleGenerator, self).__init__()
self.model_generators = nn.ModuleList(model_generators)
self._raw_probs = raw_probs
def forward(self, hidden, attn=None, src_map=None):
"""
Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary.
"""
distributions = torch.stack(
[mg(h) if attn is None else mg(h, attn, src_map)
for h, mg in zip(hidden, self.model_generators)]
)
if self._raw_probs:
return torch.log(torch.exp(distributions).mean(0))
else:
return distributions.mean(0)
class EnsembleModel(NMTModel):
"""Dummy NMTModel wrapping individual real NMTModels."""
def __init__(self, models, raw_probs=False):
encoder = EnsembleEncoder(model.encoder for model in models)
decoder = EnsembleDecoder(model.decoder for model in models)
super(EnsembleModel, self).__init__(encoder, decoder)
self.generator = EnsembleGenerator(
[model.generator for model in models], raw_probs)
self.models = nn.ModuleList(models)
def load_test_model(opt):
"""Read in multiple models for ensemble."""
shared_fields = None
shared_model_opt = None
models = []
for model_path in opt.models:
fields, model, model_opt = \
onmt.model_builder.load_test_model(opt, model_path=model_path)
if shared_fields is None:
shared_fields = fields
else:
for key, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(key, field)]
for sn, sf in f_iter:
if sf is not None and 'vocab' in sf.__dict__:
sh_field = shared_fields[key]
try:
sh_f_iter = iter(sh_field)
except TypeError:
sh_f_iter = [(key, sh_field)]
sh_f_dict = dict(sh_f_iter)
assert sf.vocab.stoi == sh_f_dict[sn].vocab.stoi, \
"Ensemble models must use the same " \
"preprocessed data"
models.append(model)
if shared_model_opt is None:
shared_model_opt = model_opt
ensemble_model = EnsembleModel(models, opt.avg_raw_probs)
return shared_fields, ensemble_model, shared_model_opt
| 5,956 | 37.432258 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/cnn_decoder.py
|
"""Implementation of the CNN Decoder part of
"Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
from onmt.modules import ConvMultiStepAttention, GlobalAttention
from onmt.utils.cnn_factory import shape_transform, GatedConv
from onmt.decoders.decoder import DecoderBase
SCALE_WEIGHT = 0.5 ** 0.5
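# sqrt(0.5): the residual scaling used in convolutional seq2seq models.
# Multiplying the sum of two roughly independent, unit-variance terms by
# 1/sqrt(2) keeps the variance of the activations approximately constant.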
class CNNDecoder(DecoderBase):
"""Decoder based on "Convolutional Sequence to Sequence Learning"
:cite:`DBLP:journals/corr/GehringAGYD17`.
Consists of residual convolutional layers, with ConvMultiStepAttention.
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, cnn_kernel_width, dropout, embeddings,
copy_attn_type):
super(CNNDecoder, self).__init__()
self.cnn_kernel_width = cnn_kernel_width
self.embeddings = embeddings
# Decoder State
self.state = {}
input_size = self.embeddings.embedding_size
self.linear = nn.Linear(input_size, hidden_size)
self.conv_layers = nn.ModuleList(
[GatedConv(hidden_size, cnn_kernel_width, dropout, True)
for i in range(num_layers)]
)
self.attn_layers = nn.ModuleList(
[ConvMultiStepAttention(hidden_size) for i in range(num_layers)]
)
# CNNDecoder has its own attention mechanism.
# Set up a separate copy attention layer if needed.
assert not copy_attn, "Copy mechanism not yet tested in conv2conv"
if copy_attn:
self.copy_attn = GlobalAttention(
hidden_size, attn_type=copy_attn_type)
else:
self.copy_attn = None
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.dec_layers,
opt.dec_rnn_size,
opt.global_attention,
opt.copy_attn,
opt.cnn_kernel_width,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
embeddings,
opt.copy_attn_type)
def init_state(self, _, memory_bank, enc_hidden):
"""Init decoder state."""
self.state["src"] = (memory_bank + enc_hidden) * SCALE_WEIGHT
self.state["previous_input"] = None
def map_state(self, fn):
self.state["src"] = fn(self.state["src"], 1)
if self.state["previous_input"] is not None:
self.state["previous_input"] = fn(self.state["previous_input"], 1)
def detach_state(self):
self.state["previous_input"] = self.state["previous_input"].detach()
def forward(self, tgt, memory_bank, step=None, **kwargs):
""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
if self.state["previous_input"] is not None:
tgt = torch.cat([self.state["previous_input"], tgt], 0)
dec_outs = []
attns = {"std": []}
if self.copy_attn is not None:
attns["copy"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_memory_bank_c = self.state["src"].transpose(0, 1).contiguous()
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = torch.zeros(x.size(0), x.size(1), self.cnn_kernel_width - 1, 1)
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_memory_bank_t, src_memory_bank_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
dec_outs = output.transpose(0, 1).contiguous()
if self.state["previous_input"] is not None:
dec_outs = dec_outs[self.state["previous_input"].size(0):]
attn = attn[:, self.state["previous_input"].size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self.copy_attn is not None:
attns["copy"] = attn
# Update the state.
self.state["previous_input"] = tgt
# TODO change the way attns is returned dict => list or tuple (onnx)
return dec_outs, attns
def update_dropout(self, dropout):
for layer in self.conv_layers:
layer.dropout.p = dropout
| 4,890 | 35.5 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/decoders/hierarchical_decoder.py
|
"""Same as normal RNNDecoder but using hierarchical attention"""
import torch
from .decoder import RNNDecoderBase
from ..modules import HierarchicalAttention, context_gate_factory
from ..models.stacked_rnn import StackedLSTM, StackedGRU
from ..utils.rnn_factory import rnn_factory
from ..utils.misc import aeq, nwise, sequence_mask
from torch.nn.utils.rnn import pad_sequence
import onmt
class ContainsNaN(Exception):
pass
def _check_for_nan(tensor):
    if (tensor != tensor).any():  # NaN is the only value not equal to itself
raise ContainsNaN
class HierarchicalRNNDecoder(RNNDecoderBase):
"""Input feeding based decoder.
See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.
Based around the input feeding approach from
"Effective Approaches to Attention-based Neural Machine Translation"
:cite:`Luong2015`
.. mermaid::
graph BT
A[Input n-1]
AB[Input n]
subgraph RNN
E[Pos n-1]
F[Pos n]
E --> F
end
G[Encoder]
H[memory_bank n-1]
A --> E
AB --> F
E --> H
G --> H
"""
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
hidden_size, attn_type="general", attn_func="softmax",
coverage_attn=False, context_gate=None,
copy_attn=False, dropout=0.0, embeddings=None,
reuse_copy_attn=False, copy_attn_type="general", use_pos=True):
super(RNNDecoderBase, self).__init__(
attentional=attn_type != "none" and attn_type is not None)
assert not coverage_attn
self.ent_size = onmt.ENT_SIZE
self.bidirectional_encoder = bidirectional_encoder
self.num_layers = num_layers
if isinstance(hidden_size, tuple):
self.hidden_size = hidden_size[0]
self.units_size = hidden_size[1]
elif isinstance(hidden_size, int):
self.hidden_size = hidden_size
self.units_size = hidden_size
else:
raise ValueError('hidden_size should be one or two ints')
self.embeddings = embeddings
self.dropout = torch.nn.Dropout(dropout)
# Decoder state
self.state = {}
# Build the RNN.
self.rnn_type = rnn_type
self.num_layers = num_layers
self.rnn = self._build_rnn(rnn_type=self.rnn_type,
input_size=self._input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=dropout)
# Set up the context gate.
self.context_gate = None
if context_gate is not None:
self.context_gate = context_gate_factory(
context_gate, self._input_size,
self.hidden_size, self.hidden_size, self.hidden_size
)
# Set up the standard attention.
self._coverage = coverage_attn
if not self.attentional:
if self._coverage:
raise ValueError("Cannot use coverage term with no attention.")
self.attn = None
else:
self.attn = HierarchicalAttention(
(self.hidden_size, self.units_size),
coverage=coverage_attn, use_pos=use_pos,
attn_type=attn_type, attn_func=attn_func)
if copy_attn and not reuse_copy_attn:
if copy_attn_type == "none" or copy_attn_type is None:
raise ValueError(
"Cannot use copy_attn with copy_attn_type none")
self.copy_attn = HierarchicalAttention(
(self.hidden_size, self.units_size),
attn_type=copy_attn_type, attn_func=attn_func,
use_pos=use_pos)
else:
self.copy_attn = None
self._reuse_copy_attn = reuse_copy_attn and copy_attn
if self._reuse_copy_attn and not self.attentional:
raise ValueError("Cannot reuse copy attention with no attention.")
def init_state(self, src, memory_bank, encoder_final):
"""
        Here we initialize the hidden state of the hierarchical_decoder.
        This function only works with the hierarchical_transformer.
        encoder_final is [1, bsz, dim]. We need to:
            - convert it to a tuple if decoder_rnn is LSTM
            - duplicate it to mimic a multi-layer encoder
"""
hidden = encoder_final.repeat(self.num_layers, 1, 1)
self.state["hidden"] = (hidden, hidden) if self.rnn_type == "LSTM" else (hidden, )
# Init the input feed.
batch_size = self.state["hidden"][0].size(1)
h_size = (batch_size, self.hidden_size)
self.state["input_feed"] = \
self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
self.state["coverage"] = None
# super().init_state(src, memory_bank, encoder_final)
# num_dirs = 2 if self.bidirectional_encoder else 1
# def f(hidden):
# # The encoder hidden is (layers*directions) x batch x dim
# tmp_dim, bsz, dim = hidden.shape
# hidden = hidden.view(-1, num_dirs, bsz, dim)
# num_layers = hidden.size(0)
# delta = num_layers - self.num_layers
# if delta > 0:
# return hidden[delta:, ...].view(-1, bsz, dim)
# elif delta < 0:
# for _ in range(delta):
# hidden = torch.cat((hidden, hidden[-1].unsqueeze(0)), dim=0)
# return hidden.view(-1, bsz, dim)
# return hidden.view(-1, bsz, dim)
# if isinstance(encoder_final, tuple):
# hidden = tuple(f(h) for h in encoder_final)
# else:
# hidden = f(encoder_final)
@classmethod
def from_opt(cls, opt, embeddings, dims=None):
"""Alternate constructor."""
"""
dims are the dimention of the table embeddings
It is a tuple of size two (dim_value, dim_pos)
"""
if dims is None:
dims = opt.dec_rnn_size
return cls(
rnn_type=opt.rnn_type,
bidirectional_encoder=opt.brnn,
num_layers=opt.dec_layers,
hidden_size=dims,
attn_type=opt.global_attention,
attn_func=opt.global_attention_function,
coverage_attn=opt.coverage_attn,
context_gate=opt.context_gate,
copy_attn=opt.copy_attn,
dropout=opt.dropout[0] if type(opt.dropout) is list
else opt.dropout,
embeddings=embeddings,
reuse_copy_attn=opt.reuse_copy_attn,
copy_attn_type=opt.copy_attn_type,
use_pos=opt.use_pos)
def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
"""
memory_bank is a tuple (chunks, units, pos_embs, unit_mask, chunk_mask)
"""
# Additional args check.
input_feed = self.state["input_feed"].squeeze(0)
input_feed_batch, _ = input_feed.size()
tgt_len, tgt_batch, _ = tgt.size()
aeq(tgt_batch, input_feed_batch)
# END Additional args check.
dec_outs = []
attns = dict()
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
dec_state = self.state["hidden"]
coverage = self.state["coverage"].squeeze(0) \
if self.state["coverage"] is not None else None
# Input feed concatenates hidden state with
# input at every time step.
for emb_t in emb.split(1):
decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
rnn_output, dec_state = self.rnn(decoder_input, dec_state)
# If the RNN has several layers, we only use the last one to compute
# the attention scores. In pytorch, the outs of the rnn are:
# - rnn_output [seq_len, bsz, n-directions * hidden_size]
# - dec_state [n-layers * n-directions, bsz, hidden_size] * 2
# We unpack the rnn_output on dim 2 and keep the last layer
if self.attentional:
decoder_output, ret = self.attn(
rnn_output,
memory_bank)
for postfix, tensor in ret.items():
key = 'std' + postfix
attns.setdefault(key, list())
attns[key].append(tensor)
else:
decoder_output = rnn_output
if self.context_gate is not None:
# TODO: context gate should be employed
# instead of second RNN transform.
decoder_output = self.context_gate(
decoder_input, rnn_output, decoder_output
)
decoder_output = self.dropout(decoder_output)
input_feed = decoder_output
dec_outs += [decoder_output]
# Update the coverage attention.
if self._coverage:
coverage = p_attn if coverage is None else p_attn + coverage
attns.setdefault('coverage', list())
attns['coverage'].append(coverage)
if self.copy_attn is not None:
_, copy_attn = self.copy_attn(
decoder_output, memory_bank)
for postfix, tensor in copy_attn.items():
key = 'copy' + postfix
attns.setdefault(key, list())
attns[key].append(tensor)
# this trick should save memory because torch.stack creates a new
# object.
for key in list(attns):
if key.startswith('std'):
attns[key] = torch.stack(attns[key])
if self._reuse_copy_attn:
attns[key.replace('std', 'copy')] = attns[key]
return dec_state, dec_outs, attns
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert rnn_type != "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
return stacked_cell(num_layers, input_size, hidden_size, dropout)
@property
def _input_size(self):
"""Using input feed by concatenating input with attention vectors."""
return self.embeddings.embedding_size + self.hidden_size
def update_dropout(self, dropout):
self.dropout.p = dropout
self.rnn.dropout.p = dropout
self.embeddings.update_dropout(dropout)
| 10,819 | 36.439446 | 90 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_copy_generator.py
|
import unittest
from onmt.modules.copy_generator import CopyGenerator, CopyGeneratorLoss
import itertools
from copy import deepcopy
import torch
from torch.nn.functional import softmax
from onmt.tests.utils_for_tests import product_dict
class TestCopyGenerator(unittest.TestCase):
INIT_CASES = list(product_dict(
input_size=[172],
output_size=[319],
pad_idx=[0, 39],
))
PARAMS = list(product_dict(
batch_size=[1, 14],
max_seq_len=[23],
tgt_max_len=[50],
n_extra_words=[107]
))
@classmethod
def dummy_inputs(cls, params, init_case):
hidden = torch.randn((params["batch_size"] * params["tgt_max_len"],
init_case["input_size"]))
attn = torch.randn((params["batch_size"] * params["tgt_max_len"],
params["max_seq_len"]))
src_map = torch.randn((params["max_seq_len"], params["batch_size"],
params["n_extra_words"]))
return hidden, attn, src_map
@classmethod
def expected_shape(cls, params, init_case):
return params["tgt_max_len"] * params["batch_size"], \
init_case["output_size"] + params["n_extra_words"]
def test_copy_gen_forward_shape(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
cgen = CopyGenerator(**init_case)
dummy_in = self.dummy_inputs(params, init_case)
res = cgen(*dummy_in)
expected_shape = self.expected_shape(params, init_case)
self.assertEqual(res.shape, expected_shape, init_case.__str__())
def test_copy_gen_outp_has_no_prob_of_pad(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
cgen = CopyGenerator(**init_case)
dummy_in = self.dummy_inputs(params, init_case)
res = cgen(*dummy_in)
self.assertTrue(
res[:, init_case["pad_idx"]].allclose(torch.tensor(0.0)))
def test_copy_gen_trainable_params_update(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
cgen = CopyGenerator(**init_case)
trainable_params = {n: p for n, p in cgen.named_parameters()
if p.requires_grad}
assert len(trainable_params) > 0 # sanity check
old_weights = deepcopy(trainable_params)
dummy_in = self.dummy_inputs(params, init_case)
res = cgen(*dummy_in)
pretend_loss = res.sum()
pretend_loss.backward()
dummy_optim = torch.optim.SGD(trainable_params.values(), 1)
dummy_optim.step()
for param_name in old_weights.keys():
self.assertTrue(
trainable_params[param_name]
.ne(old_weights[param_name]).any(),
param_name + " " + init_case.__str__())
class TestCopyGeneratorLoss(unittest.TestCase):
INIT_CASES = list(product_dict(
vocab_size=[172],
unk_index=[0, 39],
ignore_index=[1, 17], # pad idx
force_copy=[True, False]
))
PARAMS = list(product_dict(
batch_size=[1, 14],
tgt_max_len=[50],
n_extra_words=[107]
))
@classmethod
def dummy_inputs(cls, params, init_case):
n_unique_src_words = 13
scores = torch.randn((params["batch_size"] * params["tgt_max_len"],
init_case["vocab_size"] + n_unique_src_words))
scores = softmax(scores, dim=1)
align = torch.randint(0, n_unique_src_words,
(params["batch_size"] * params["tgt_max_len"],))
target = torch.randint(0, init_case["vocab_size"],
(params["batch_size"] * params["tgt_max_len"],))
target[0] = init_case["unk_index"]
target[1] = init_case["ignore_index"]
return scores, align, target
@classmethod
def expected_shape(cls, params, init_case):
return (params["batch_size"] * params["tgt_max_len"],)
def test_copy_loss_forward_shape(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
loss = CopyGeneratorLoss(**init_case)
dummy_in = self.dummy_inputs(params, init_case)
res = loss(*dummy_in)
expected_shape = self.expected_shape(params, init_case)
self.assertEqual(res.shape, expected_shape, init_case.__str__())
def test_copy_loss_ignore_index_is_ignored(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
loss = CopyGeneratorLoss(**init_case)
scores, align, target = self.dummy_inputs(params, init_case)
res = loss(scores, align, target)
should_be_ignored = (target == init_case["ignore_index"]).nonzero()
assert len(should_be_ignored) > 0 # otherwise not testing anything
self.assertTrue(res[should_be_ignored].allclose(torch.tensor(0.0)))
def test_copy_loss_output_range_is_positive(self):
for params, init_case in itertools.product(
self.PARAMS, self.INIT_CASES):
loss = CopyGeneratorLoss(**init_case)
dummy_in = self.dummy_inputs(params, init_case)
res = loss(*dummy_in)
self.assertTrue((res >= 0).all())
| 5,518 | 39.284672 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_text_dataset.py
|
import unittest
from onmt.inputters.text_dataset import TextMultiField, TextDataReader
import itertools
import os
from copy import deepcopy
from torchtext.data import Field
from onmt.tests.utils_for_tests import product_dict
class TestTextMultiField(unittest.TestCase):
INIT_CASES = list(product_dict(
base_name=["base_field", "zbase_field"],
base_field=[Field],
feats_fields=[
[],
[("a", Field)],
[("r", Field), ("b", Field)]]))
PARAMS = list(product_dict(
include_lengths=[False, True]))
@classmethod
def initialize_case(cls, init_case, params):
# initialize fields at the top of each unit test to prevent
# any undesired stateful effects
case = deepcopy(init_case)
case["base_field"] = case["base_field"](
include_lengths=params["include_lengths"])
for i, (n, f_cls) in enumerate(case["feats_fields"]):
case["feats_fields"][i] = (n, f_cls(sequential=True))
return case
def test_process_shape(self):
dummy_input_bs_1 = [[
["this", "is", "for", "the", "unittest"],
["NOUN", "VERB", "PREP", "ART", "NOUN"],
["", "", "", "", "MODULE"]]]
dummy_input_bs_5 = [
[["this", "is", "for", "the", "unittest"],
["NOUN", "VERB", "PREP", "ART", "NOUN"],
["", "", "", "", "MODULE"]],
[["batch", "2"],
["NOUN", "NUM"],
["", ""]],
[["batch", "3", "is", "the", "longest", "batch"],
["NOUN", "NUM", "VERB", "ART", "ADJ", "NOUN"],
["", "", "", "", "", ""]],
[["fourth", "batch"],
["ORD", "NOUN"],
["", ""]],
[["and", "another", "one"],
["CONJ", "?", "NUM"],
["", "", ""]]]
for bs, max_len, dummy_input in [
(1, 5, dummy_input_bs_1), (5, 6, dummy_input_bs_5)]:
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
fields = [init_case["base_field"]] \
+ [f for _, f in init_case["feats_fields"]]
nfields = len(fields)
for i, f in enumerate(fields):
all_sents = [b[i] for b in dummy_input]
f.build_vocab(all_sents)
inp_only_desired_fields = [b[:nfields] for b in dummy_input]
data = mf.process(inp_only_desired_fields)
if params["include_lengths"]:
data, lengths = data
self.assertEqual(lengths.shape, (bs,))
expected_shape = (max_len, bs, nfields)
self.assertEqual(data.shape, expected_shape)
def test_preprocess_shape(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
sample_str = "dummy input here ."
proc = mf.preprocess(sample_str)
self.assertEqual(len(proc), len(init_case["feats_fields"]) + 1)
def test_base_field(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
self.assertIs(mf.base_field, init_case["base_field"])
def test_correct_n_fields(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
self.assertEqual(len(mf.fields),
len(init_case["feats_fields"]) + 1)
def test_fields_order_correct(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
fnames = [name for name, _ in init_case["feats_fields"]]
correct_order = [init_case["base_name"]] + list(sorted(fnames))
self.assertEqual([name for name, _ in mf.fields], correct_order)
def test_getitem_0_returns_correct_field(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
self.assertEqual(mf[0][0], init_case["base_name"])
self.assertIs(mf[0][1], init_case["base_field"])
def test_getitem_nonzero_returns_correct_field(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
fnames = [name for name, _ in init_case["feats_fields"]]
if len(fnames) > 0:
ordered_names = list(sorted(fnames))
name2field = dict(init_case["feats_fields"])
for i, name in enumerate(ordered_names, 1):
expected_field = name2field[name]
self.assertIs(mf[i][1], expected_field)
def test_getitem_has_correct_number_of_indexes(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
init_case = self.initialize_case(init_case, params)
mf = TextMultiField(**init_case)
nfields = len(init_case["feats_fields"]) + 1
with self.assertRaises(IndexError):
mf[nfields]
class TestTextDataReader(unittest.TestCase):
def test_read(self):
strings = [
"hello world".encode("utf-8"),
"this's a string with punctuation .".encode("utf-8"),
"ThIs Is A sTrInG wItH oDD CapitALIZAtion".encode("utf-8")
]
rdr = TextDataReader()
for i, ex in enumerate(rdr.read(strings, "src")):
self.assertEqual(ex["src"], strings[i].decode("utf-8"))
class TestTextDataReaderFromFS(unittest.TestCase):
# this test touches the file system, so it could be considered an
# integration test
STRINGS = [
"hello world\n".encode("utf-8"),
"this's a string with punctuation . \n".encode("utf-8"),
"ThIs Is A sTrInG wItH oDD CapitALIZAtion\n".encode("utf-8")
]
FILE_NAME = "test_strings.txt"
@classmethod
def setUpClass(cls):
# write utf-8 bytes
with open(cls.FILE_NAME, "wb") as f:
for str_ in cls.STRINGS:
f.write(str_)
@classmethod
def tearDownClass(cls):
os.remove(cls.FILE_NAME)
def test_read(self):
rdr = TextDataReader()
for i, ex in enumerate(rdr.read(self.FILE_NAME, "src")):
self.assertEqual(ex["src"], self.STRINGS[i].decode("utf-8"))
| 7,251 | 39.741573 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_simple.py
|
import onmt
def test_load():
onmt
pass
| 49 | 6.142857 | 16 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_beam_search.py
|
import unittest
from onmt.translate.beam_search import BeamSearch, GNMTGlobalScorer
from copy import deepcopy
import torch
class GlobalScorerStub(object):
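    # No-op scorer stub: unit length penalty and zero coverage penalty, so
    # the tests below exercise BeamSearch bookkeeping in isolation from
    # GNMTGlobalScorer behavior.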
alpha = 0
beta = 0
def __init__(self):
self.length_penalty = lambda x, alpha: 1.
self.cov_penalty = lambda cov, beta: torch.zeros(
(1, cov.shape[-2]), device=cov.device, dtype=torch.float)
self.has_cov_pen = False
self.has_len_pen = False
def update_global_state(self, beam):
pass
def score(self, beam, scores):
return scores
class TestBeamSearch(unittest.TestCase):
BLOCKED_SCORE = -10e20
def test_advance_with_all_repeats_gets_blocked(self):
# all beams repeat (beam >= 1 repeat dummy scores)
beam_sz = 5
n_words = 100
repeat_idx = 47
ngram_repeat = 3
device_init = torch.zeros(1, 1)
for batch_sz in [1, 3]:
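            # positional args below (assuming the usual OpenNMT-py BeamSearch
            # signature): pad=0, bos=1, eos=2, n_best=2, then global scorer,
            # min_length, max_length, return_attention, block_ngram_repeat,
            # exclusion_tokens, stepwise_penalty and ratio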
beam = BeamSearch(
beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(), 0, 30,
False, ngram_repeat, set(),
False, 0.)
beam.initialize(device_init, torch.randint(0, 30, (batch_sz,)))
for i in range(ngram_repeat + 4):
# predict repeat_idx over and over again
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
word_probs[0::beam_sz, repeat_idx] = 0
attns = torch.randn(1, batch_sz * beam_sz, 53)
beam.advance(word_probs, attns)
if i < ngram_repeat:
# before repeat, scores are either 0 or -inf
expected_scores = torch.tensor(
[0] + [-float('inf')] * (beam_sz - 1))\
.repeat(batch_sz, 1)
self.assertTrue(beam.topk_log_probs.equal(expected_scores))
elif i % ngram_repeat == 0:
# on repeat, `repeat_idx` score is BLOCKED_SCORE
# (but it's still the best score, thus we have
                    # [BLOCKED_SCORE, -inf, -inf, -inf, -inf])
expected_scores = torch.tensor(
[0] + [-float('inf')] * (beam_sz - 1))\
.repeat(batch_sz, 1)
expected_scores[:, 0] = self.BLOCKED_SCORE
self.assertTrue(beam.topk_log_probs.equal(expected_scores))
else:
                    # repetition keeps maximizing the score
# index 0 has been blocked, so repeating=>+0.0 score
# other indexes are -inf so repeating=>BLOCKED_SCORE
# which is higher
expected_scores = torch.tensor(
[0] + [-float('inf')] * (beam_sz - 1))\
.repeat(batch_sz, 1)
expected_scores[:, :] = self.BLOCKED_SCORE
expected_scores = torch.tensor(
self.BLOCKED_SCORE).repeat(batch_sz, beam_sz)
def test_advance_with_some_repeats_gets_blocked(self):
# beam 0 and beam >=2 will repeat (beam >= 2 repeat dummy scores)
beam_sz = 5
n_words = 100
repeat_idx = 47
ngram_repeat = 3
no_repeat_score = -2.3
repeat_score = -0.1
device_init = torch.zeros(1, 1)
for batch_sz in [1, 3]:
beam = BeamSearch(
beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(), 0, 30,
False, ngram_repeat, set(),
False, 0.)
beam.initialize(device_init, torch.randint(0, 30, (batch_sz,)))
for i in range(ngram_repeat + 4):
# non-interesting beams are going to get dummy values
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
if i == 0:
# on initial round, only predicted scores for beam 0
# matter. Make two predictions. Top one will be repeated
# in beam zero, second one will live on in beam 1.
word_probs[0::beam_sz, repeat_idx] = repeat_score
word_probs[0::beam_sz, repeat_idx +
i + 1] = no_repeat_score
else:
# predict the same thing in beam 0
word_probs[0::beam_sz, repeat_idx] = 0
# continue pushing around what beam 1 predicts
word_probs[1::beam_sz, repeat_idx + i + 1] = 0
attns = torch.randn(1, batch_sz * beam_sz, 53)
beam.advance(word_probs, attns)
if i < ngram_repeat:
self.assertFalse(
beam.topk_log_probs[0::beam_sz].eq(
self.BLOCKED_SCORE).any())
self.assertFalse(
beam.topk_log_probs[1::beam_sz].eq(
self.BLOCKED_SCORE).any())
elif i == ngram_repeat:
# now beam 0 dies (along with the others), beam 1 -> beam 0
self.assertFalse(
beam.topk_log_probs[:, 0].eq(
self.BLOCKED_SCORE).any())
expected = torch.full([batch_sz, beam_sz], float("-inf"))
expected[:, 0] = no_repeat_score
expected[:, 1] = self.BLOCKED_SCORE
self.assertTrue(
beam.topk_log_probs[:, :].equal(expected))
else:
# now beam 0 dies (along with the others), beam 1 -> beam 0
self.assertFalse(
beam.topk_log_probs[:, 0].eq(
self.BLOCKED_SCORE).any())
expected = torch.full([batch_sz, beam_sz], float("-inf"))
expected[:, 0] = no_repeat_score
expected[:, 1:] = self.BLOCKED_SCORE
self.assertTrue(
beam.topk_log_probs.equal(expected))
def test_repeating_excluded_index_does_not_die(self):
# beam 0 and beam >= 2 will repeat (beam 2 repeats excluded idx)
beam_sz = 5
n_words = 100
repeat_idx = 47 # will be repeated and should be blocked
repeat_idx_ignored = 7 # will be repeated and should not be blocked
ngram_repeat = 3
device_init = torch.zeros(1, 1)
for batch_sz in [1, 3]:
beam = BeamSearch(
beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(), 0, 30,
False, ngram_repeat, {repeat_idx_ignored},
False, 0.)
beam.initialize(device_init, torch.randint(0, 30, (batch_sz,)))
for i in range(ngram_repeat + 4):
# non-interesting beams are going to get dummy values
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
if i == 0:
word_probs[0::beam_sz, repeat_idx] = -0.1
word_probs[0::beam_sz, repeat_idx + i + 1] = -2.3
word_probs[0::beam_sz, repeat_idx_ignored] = -5.0
else:
# predict the same thing in beam 0
word_probs[0::beam_sz, repeat_idx] = 0
# continue pushing around what beam 1 predicts
word_probs[1::beam_sz, repeat_idx + i + 1] = 0
# predict the allowed-repeat again in beam 2
word_probs[2::beam_sz, repeat_idx_ignored] = 0
attns = torch.randn(1, batch_sz * beam_sz, 53)
beam.advance(word_probs, attns)
if i < ngram_repeat:
self.assertFalse(beam.topk_log_probs[:, 0].eq(
self.BLOCKED_SCORE).any())
self.assertFalse(beam.topk_log_probs[:, 1].eq(
self.BLOCKED_SCORE).any())
self.assertFalse(beam.topk_log_probs[:, 2].eq(
self.BLOCKED_SCORE).any())
else:
# now beam 0 dies, beam 1 -> beam 0, beam 2 -> beam 1
# and the rest die
self.assertFalse(beam.topk_log_probs[:, 0].eq(
self.BLOCKED_SCORE).any())
# since all preds after i=0 are 0, we can check
# that the beam is the correct idx by checking that
# the curr score is the initial score
self.assertTrue(beam.topk_log_probs[:, 0].eq(-2.3).all())
self.assertFalse(beam.topk_log_probs[:, 1].eq(
self.BLOCKED_SCORE).all())
self.assertTrue(beam.topk_log_probs[:, 1].eq(-5.0).all())
self.assertTrue(beam.topk_log_probs[:, 2].eq(
self.BLOCKED_SCORE).all())
def test_doesnt_predict_eos_if_shorter_than_min_len(self):
# beam 0 will always predict EOS. The other beams will predict
# non-eos scores.
for batch_sz in [1, 3]:
beam_sz = 5
n_words = 100
_non_eos_idxs = [47, 51, 13, 88, 99]
valid_score_dist = torch.log_softmax(torch.tensor(
[6., 5., 4., 3., 2., 1.]), dim=0)
min_length = 5
eos_idx = 2
lengths = torch.randint(0, 30, (batch_sz,))
beam = BeamSearch(beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(),
min_length, 30, False, 0, set(),
False, 0.)
device_init = torch.zeros(1, 1)
beam.initialize(device_init, lengths)
all_attns = []
for i in range(min_length + 4):
# non-interesting beams are going to get dummy values
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
if i == 0:
# "best" prediction is eos - that should be blocked
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
# include at least beam_sz predictions OTHER than EOS
# that are greater than -1e20
for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
word_probs[0::beam_sz, j] = score
else:
# predict eos in beam 0
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
# provide beam_sz other good predictions
for k, (j, score) in enumerate(
zip(_non_eos_idxs, valid_score_dist[1:])):
beam_idx = min(beam_sz - 1, k)
word_probs[beam_idx::beam_sz, j] = score
attns = torch.randn(1, batch_sz * beam_sz, 53)
all_attns.append(attns)
beam.advance(word_probs, attns)
if i < min_length:
expected_score_dist = \
(i + 1) * valid_score_dist[1:].unsqueeze(0)
self.assertTrue(
beam.topk_log_probs.allclose(
expected_score_dist))
elif i == min_length:
# now the top beam has ended and no others have
self.assertTrue(beam.is_finished[:, 0].eq(1).all())
self.assertTrue(beam.is_finished[:, 1:].eq(0).all())
else: # i > min_length
# not of interest, but want to make sure it keeps running
# since only beam 0 terminates and n_best = 2
pass
def test_beam_is_done_when_n_best_beams_eos_using_min_length(self):
# this is also a test that when block_ngram_repeat=0,
# repeating is acceptable
beam_sz = 5
batch_sz = 3
n_words = 100
_non_eos_idxs = [47, 51, 13, 88, 99]
valid_score_dist = torch.log_softmax(torch.tensor(
[6., 5., 4., 3., 2., 1.]), dim=0)
min_length = 5
eos_idx = 2
beam = BeamSearch(
beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(),
min_length, 30, False, 0, set(),
False, 0.)
device_init = torch.zeros(1, 1)
beam.initialize(device_init, torch.randint(0, 30, (batch_sz,)))
for i in range(min_length + 4):
# non-interesting beams are going to get dummy values
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
if i == 0:
# "best" prediction is eos - that should be blocked
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
# include at least beam_sz predictions OTHER than EOS
# that are greater than -1e20
for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
word_probs[0::beam_sz, j] = score
elif i <= min_length:
# predict eos in beam 1
word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
# provide beam_sz other good predictions in other beams
for k, (j, score) in enumerate(
zip(_non_eos_idxs, valid_score_dist[1:])):
beam_idx = min(beam_sz - 1, k)
word_probs[beam_idx::beam_sz, j] = score
else:
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
# provide beam_sz other good predictions in other beams
for k, (j, score) in enumerate(
zip(_non_eos_idxs, valid_score_dist[1:])):
beam_idx = min(beam_sz - 1, k)
word_probs[beam_idx::beam_sz, j] = score
attns = torch.randn(1, batch_sz * beam_sz, 53)
beam.advance(word_probs, attns)
if i < min_length:
self.assertFalse(beam.done)
elif i == min_length:
# beam 1 dies on min_length
self.assertTrue(beam.is_finished[:, 1].all())
beam.update_finished()
self.assertFalse(beam.done)
else: # i > min_length
# beam 0 dies on the step after beam 1 dies
self.assertTrue(beam.is_finished[:, 0].all())
beam.update_finished()
self.assertTrue(beam.done)
def test_beam_returns_attn_with_correct_length(self):
beam_sz = 5
batch_sz = 3
n_words = 100
_non_eos_idxs = [47, 51, 13, 88, 99]
valid_score_dist = torch.log_softmax(torch.tensor(
[6., 5., 4., 3., 2., 1.]), dim=0)
min_length = 5
eos_idx = 2
inp_lens = torch.randint(1, 30, (batch_sz,))
beam = BeamSearch(
beam_sz, batch_sz, 0, 1, 2, 2,
GlobalScorerStub(),
min_length, 30, True, 0, set(),
False, 0.)
device_init = torch.zeros(1, 1)
_, _, inp_lens, _ = beam.initialize(device_init, inp_lens)
# inp_lens is tiled in initialize, reassign to make attn match
for i in range(min_length + 2):
# non-interesting beams are going to get dummy values
word_probs = torch.full(
(batch_sz * beam_sz, n_words), -float('inf'))
if i == 0:
# "best" prediction is eos - that should be blocked
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
# include at least beam_sz predictions OTHER than EOS
# that are greater than -1e20
for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
word_probs[0::beam_sz, j] = score
elif i <= min_length:
# predict eos in beam 1
word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
# provide beam_sz other good predictions in other beams
for k, (j, score) in enumerate(
zip(_non_eos_idxs, valid_score_dist[1:])):
beam_idx = min(beam_sz - 1, k)
word_probs[beam_idx::beam_sz, j] = score
else:
word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
# provide beam_sz other good predictions in other beams
for k, (j, score) in enumerate(
zip(_non_eos_idxs, valid_score_dist[1:])):
beam_idx = min(beam_sz - 1, k)
word_probs[beam_idx::beam_sz, j] = score
attns = torch.randn(1, batch_sz * beam_sz, 53)
beam.advance(word_probs, attns)
if i < min_length:
self.assertFalse(beam.done)
# no top beams are finished yet
for b in range(batch_sz):
self.assertEqual(beam.attention[b], [])
elif i == min_length:
# beam 1 dies on min_length
self.assertTrue(beam.is_finished[:, 1].all())
beam.update_finished()
self.assertFalse(beam.done)
# no top beams are finished yet
for b in range(batch_sz):
self.assertEqual(beam.attention[b], [])
else: # i > min_length
# beam 0 dies on the step after beam 1 dies
self.assertTrue(beam.is_finished[:, 0].all())
beam.update_finished()
self.assertTrue(beam.done)
# top beam is finished now so there are attentions
for b in range(batch_sz):
# two beams are finished in each batch
self.assertEqual(len(beam.attention[b]), 2)
for k in range(2):
# second dim is cut down to the non-padded src length
self.assertEqual(beam.attention[b][k].shape[-1],
inp_lens[b])
# first dim is equal to the time of death
# (beam 0 died at current step - adjust for SOS)
self.assertEqual(beam.attention[b][0].shape[0], i + 1)
# (beam 1 died at last step - adjust for SOS)
self.assertEqual(beam.attention[b][1].shape[0], i)
# behavior gets weird when beam is already done so just stop
break
class TestBeamSearchAgainstReferenceCase(unittest.TestCase):
# this is just test_beam.TestBeamAgainstReferenceCase repeated
# in each batch.
BEAM_SZ = 5
    EOS_IDX = 2  # don't change this - all the scores would need to be updated
N_WORDS = 8 # also don't change for same reason
N_BEST = 3
DEAD_SCORE = -1e20
BATCH_SZ = 3
INP_SEQ_LEN = 53
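    # scripted scenario: after the init step, one beam reaches EOS on each of
    # steps 1-3, so with N_BEST = 3 the search is done after third_step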
def random_attn(self):
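        # one decoder step of dummy attention: (steps=1, batch * beam, src_len)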
return torch.randn(1, self.BATCH_SZ * self.BEAM_SZ, self.INP_SEQ_LEN)
def init_step(self, beam, expected_len_pen):
# init_preds: [4, 3, 5, 6, 7] - no EOS's
init_scores = torch.log_softmax(torch.tensor(
[[0, 0, 0, 4, 5, 3, 2, 1]], dtype=torch.float), dim=1)
init_scores = deepcopy(init_scores.repeat(
self.BATCH_SZ * self.BEAM_SZ, 1))
new_scores = init_scores + beam.topk_log_probs.view(-1).unsqueeze(1)
expected_beam_scores, expected_preds_0 = new_scores \
.view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS) \
.topk(self.BEAM_SZ, dim=-1)
beam.advance(deepcopy(init_scores), self.random_attn())
self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
self.assertTrue(beam.topk_ids.equal(expected_preds_0))
self.assertFalse(beam.is_finished.any())
self.assertFalse(beam.done)
return expected_beam_scores
def first_step(self, beam, expected_beam_scores, expected_len_pen):
# no EOS's yet
assert beam.is_finished.sum() == 0
scores_1 = torch.log_softmax(torch.tensor(
[[0, 0, 0, .3, 0, .51, .2, 0],
[0, 0, 1.5, 0, 0, 0, 0, 0],
[0, 0, 0, 0, .49, .48, 0, 0],
[0, 0, 0, .2, .2, .2, .2, .2],
[0, 0, 0, .2, .2, .2, .2, .2]]
), dim=1)
scores_1 = scores_1.repeat(self.BATCH_SZ, 1)
beam.advance(deepcopy(scores_1), self.random_attn())
new_scores = scores_1 + expected_beam_scores.view(-1).unsqueeze(1)
expected_beam_scores, unreduced_preds = new_scores\
.view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
.topk(self.BEAM_SZ, -1)
        # floor division keeps backpointers as integer tensors on newer torch
        expected_bptr_1 = unreduced_preds // self.N_WORDS
# [5, 3, 2, 6, 0], so beam 2 predicts EOS!
expected_preds_1 = unreduced_preds - expected_bptr_1 * self.N_WORDS
self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
self.assertTrue(beam.topk_scores.allclose(
expected_beam_scores / expected_len_pen))
self.assertTrue(beam.topk_ids.equal(expected_preds_1))
self.assertTrue(beam.current_backptr.equal(expected_bptr_1))
self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
self.assertTrue(beam.is_finished[:, 2].all()) # beam 2 finished
beam.update_finished()
self.assertFalse(beam.top_beam_finished.any())
self.assertFalse(beam.done)
return expected_beam_scores
def second_step(self, beam, expected_beam_scores, expected_len_pen):
# assumes beam 2 finished on last step
scores_2 = torch.log_softmax(torch.tensor(
[[0, 0, 0, .3, 0, .51, .2, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 5000, .48, 0, 0], # beam 2 shouldn't continue
[0, 0, 50, .2, .2, .2, .2, .2], # beam 3 -> beam 0 should die
[0, 0, 0, .2, .2, .2, .2, .2]]
), dim=1)
scores_2 = scores_2.repeat(self.BATCH_SZ, 1)
beam.advance(deepcopy(scores_2), self.random_attn())
# ended beam 2 shouldn't continue
expected_beam_scores[:, 2::self.BEAM_SZ] = self.DEAD_SCORE
new_scores = scores_2 + expected_beam_scores.view(-1).unsqueeze(1)
expected_beam_scores, unreduced_preds = new_scores\
.view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
.topk(self.BEAM_SZ, -1)
        expected_bptr_2 = unreduced_preds // self.N_WORDS
# [2, 5, 3, 6, 0] repeat self.BATCH_SZ, so beam 0 predicts EOS!
expected_preds_2 = unreduced_preds - expected_bptr_2 * self.N_WORDS
# [-2.4879, -3.8910, -4.1010, -4.2010, -4.4010] repeat self.BATCH_SZ
self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
self.assertTrue(beam.topk_scores.allclose(
expected_beam_scores / expected_len_pen))
self.assertTrue(beam.topk_ids.equal(expected_preds_2))
self.assertTrue(beam.current_backptr.equal(expected_bptr_2))
# another beam is finished in all batches
self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
# new beam 0 finished
self.assertTrue(beam.is_finished[:, 0].all())
# new beam 0 is old beam 3
self.assertTrue(expected_bptr_2[:, 0].eq(3).all())
beam.update_finished()
self.assertTrue(beam.top_beam_finished.all())
self.assertFalse(beam.done)
return expected_beam_scores
def third_step(self, beam, expected_beam_scores, expected_len_pen):
# assumes beam 0 finished on last step
scores_3 = torch.log_softmax(torch.tensor(
            [[0, 0, 5000, 0, 5000, .51, .2, 0],  # beam 0 shouldn't continue
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 5000, 0, 0],
[0, 0, 0, .2, .2, .2, .2, .2],
[0, 0, 50, 0, .2, .2, .2, .2]] # beam 4 -> beam 1 should die
), dim=1)
scores_3 = scores_3.repeat(self.BATCH_SZ, 1)
beam.advance(deepcopy(scores_3), self.random_attn())
expected_beam_scores[:, 0::self.BEAM_SZ] = self.DEAD_SCORE
new_scores = scores_3 + expected_beam_scores.view(-1).unsqueeze(1)
expected_beam_scores, unreduced_preds = new_scores\
.view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
.topk(self.BEAM_SZ, -1)
        expected_bptr_3 = unreduced_preds // self.N_WORDS
# [5, 2, 6, 1, 0] repeat self.BATCH_SZ, so beam 1 predicts EOS!
expected_preds_3 = unreduced_preds - expected_bptr_3 * self.N_WORDS
self.assertTrue(beam.topk_log_probs.allclose(
expected_beam_scores))
self.assertTrue(beam.topk_scores.allclose(
expected_beam_scores / expected_len_pen))
self.assertTrue(beam.topk_ids.equal(expected_preds_3))
self.assertTrue(beam.current_backptr.equal(expected_bptr_3))
self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
# new beam 1 finished
self.assertTrue(beam.is_finished[:, 1].all())
# new beam 1 is old beam 4
self.assertTrue(expected_bptr_3[:, 1].eq(4).all())
beam.update_finished()
self.assertTrue(beam.top_beam_finished.all())
self.assertTrue(beam.done)
return expected_beam_scores
def test_beam_advance_against_known_reference(self):
beam = BeamSearch(
self.BEAM_SZ, self.BATCH_SZ, 0, 1, 2, self.N_BEST,
GlobalScorerStub(),
0, 30, False, 0, set(),
False, 0.)
device_init = torch.zeros(1, 1)
beam.initialize(device_init, torch.randint(0, 30, (self.BATCH_SZ,)))
expected_beam_scores = self.init_step(beam, 1)
expected_beam_scores = self.first_step(beam, expected_beam_scores, 1)
expected_beam_scores = self.second_step(beam, expected_beam_scores, 1)
self.third_step(beam, expected_beam_scores, 1)
class TestBeamWithLengthPenalty(TestBeamSearchAgainstReferenceCase):
# this could be considered an integration test because it tests
# interactions between the GNMT scorer and the beam
def test_beam_advance_against_known_reference(self):
scorer = GNMTGlobalScorer(0.7, 0., "avg", "none")
beam = BeamSearch(
self.BEAM_SZ, self.BATCH_SZ, 0, 1, 2, self.N_BEST,
scorer,
0, 30, False, 0, set(),
False, 0.)
device_init = torch.zeros(1, 1)
beam.initialize(device_init, torch.randint(0, 30, (self.BATCH_SZ,)))
expected_beam_scores = self.init_step(beam, 1.)
expected_beam_scores = self.first_step(beam, expected_beam_scores, 3)
expected_beam_scores = self.second_step(beam, expected_beam_scores, 4)
self.third_step(beam, expected_beam_scores, 5)
| 27,033 | 46.345009 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_translation_server.py
|
import unittest
from onmt.translate.translation_server import ServerModel, TranslationServer
import os
from six import string_types
from textwrap import dedent
import torch
from onmt.translate.translator import Translator
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class TestServerModel(unittest.TestCase):
def test_deferred_loading_model_and_unload(self):
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=False)
self.assertFalse(sm.loaded)
sm.load()
self.assertTrue(sm.loaded)
self.assertIsInstance(sm.translator, Translator)
sm.unload()
self.assertFalse(sm.loaded)
def test_load_model_on_init_and_unload(self):
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
self.assertTrue(sm.loaded)
self.assertIsInstance(sm.translator, Translator)
sm.unload()
self.assertFalse(sm.loaded)
def test_tokenizing_with_no_tokenizer_fails(self):
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
with self.assertRaises(ValueError):
sm.tokenize("hello world")
def test_detokenizing_with_no_tokenizer_fails(self):
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
with self.assertRaises(ValueError):
sm.detokenize("hello world")
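    # The GPU tests below are only defined when CUDA is available, so
    # CPU-only environments skip them entirely.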
if torch.cuda.is_available():
def test_moving_to_gpu_and_back(self):
torch.cuda.set_device(torch.device("cuda", 0))
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cpu")
sm.to_gpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cuda")
self.assertEqual(p.device.index, 0)
sm.to_cpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cpu")
def test_initialize_on_gpu_and_move_back(self):
torch.cuda.set_device(torch.device("cuda", 0))
model_id = 0
opt = {"models": ["test_model.pt"], "gpu": 0}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cuda")
self.assertEqual(p.device.index, 0)
sm.to_gpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cuda")
self.assertEqual(p.device.index, 0)
sm.to_cpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cpu")
if torch.cuda.device_count() > 1:
def test_initialize_on_nonzero_gpu_and_back(self):
torch.cuda.set_device(torch.device("cuda", 1))
model_id = 0
opt = {"models": ["test_model.pt"], "gpu": 1}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root,
load=True)
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cuda")
self.assertEqual(p.device.index, 1)
sm.to_gpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cuda")
self.assertEqual(p.device.index, 1)
sm.to_cpu()
for p in sm.translator.model.parameters():
self.assertEqual(p.device.type, "cpu")
def test_run(self):
model_id = 0
opt = {"models": ["test_model.pt"]}
model_root = TEST_DIR
sm = ServerModel(opt, model_id, model_root=model_root, load=True)
inp = [{"src": "hello how are you today"},
{"src": "good morning to you ."}]
results, scores, n_best, time, aligns = sm.run(inp)
self.assertIsInstance(results, list)
for sentence_string in results:
self.assertIsInstance(sentence_string, string_types)
self.assertIsInstance(scores, list)
for elem in scores:
self.assertIsInstance(elem, float)
self.assertIsInstance(aligns, list)
for align_string in aligns:
if align_string is not None:
self.assertIsInstance(align_string, string_types)
self.assertEqual(len(results), len(scores))
self.assertEqual(len(scores), len(inp) * n_best)
self.assertEqual(len(time), 1)
self.assertIsInstance(time, dict)
self.assertIn("translation", time)
class TestTranslationServer(unittest.TestCase):
# this could be considered an integration test because it touches
# the filesystem for the config file (and the models)
CFG_F = os.path.join(
TEST_DIR, "test_translation_server_config_file.json")
def tearDown(self):
if os.path.exists(self.CFG_F):
os.remove(self.CFG_F)
def write(self, cfg):
with open(self.CFG_F, "w") as f:
f.write(cfg)
CFG_NO_LOAD = dedent("""\
{
"models_root": "%s",
"models": [
{
"id": 100,
"model": "test_model.pt",
"timeout": -1,
"on_timeout": "to_cpu",
"load": false,
"opt": {
"beam_size": 5
}
}
]
}
""" % TEST_DIR)
def test_start_without_initial_loading(self):
self.write(self.CFG_NO_LOAD)
sv = TranslationServer()
sv.start(self.CFG_F)
self.assertFalse(sv.models[100].loaded)
self.assertEqual(set(sv.models.keys()), {100})
CFG_LOAD = dedent("""\
{
"models_root": "%s",
"models": [
{
"id": 100,
"model": "test_model.pt",
"timeout": -1,
"on_timeout": "to_cpu",
"load": true,
"opt": {
"beam_size": 5
}
}
]
}
""" % TEST_DIR)
def test_start_with_initial_loading(self):
self.write(self.CFG_LOAD)
sv = TranslationServer()
sv.start(self.CFG_F)
self.assertTrue(sv.models[100].loaded)
self.assertEqual(set(sv.models.keys()), {100})
CFG_2_MODELS = dedent("""\
{
"models_root": "%s",
"models": [
{
"id": 100,
"model": "test_model.pt",
"timeout": -1,
"on_timeout": "to_cpu",
"load": true,
"opt": {
"beam_size": 5
}
},
{
"id": 1000,
"model": "test_model2.pt",
"timeout": -1,
"on_timeout": "to_cpu",
"load": false,
"opt": {
"beam_size": 5
}
}
]
}
""" % TEST_DIR)
def test_start_with_two_models(self):
self.write(self.CFG_2_MODELS)
sv = TranslationServer()
sv.start(self.CFG_F)
self.assertTrue(sv.models[100].loaded)
self.assertFalse(sv.models[1000].loaded)
self.assertEqual(set(sv.models.keys()), {100, 1000})
| 8,233 | 34.339056 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_greedy_search.py
|
import unittest
from onmt.translate.greedy_search import GreedySearch
import torch
class TestGreedySearch(unittest.TestCase):
BATCH_SZ = 3
INP_SEQ_LEN = 53
DEAD_SCORE = -1e20
BLOCKED_SCORE = -10e20
def test_doesnt_predict_eos_if_shorter_than_min_len(self):
# batch 0 will always predict EOS. The other batches will predict
# non-eos scores.
for batch_sz in [1, 3]:
n_words = 100
_non_eos_idxs = [47]
valid_score_dist = torch.log_softmax(torch.tensor(
[6., 5.]), dim=0)
min_length = 5
eos_idx = 2
lengths = torch.randint(0, 30, (batch_sz,))
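            # positional args below (assuming the usual OpenNMT-py
            # GreedySearch signature): pad=0, bos=1, eos=2, batch_size,
            # min_length, block_ngram_repeat, exclusion_tokens,
            # return_attention, max_length, sampling_temp, keep_topk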
samp = GreedySearch(
0, 1, 2, batch_sz, min_length,
False, set(), False, 30, 1., 1)
samp.initialize(torch.zeros(1), lengths)
all_attns = []
for i in range(min_length + 4):
word_probs = torch.full(
(batch_sz, n_words), -float('inf'))
# "best" prediction is eos - that should be blocked
word_probs[0, eos_idx] = valid_score_dist[0]
# include at least one prediction OTHER than EOS
# that is greater than -1e20
word_probs[0, _non_eos_idxs[0]] = valid_score_dist[1]
word_probs[1:, _non_eos_idxs[0] + i] = 0
attns = torch.randn(1, batch_sz, 53)
all_attns.append(attns)
samp.advance(word_probs, attns)
if i < min_length:
self.assertTrue(
samp.topk_scores[0].allclose(valid_score_dist[1]))
self.assertTrue(
samp.topk_scores[1:].eq(0).all())
elif i == min_length:
# now batch 0 has ended and no others have
self.assertTrue(samp.is_finished[0, :].eq(1).all())
self.assertTrue(samp.is_finished[1:, 1:].eq(0).all())
else: # i > min_length
break
def test_returns_correct_scores_deterministic(self):
for batch_sz in [1, 13]:
for temp in [1., 3.]:
n_words = 100
_non_eos_idxs = [47, 51, 13, 88, 99]
valid_score_dist_1 = torch.log_softmax(torch.tensor(
[6., 5., 4., 3., 2., 1.]), dim=0)
valid_score_dist_2 = torch.log_softmax(torch.tensor(
[6., 1.]), dim=0)
eos_idx = 2
lengths = torch.randint(0, 30, (batch_sz,))
samp = GreedySearch(
0, 1, 2, batch_sz, 0,
False, set(), False, 30, temp, 1)
samp.initialize(torch.zeros(1), lengths)
# initial step
i = 0
word_probs = torch.full(
(batch_sz, n_words), -float('inf'))
# batch 0 dies on step 0
word_probs[0, eos_idx] = valid_score_dist_1[0]
# include at least one prediction OTHER than EOS
# that is greater than -1e20
word_probs[0, _non_eos_idxs] = valid_score_dist_1[1:]
word_probs[1:, _non_eos_idxs[0] + i] = 0
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
self.assertTrue(samp.is_finished[0].eq(1).all())
samp.update_finished()
self.assertEqual(
samp.scores[0], [valid_score_dist_1[0] / temp])
if batch_sz == 1:
self.assertTrue(samp.done)
continue
else:
self.assertFalse(samp.done)
# step 2
i = 1
word_probs = torch.full(
(batch_sz - 1, n_words), -float('inf'))
# (old) batch 8 dies on step 1
word_probs[7, eos_idx] = valid_score_dist_2[0]
word_probs[0:7, _non_eos_idxs[:2]] = valid_score_dist_2
word_probs[8:, _non_eos_idxs[:2]] = valid_score_dist_2
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
self.assertTrue(samp.is_finished[7].eq(1).all())
samp.update_finished()
self.assertEqual(
samp.scores[8], [valid_score_dist_2[0] / temp])
# step 3
i = 2
word_probs = torch.full(
(batch_sz - 2, n_words), -float('inf'))
# everything dies
word_probs[:, eos_idx] = 0
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
self.assertTrue(samp.is_finished.eq(1).all())
samp.update_finished()
for b in range(batch_sz):
if b != 0 and b != 8:
self.assertEqual(samp.scores[b], [0])
self.assertTrue(samp.done)
def test_returns_correct_scores_non_deterministic(self):
for batch_sz in [1, 13]:
for temp in [1., 3.]:
n_words = 100
_non_eos_idxs = [47, 51, 13, 88, 99]
valid_score_dist_1 = torch.log_softmax(torch.tensor(
[6., 5., 4., 3., 2., 1.]), dim=0)
valid_score_dist_2 = torch.log_softmax(torch.tensor(
[6., 1.]), dim=0)
eos_idx = 2
lengths = torch.randint(0, 30, (batch_sz,))
samp = GreedySearch(
0, 1, 2, batch_sz, 0,
False, set(), False, 30, temp, 2)
samp.initialize(torch.zeros(1), lengths)
# initial step
i = 0
for _ in range(100):
word_probs = torch.full(
(batch_sz, n_words), -float('inf'))
# batch 0 dies on step 0
word_probs[0, eos_idx] = valid_score_dist_1[0]
# include at least one prediction OTHER than EOS
# that is greater than -1e20
word_probs[0, _non_eos_idxs] = valid_score_dist_1[1:]
word_probs[1:, _non_eos_idxs[0] + i] = 0
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
if samp.is_finished[0].eq(1).all():
break
else:
self.fail("Batch 0 never ended (very unlikely but maybe "
"due to stochasticisty. If so, please increase "
"the range of the for-loop.")
samp.update_finished()
self.assertEqual(
samp.scores[0], [valid_score_dist_1[0] / temp])
if batch_sz == 1:
self.assertTrue(samp.done)
continue
else:
self.assertFalse(samp.done)
# step 2
i = 1
for _ in range(100):
word_probs = torch.full(
(batch_sz - 1, n_words), -float('inf'))
# (old) batch 8 dies on step 1
word_probs[7, eos_idx] = valid_score_dist_2[0]
word_probs[0:7, _non_eos_idxs[:2]] = valid_score_dist_2
word_probs[8:, _non_eos_idxs[:2]] = valid_score_dist_2
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
if samp.is_finished[7].eq(1).all():
break
else:
self.fail("Batch 8 never ended (very unlikely but maybe "
"due to stochasticisty. If so, please increase "
"the range of the for-loop.")
samp.update_finished()
self.assertEqual(
samp.scores[8], [valid_score_dist_2[0] / temp])
# step 3
i = 2
for _ in range(250):
word_probs = torch.full(
(samp.alive_seq.shape[0], n_words), -float('inf'))
# everything dies
word_probs[:, eos_idx] = 0
attns = torch.randn(1, batch_sz, 53)
samp.advance(word_probs, attns)
if samp.is_finished.any():
samp.update_finished()
if samp.is_finished.eq(1).all():
break
else:
self.fail("All batches never ended (very unlikely but "
"maybe due to stochasticisty. If so, please "
"increase the range of the for-loop.")
for b in range(batch_sz):
if b != 0 and b != 8:
self.assertEqual(samp.scores[b], [0])
self.assertTrue(samp.done)
| 9,200 | 41.400922 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_models.py
|
import copy
import unittest
import math
import torch
import onmt
import onmt.inputters
import onmt.opts
from onmt.model_builder import build_embeddings, \
build_encoder, build_decoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.utils.parse import ArgumentParser
parser = ArgumentParser(description='train.py')
onmt.opts.model_opts(parser)
onmt.opts.train_opts(parser)
# -data option is required, but not used in this test, so dummy.
opt = parser.parse_known_args(['-data', 'dummy'])[0]
class TestModel(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__(*args, **kwargs)
self.opt = opt
def get_field(self):
src = onmt.inputters.get_fields("text", 0, 0)["src"]
src.base_field.build_vocab([])
return src
def get_batch(self, source_l=3, bsize=1):
# len x batch x nfeat
test_src = torch.ones(source_l, bsize, 1).long()
test_tgt = torch.ones(source_l, bsize, 1).long()
test_length = torch.ones(bsize).fill_(source_l).long()
return test_src, test_tgt, test_length
def get_batch_image(self, tgt_l=3, bsize=1, h=15, w=17):
# batch x c x h x w
test_src = torch.ones(bsize, 3, h, w).float()
test_tgt = torch.ones(tgt_l, bsize, 1).long()
test_length = None
return test_src, test_tgt, test_length
def get_batch_audio(self, tgt_l=7, bsize=3, sample_rate=5500,
window_size=0.03, t=37):
# batch x 1 x nfft x t
nfft = int(math.floor((sample_rate * window_size) / 2) + 1)
test_src = torch.ones(bsize, 1, nfft, t).float()
test_tgt = torch.ones(tgt_l, bsize, 1).long()
test_length = torch.ones(bsize).long().fill_(tgt_l)
return test_src, test_tgt, test_length
def embeddings_forward(self, opt, source_l=3, bsize=1):
'''
        Tests if the embeddings work as expected
        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batch size of generated input
'''
word_field = self.get_field()
emb = build_embeddings(opt, word_field)
test_src, _, __ = self.get_batch(source_l=source_l, bsize=bsize)
if opt.decoder_type == 'transformer':
input = torch.cat([test_src, test_src], 0)
res = emb(input)
compare_to = torch.zeros(source_l * 2, bsize,
opt.src_word_vec_size)
else:
res = emb(test_src)
compare_to = torch.zeros(source_l, bsize, opt.src_word_vec_size)
self.assertEqual(res.size(), compare_to.size())
def encoder_forward(self, opt, source_l=3, bsize=1):
'''
Tests if the encoder works as expected
args:
opt: set of options
source_l: Length of generated input sentence
            bsize: Batch size of generated input
'''
if opt.rnn_size > 0:
opt.enc_rnn_size = opt.rnn_size
word_field = self.get_field()
embeddings = build_embeddings(opt, word_field)
enc = build_encoder(opt, embeddings)
test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
bsize=bsize)
hidden_t, outputs, test_length = enc(test_src, test_length)
# Initialize vectors to compare size with
test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.enc_rnn_size)
test_out = torch.zeros(source_l, bsize, opt.dec_rnn_size)
# Ensure correct sizes and types
self.assertEqual(test_hid.size(),
hidden_t[0].size(),
hidden_t[1].size())
self.assertEqual(test_out.size(), outputs.size())
self.assertEqual(type(outputs), torch.Tensor)
def nmtmodel_forward(self, opt, source_l=3, bsize=1):
"""
        Creates an NMTModel with a custom opt function.
        Forwards a test batch and checks output size.
        Args:
            opt: Namespace with options
            source_l: length of input sequence
            bsize: batch size
"""
if opt.rnn_size > 0:
opt.enc_rnn_size = opt.rnn_size
opt.dec_rnn_size = opt.rnn_size
word_field = self.get_field()
embeddings = build_embeddings(opt, word_field)
enc = build_encoder(opt, embeddings)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
bsize=bsize)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(source_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def imagemodel_forward(self, opt, tgt_l=2, bsize=1, h=15, w=17):
"""
        Creates an image-to-text NMTModel with a custom opt function.
        Forwards a test batch and checks output size.
        Args:
            opt: Namespace with options
            tgt_l: length of the target sequence
            bsize: batch size
"""
if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
return
word_field = self.get_field()
enc = ImageEncoder(
opt.enc_layers, opt.brnn, opt.enc_rnn_size,
opt.dropout)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch_image(
h=h, w=w,
bsize=bsize,
tgt_l=tgt_l)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def audiomodel_forward(self, opt, tgt_l=7, bsize=3, t=37):
"""
        Creates a speech-to-text NMTModel with a custom opt function.
        Forwards a test batch and checks output size.
        Args:
            opt: Namespace with options
            tgt_l: length of the target sequence
            bsize: batch size
"""
if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
return
if opt.rnn_type == 'SRU':
return
word_field = self.get_field()
enc = AudioEncoder(opt.rnn_type, opt.enc_layers, opt.dec_layers,
opt.brnn, opt.enc_rnn_size, opt.dec_rnn_size,
opt.audio_enc_pooling, opt.dropout,
opt.sample_rate, opt.window_size)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch_audio(
bsize=bsize,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
t=t, tgt_l=tgt_l)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def _add_test(param_setting, methodname):
"""
Adds a Test to TestModel according to settings
Args:
param_setting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
opt = copy.deepcopy(self.opt)
if param_setting:
for param, setting in param_setting:
setattr(opt, param, setting)
ArgumentParser.update_model_opts(opt)
getattr(self, methodname)(opt)
if param_setting:
name = 'test_' + methodname + "_" + "_".join(
str(param_setting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestModel, name, test_method)
test_method.__name__ = name
'''
TEST PARAMETERS
'''
opt.brnn = False
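# Each list below is a set of (param, value) overrides; _add_test turns every
# entry into a generated test_* method on TestModel.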
test_embeddings = [[],
[('decoder_type', 'transformer')]
]
for p in test_embeddings:
_add_test(p, 'embeddings_forward')
tests_encoder = [[],
[('encoder_type', 'mean')],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16), ('rnn_size', 16)],
[]
]
for p in tests_encoder:
_add_test(p, 'encoder_forward')
tests_nmtmodel = [[('rnn_type', 'GRU')],
[('layers', 10)],
[('input_feed', 0)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16),
('position_encoding', True)],
[('coverage_attn', True)],
[('copy_attn', True)],
[('global_attention', 'mlp')],
[('context_gate', 'both')],
[('context_gate', 'target')],
[('context_gate', 'source')],
[('encoder_type', "brnn"),
('brnn_merge', 'sum')],
[('encoder_type', "brnn")],
[('decoder_type', 'cnn'),
('encoder_type', 'cnn')],
[('encoder_type', 'rnn'),
('global_attention', None)],
[('encoder_type', 'rnn'),
('global_attention', None),
('copy_attn', True),
('copy_attn_type', 'general')],
[('encoder_type', 'rnn'),
('global_attention', 'mlp'),
('copy_attn', True),
('copy_attn_type', 'general')],
[],
]
if onmt.models.sru.check_sru_requirement():
# """ Only do SRU test if requirment is safisfied. """
# SRU doesn't support input_feed.
tests_nmtmodel.append([('rnn_type', 'SRU'), ('input_feed', 0)])
for p in tests_nmtmodel:
_add_test(p, 'nmtmodel_forward')
for p in tests_nmtmodel:
_add_test(p, 'imagemodel_forward')
for p in tests_nmtmodel:
p.append(('sample_rate', 5500))
p.append(('window_size', 0.03))
# when reasonable, set audio_enc_pooling to 2
for arg, val in p:
if arg == "layers" and int(val) > 2:
# Need lengths >= audio_enc_pooling**n_layers.
# That condition is unrealistic for large n_layers,
# so leave audio_enc_pooling at 1.
break
else:
p.append(('audio_enc_pooling', '2'))
_add_test(p, 'audiomodel_forward')
| 11,557 | 34.563077 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/utils_for_tests.py
|
import itertools
def product_dict(**kwargs):
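    # yields one dict per element of the Cartesian product of the value
    # lists, e.g. product_dict(a=[1, 2], b=[3]) -> {'a': 1, 'b': 3},
    # {'a': 2, 'b': 3}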
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
| 185 | 19.666667 | 45 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_preprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import configargparse
import copy
import unittest
import glob
import os
import codecs
import onmt
import onmt.inputters
import onmt.opts
import onmt.bin.preprocess as preprocess
parser = configargparse.ArgumentParser(description='preprocess.py')
onmt.opts.preprocess_opts(parser)
SAVE_DATA_PREFIX = 'data/test_preprocess'
default_opts = [
'-data_type', 'text',
'-train_src', 'data/src-train.txt',
'-train_tgt', 'data/tgt-train.txt',
'-valid_src', 'data/src-val.txt',
'-valid_tgt', 'data/tgt-val.txt',
'-save_data', SAVE_DATA_PREFIX
]
opt = parser.parse_known_args(default_opts)[0]
class TestData(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestData, self).__init__(*args, **kwargs)
self.opt = opt
def dataset_build(self, opt):
fields = onmt.inputters.get_fields("text", 0, 0)
if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
with codecs.open(opt.src_vocab, 'w', 'utf-8') as f:
f.write('a\nb\nc\nd\ne\nf\n')
if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
with codecs.open(opt.tgt_vocab, 'w', 'utf-8') as f:
f.write('a\nb\nc\nd\ne\nf\n')
src_reader = onmt.inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = onmt.inputters.str2reader["text"].from_opt(opt)
align_reader = onmt.inputters.str2reader["text"].from_opt(opt)
preprocess.build_save_dataset(
'train', fields, src_reader, tgt_reader, align_reader, opt)
preprocess.build_save_dataset(
'valid', fields, src_reader, tgt_reader, align_reader, opt)
# Remove the generated *pt files.
for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
os.remove(pt)
if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
os.remove(opt.src_vocab)
if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
os.remove(opt.tgt_vocab)
def _add_test(param_setting, methodname):
"""
Adds a Test to TestData according to settings
Args:
param_setting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
if param_setting:
opt = copy.deepcopy(self.opt)
for param, setting in param_setting:
setattr(opt, param, setting)
else:
opt = self.opt
getattr(self, methodname)(opt)
if param_setting:
name = 'test_' + methodname + "_" + "_".join(
str(param_setting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestData, name, test_method)
test_method.__name__ = name
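# Parameter grids: each list of (param, value) overrides below becomes one
# generated dataset_build test on TestData.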
test_databuild = [[],
[('src_vocab_size', 1),
('tgt_vocab_size', 1)],
[('src_vocab_size', 10000),
('tgt_vocab_size', 10000)],
[('src_seq_len', 1)],
[('src_seq_len', 5000)],
[('src_seq_length_trunc', 1)],
[('src_seq_length_trunc', 5000)],
[('tgt_seq_len', 1)],
[('tgt_seq_len', 5000)],
[('tgt_seq_length_trunc', 1)],
[('tgt_seq_length_trunc', 5000)],
[('shuffle', 0)],
[('lower', True)],
[('dynamic_dict', True)],
[('share_vocab', True)],
[('dynamic_dict', True),
('share_vocab', True)],
[('dynamic_dict', True),
('shard_size', 500000)],
[('src_vocab', '/tmp/src_vocab.txt'),
('tgt_vocab', '/tmp/tgt_vocab.txt')],
]
for p in test_databuild:
_add_test(p, 'dataset_build')
# Test image preprocessing
test_databuild = [[],
[('tgt_vocab_size', 1)],
[('tgt_vocab_size', 10000)],
[('tgt_seq_len', 1)],
[('tgt_seq_len', 5000)],
[('tgt_seq_length_trunc', 1)],
[('tgt_seq_length_trunc', 5000)],
[('shuffle', 0)],
[('lower', True)],
[('shard_size', 5)],
[('shard_size', 50)],
[('tgt_vocab', '/tmp/tgt_vocab.txt')],
]
test_databuild_common = [('data_type', 'img'),
('src_dir', '/tmp/im2text/images'),
('train_src', ['/tmp/im2text/src-train-head.txt']),
('train_tgt', ['/tmp/im2text/tgt-train-head.txt']),
('valid_src', '/tmp/im2text/src-val-head.txt'),
('valid_tgt', '/tmp/im2text/tgt-val-head.txt'),
]
for p in test_databuild:
_add_test(p + test_databuild_common, 'dataset_build')
# Test audio preprocessing
test_databuild = [[],
[('tgt_vocab_size', 1)],
[('tgt_vocab_size', 10000)],
[('src_seq_len', 1)],
[('src_seq_len', 5000)],
[('src_seq_length_trunc', 3200)],
[('src_seq_length_trunc', 5000)],
[('tgt_seq_len', 1)],
[('tgt_seq_len', 5000)],
[('tgt_seq_length_trunc', 1)],
[('tgt_seq_length_trunc', 5000)],
[('shuffle', 0)],
[('lower', True)],
[('shard_size', 5)],
[('shard_size', 50)],
[('tgt_vocab', '/tmp/tgt_vocab.txt')],
]
test_databuild_common = [('data_type', 'audio'),
('src_dir', '/tmp/speech/an4_dataset'),
('train_src', ['/tmp/speech/src-train-head.txt']),
('train_tgt', ['/tmp/speech/tgt-train-head.txt']),
('valid_src', '/tmp/speech/src-val-head.txt'),
('valid_tgt', '/tmp/speech/tgt-val-head.txt'),
('sample_rate', 16000),
('window_size', 0.04),
('window_stride', 0.02),
('window', 'hamming'),
]
for p in test_databuild:
_add_test(p + test_databuild_common, 'dataset_build')
| 6,455 | 35.474576 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/tests/test_image_dataset.py
|
import unittest
from onmt.inputters.image_dataset import ImageDataReader
import os
import shutil
import cv2
import numpy as np
import torch
class TestImageDataReader(unittest.TestCase):
# this test touches the file system, so it could be considered an
# integration test
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_IMG_DATA_DIRNAME = "test_image_data"
_IMG_DATA_DIR = os.path.join(_THIS_DIR, _IMG_DATA_DIRNAME)
_IMG_DATA_FMT = "test_img_{:d}.png"
_IMG_DATA_PATH_FMT = os.path.join(_IMG_DATA_DIR, _IMG_DATA_FMT)
_IMG_LIST_DIR = "test_image_filenames"
# file to hold full paths to image data
_IMG_LIST_PATHS_FNAME = "test_files.txt"
_IMG_LIST_PATHS_PATH = os.path.join(
_IMG_LIST_DIR, _IMG_LIST_PATHS_FNAME)
# file to hold image paths relative to _IMG_DATA_DIR (i.e. file names)
_IMG_LIST_FNAMES_FNAME = "test_fnames.txt"
_IMG_LIST_FNAMES_PATH = os.path.join(
_IMG_LIST_DIR, _IMG_LIST_FNAMES_FNAME)
# it's ok if non-image files co-exist with image files in the data dir
_JUNK_FILE = os.path.join(
_IMG_DATA_DIR, "this_is_junk.txt")
_N_EXAMPLES = 20
_N_CHANNELS = 3
@classmethod
def setUpClass(cls):
if not os.path.exists(cls._IMG_DATA_DIR):
os.makedirs(cls._IMG_DATA_DIR)
if not os.path.exists(cls._IMG_LIST_DIR):
os.makedirs(cls._IMG_LIST_DIR)
with open(cls._JUNK_FILE, "w") as f:
f.write("this is some garbage\nShould have no impact.")
with open(cls._IMG_LIST_PATHS_PATH, "w") as f_list_fnames, \
open(cls._IMG_LIST_FNAMES_PATH, "w") as f_list_paths:
cls.n_rows = torch.randint(30, 314, (cls._N_EXAMPLES,))
cls.n_cols = torch.randint(30, 314, (cls._N_EXAMPLES,))
for i in range(cls._N_EXAMPLES):
img = np.random.randint(
0, 255, (cls.n_rows[i], cls.n_cols[i], cls._N_CHANNELS))
f_path = cls._IMG_DATA_PATH_FMT.format(i)
cv2.imwrite(f_path, img)
f_name_short = cls._IMG_DATA_FMT.format(i)
f_list_fnames.write(f_name_short + "\n")
f_list_paths.write(f_path + "\n")
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls._IMG_DATA_DIR)
shutil.rmtree(cls._IMG_LIST_DIR)
def test_read_from_dir_and_data_file_containing_filenames(self):
rdr = ImageDataReader(channel_size=self._N_CHANNELS)
i = 0 # initialize since there's a sanity check on i
for i, img in enumerate(rdr.read(
self._IMG_LIST_FNAMES_PATH, "src", self._IMG_DATA_DIR)):
self.assertEqual(
img["src"].shape,
(self._N_CHANNELS, self.n_rows[i], self.n_cols[i]))
self.assertEqual(img["src_path"],
self._IMG_DATA_PATH_FMT.format(i))
self.assertGreater(i, 0, "No image data was read.")
def test_read_from_dir_and_data_file_containing_paths(self):
rdr = ImageDataReader(channel_size=self._N_CHANNELS)
i = 0 # initialize since there's a sanity check on i
for i, img in enumerate(rdr.read(
self._IMG_LIST_PATHS_PATH, "src", self._IMG_DATA_DIR)):
self.assertEqual(
img["src"].shape,
(self._N_CHANNELS, self.n_rows[i], self.n_cols[i]))
self.assertEqual(img["src_path"],
self._IMG_DATA_FMT.format(i))
self.assertGreater(i, 0, "No image data was read.")
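# Re-runs all the reader tests above with single-channel (grayscale) images.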
class TestImageDataReader1Channel(TestImageDataReader):
_N_CHANNELS = 1
| 3,641 | 38.16129 | 76 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.