#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
onnxFile0 = "model-0.onnx-backup"
onnxFile1 = "model-1.onnx"
"""
# extract the subgraph from the WeNet encoder; not needed in this example. TODO: rewrite this part with ONNX
onnxFileS = "./encoder.onnx"
graph = gs.import_onnx(onnx.load(onnxFileS))
graph.outputs = []
for node in graph.nodes:
if node.op == "Slice" and node.name == "Slice_74":
table1x5000x256 = node.inputs[0].values
constantData = gs.Constant("constantData", np.ascontiguousarray(table1x5000x256[:,:512,:])) # keep only 512 elements to reduce the volume of the tensor
inputTensor = node.inputs[2]
inputTensor.name = "inputT0"
graph.inputs = [inputTensor]
node.inputs[0] = constantData
for i in range(1, 24, 2):
graph.outputs.append(node.o(i).o().o().outputs[0]) # Transpose
continue
graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile0)
"""
graph = gs.import_onnx(onnx.load(onnxFile0))
wiliConstant0 = gs.Constant("wiliConstant0", np.ascontiguousarray(np.array([0], dtype=np.int64)))
wiliConstant1 = gs.Constant("wiliConstant1", np.ascontiguousarray(np.array([1], dtype=np.int64)))
wiliConstant3 = gs.Constant("wiliConstant3", np.ascontiguousarray(np.array([3], dtype=np.int64)))
nSlice = 0
graph.outputs = []
for node in graph.nodes:
if node.op == "Slice" and node.name == "Slice_74":
table512x256 = node.inputs[0].values[0]
for i in range(1, 24, 2):
factor256x256 = node.o(i).inputs[1].values
transposeNode = node.o(i).o().o()
newTable = np.matmul(table512x256, factor256x256).transpose().reshape(1, 4, 64, 512)
constantData = gs.Constant("wiliConstant-" + str(nSlice), np.ascontiguousarray(newTable))
sliceV = gs.Variable(transposeNode.outputs[0].name, np.dtype(np.float32), [1, 4, 64, "t4"])
sliceN = gs.Node(
"Slice",
"wiliSliceN-" + str(nSlice),
inputs=[
constantData, # data
wiliConstant0, # start=0
graph.inputs[0], # end
wiliConstant3, # axes=3
wiliConstant1, # step=1
],
outputs=[sliceV]
)
graph.nodes.append(sliceN)
graph.outputs.append(sliceV)
nSlice += 1
transposeNode.outputs = []
continue
graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile1)
def run(onnxFile):
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 22 << 30
parser = trt.OnnxParser(network, logger)
with open(onnxFile, "rb") as model:
parser.parse(model.read())
inputT0 = network.get_input(0)
inputT0.shape = [1]
profile.set_shape_input(inputT0.name, [32], [32], [32]) # set_shape_input rather than set_shape
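# inputT0 is a shape tensor here: the profile constrains its runtime *value* (32), not its dimensions, hence set_shape_input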
config.add_optimization_profile(profile)
engineString = builder.build_serialized_network(network, config)
trtFile = onnxFile.split(".")[0] + ".plan"
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded building %s!" % (trtFile))
os.system("trtexec --loadEngine=%s --verbose --useCudaGraph --noDataTransfers --shapes=inputTensor:32" % trtFile)
run(onnxFile0)
run(onnxFile1)
# ===== trt-samples-for-hackathon-cn-master | cookbook/09-BestPractice/ComputationInAdvance/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from collections import OrderedDict
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
onnxFile3D = "model-3D.onnx"
onnxFile2D = "model-2D.onnx"
nLoop = 10
nBS = 32
nSL = 256
np.random.seed(31193)
# ONNX network with 3D matrix multiplication ------------------------------------
tensor0 = gs.Variable("tensor-0", np.float32, ["B", "T", 1])
constant1x256 = gs.Constant("constant1x256", np.ascontiguousarray(np.random.rand(1, 256).reshape(1, 256).astype(np.float32) * 2 - 1))
constant256 = gs.Constant("constant256", np.ascontiguousarray(np.random.rand(256).astype(np.float32) * 2 - 1))
constant256x2048 = gs.Constant("constant256x2048", np.ascontiguousarray(np.random.rand(256, 2048).reshape(256, 2048).astype(np.float32) * 2 - 1))
constant2048 = gs.Constant("constant2048", np.ascontiguousarray(np.random.rand(2048).astype(np.float32) * 2 - 1))
constant2048x256 = gs.Constant("constant2048x256", np.ascontiguousarray(np.random.rand(2048, 256).reshape(2048, 256).astype(np.float32) * 2 - 1))
constantM1 = gs.Constant("constantM1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))
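# Graph below: a MatMul(1x256) input projection, then nLoop blocks of MatMul(256x2048) + Add + Relu + MatMul(2048x256) + Add + Relu, then ReduceSum over the last axis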
graphNodeList = []
tensor1 = gs.Variable("tensor-1", np.float32, None)
node1 = gs.Node("MatMul", "MMU0", inputs=[tensor0, constant1x256], outputs=[tensor1])
graphNodeList.append(node1)
tensorLoop = tensor1
for i in range(nLoop):
tensor2 = gs.Variable("tensor-%d-1" % i, np.float32, None)
node2 = gs.Node("MatMul", "MMU-" + str(i), inputs=[tensorLoop, constant256x2048], outputs=[tensor2])
graphNodeList.append(node2)
tensor3 = gs.Variable("tensor-%d-2" % i, dtype=np.float32, shape=None)
node3 = gs.Node("Add", "AddU-" + str(i), inputs=[tensor2, constant2048], outputs=[tensor3])
graphNodeList.append(node3)
tensor4 = gs.Variable("tensor-%d-3" % i, dtype=np.float32, shape=None)
node4 = gs.Node("Relu", "ReLUU-" + str(i), inputs=[tensor3], outputs=[tensor4])
graphNodeList.append(node4)
tensor5 = gs.Variable("tensor-%d-4" % i, dtype=np.float32, shape=None)
node5 = gs.Node("MatMul", "MMD-" + str(i), inputs=[tensor4, constant2048x256], outputs=[tensor5])
graphNodeList.append(node5)
tensor6 = gs.Variable("tensor-%d-5" % i, dtype=np.float32, shape=None)
node6 = gs.Node("Add", "AddD-" + str(i), inputs=[tensor5, constant256], outputs=[tensor6])
graphNodeList.append(node6)
tensor7 = gs.Variable("tensor-%d-6" % i, dtype=np.float32, shape=None)
node7 = gs.Node("Relu", "ReLUD-" + str(i), inputs=[tensor6], outputs=[tensor7])
graphNodeList.append(node7)
tensorLoop = tensor7
tensor8 = gs.Variable("tensor-8", dtype=np.float32, shape=None)
node8 = gs.Node("ReduceSum", "Reduce", inputs=[tensorLoop, constantM1], outputs=[tensor8], attrs=OrderedDict([("keepdims", 0)]))
graphNodeList.append(node8)
graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor8], opset=13)
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile3D)
print("Succeeded building %s!" % (onnxFile3D))
# Add two Reshape nodes to convert the 3D matrix multiplication into 2D matrix multiplication
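# (Likely rationale: collapsing ["B","T",1] into ["B"*"T",1] turns the batched 3D MatMuls into plain 2D GEMMs, which TensorRT can often run faster; the trtexec runs below compare the two)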
graph = gs.import_onnx(onnx.load(onnxFile3D))
constant0 = gs.Constant("constant0", np.ascontiguousarray(np.array([0], dtype=np.int64)))
constant1 = gs.Constant("constant1", np.ascontiguousarray(np.array([1], dtype=np.int64)))
constantS0 = gs.Constant("constantS0", np.array(0, dtype=np.int64)).to_variable(np.dtype(np.int64), []).to_constant(np.array(0, dtype=np.dtype(np.int64)))
constantS1 = gs.Constant("constantS1", np.array(1, dtype=np.int64)).to_variable(np.dtype(np.int64), []).to_constant(np.array(1, dtype=np.dtype(np.int64)))
shapeV = gs.Variable("myShapeV", np.dtype(np.int64), [3])
shapeN = gs.Node("Shape", "myShapeN", inputs=[graph.inputs[0]], outputs=[shapeV])
graph.nodes.append(shapeN)
# shape = [], value = ["B"]
bTensorScalar = gs.Variable("bTensorScalar", np.dtype(np.int64), [])
gatherN = gs.Node("Gather", "myGatherN0", inputs=[shapeV, constantS0], outputs=[bTensorScalar], attrs=OrderedDict([("axis", 0)]))
graph.nodes.append(gatherN)
# shape = [1,], value = ["B"]
bTensor = gs.Variable("bTensor", np.dtype(np.int64), [1])
unsqueezeN = gs.Node("Unsqueeze", "myUnsqueezeN0", inputs=[bTensorScalar, constant0], outputs=[bTensor])
graph.nodes.append(unsqueezeN)
# shape = [], value = ["T"]
tTensorScalar = gs.Variable("tTensorScalar", np.dtype(np.int64), [])
gatherN = gs.Node("Gather", "myGatherN1", inputs=[shapeV, constantS1], outputs=[tTensorScalar], attrs=OrderedDict([("axis", 0)]))
graph.nodes.append(gatherN)
# shape = [1,], value = ["T"]
tTensor = gs.Variable("tTensor", np.dtype(np.int64), [1])
unsqueezeN = gs.Node("Unsqueeze", "myUnsqueezeN1", inputs=[tTensorScalar, constant0], outputs=[tTensor])
graph.nodes.append(unsqueezeN)
# shape = [1,], value = ["B"*"T"]
bTTensor = gs.Variable("bTTensor", np.dtype(np.int64), [1])
mulN = gs.Node("Mul", "myMulN", inputs=[bTensor, tTensor], outputs=[bTTensor])
graph.nodes.append(mulN)
# shape = [2,], value = ["B"*"T",1]
bTComma1Tensor = gs.Variable("bTComma1Tensor", np.dtype(np.int64), [2])
concatN = gs.Node("Concat", "myConcatN", inputs=[bTTensor, constant1], outputs=[bTComma1Tensor], attrs=OrderedDict([("axis", 0)]))
graph.nodes.append(concatN)
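# bTComma1Tensor now carries the runtime-computed target shape ["B"*"T", 1] consumed by the Reshape inserted below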
for node in graph.nodes:
if node.name == "MMU0":
reshapeV = gs.Variable("reshapeV-input", np.dtype(np.float32), ['B*T', 1])
reshapeN = gs.Node("Reshape", "myReshapeN-input", inputs=[node.inputs[0], bTComma1Tensor], outputs=[reshapeV])
graph.nodes.append(reshapeN)
node.inputs[0] = reshapeV
if node.name == "Reduce":
reshapeV = gs.Variable("reshapeV-output", np.dtype(np.float32), ["B", "T", 1])
reshapeN = gs.Node("Reshape", "myReshapeN-output", inputs=[node.outputs[0], shapeV], outputs=[reshapeV])
graph.nodes.append(reshapeN)
graph.outputs = [reshapeV]
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile2D)
print("Succeeded building %s!" % (onnxFile2D))
def run(onnxFile):
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 22 << 30
parser = trt.OnnxParser(network, logger)
with open(onnxFile, "rb") as model:
parser.parse(model.read())
inputT0 = network.get_input(0)
inputT0.shape = [-1, -1, 1]
profile.set_shape(inputT0.name, [1, 1, 1], [nBS, nSL, 1], [nBS, nSL, 1])
config.add_optimization_profile(profile)
engineString = builder.build_serialized_network(network, config)
trtFile = onnxFile.split(".")[0] + ".plan"
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded building %s!" % (trtFile))
os.system("trtexec --loadEngine=%s --verbose --useCudaGraph --noDataTransfers --shapes=tensor-0:%dx%dx1" % (trtFile, nBS, nSL))
run(onnxFile3D)
run(onnxFile2D)
# ===== trt-samples-for-hackathon-cn-master | cookbook/09-BestPractice/ComputationInAdvance/Convert3DMMTo2DMM/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
information = os.popen("nvidia-smi").read().split("\n")[2]
driverV = re.search(r"Driver Version: \d+\.\d+(\.\d+)?", information)
cudaV = re.search(r"CUDA Version: \d+\.\d+(\.\d+)?", information)
driverV = "None" if driverV is None else driverV.group().split(": ")[-1]
cudaV = "None" if cudaV is None else cudaV.group().split(": ")[-1]
print("Driver: %s" % driverV)
print("CUDA: %s" % cudaV)
information = os.popen(r"cat /usr/include/x86_64-linux-gnu/cudnn_version_v*.h").read()
cudnnMajorV = re.search(r"CUDNN_MAJOR \d+", information)
cudnnMinorV = re.search(r"CUDNN_MINOR \d+", information)
cudnnPatchV = re.search(r"CUDNN_PATCHLEVEL \d+", information)
cudnnMajorV = "None" if cudnnMajorV is None else cudnnMajorV.group().split(" ")[-1]
cudnnMinorV = "None" if cudnnMinorV is None else cudnnMinorV.group().split(" ")[-1]
cudnnPatchV = "None" if cudnnPatchV is None else cudnnPatchV.group().split(" ")[-1]
print("cuDNN: %s" % cudnnMajorV + "." + cudnnMinorV + "." + cudnnPatchV)
information = os.popen(r"cat /usr/local/cuda/include/cublas_api.h").read()
cublasMajorV = re.search(r"CUBLAS_VER_MAJOR \d+", information)
cublasMinorV = re.search(r"CUBLAS_VER_MINOR \d+", information)
cublasPatchV = re.search(r"CUBLAS_VER_PATCH \d+", information)
cublasBuildV = re.search(r"CUBLAS_VER_BUILD \d+", information)
cublasMajorV = "None" if cublasMajorV is None else cublasMajorV.group().split(" ")[-1]
cublasMinorV = "None" if cublasMinorV is None else cublasMinorV.group().split(" ")[-1]
cublasPatchV = "None" if cublasPatchV is None else cublasPatchV.group().split(" ")[-1]
cublasBuildV = "None" if cublasBuildV is None else cublasBuildV.group().split(" ")[-1]
print("cuBLAS: %s" % cublasMajorV + "." + cublasMinorV + "." + cublasPatchV + "." + cublasBuildV)
information = os.popen(r"cat /usr/include/x86_64-linux-gnu/NvInferVersion.h").read()
tensorrtMajorV = re.search(r"NV_TENSORRT_MAJOR \d+", information)
tensorrtMinorV = re.search(r"NV_TENSORRT_MINOR \d+", information)
tensorrtPatchV = re.search(r"NV_TENSORRT_PATCH \d+", information)
tensorrtBuildV = re.search(r"NV_TENSORRT_BUILD \d+", information)
tensorrtMajorV = "None" if tensorrtMajorV is None else tensorrtMajorV.group().split(" ")[-1]
tensorrtMinorV = "None" if tensorrtMinorV is None else tensorrtMinorV.group().split(" ")[-1]
tensorrtPatchV = "None" if tensorrtPatchV is None else tensorrtPatchV.group().split(" ")[-1]
tensorrtBuildV = "None" if tensorrtBuildV is None else tensorrtBuildV.group().split(" ")[-1]
print("TensorRT: %s" % tensorrtMajorV + "." + tensorrtMinorV + "." + tensorrtPatchV + "." + tensorrtBuildV)
information = os.popen(r"pip list").read()
pyTorchV = re.search(r"torch .+", information)
pyTorchV = "None" if pyTorchV is None else pyTorchV.group().split(" ")[-1]
tensorflowV = re.search(r"tensorflow .+", information)
tensorflowV = "None" if tensorflowV is None else tensorflowV.group().split(" ")[-1]
tensorrtV = re.search(r"tensorrt .+", information)
tensorrtV = "None" if tensorrtV is None else tensorrtV.group().split(" ")[-1]
print("pyTorch: %s" % pyTorchV)
print("TensorFlow: %s" % tensorflowV)
print("TensorRT(python):%s" % tensorrtV)
# ===== trt-samples-for-hackathon-cn-master | cookbook/51-Uncategorized/getVersion.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
def buildMD(nSign, nExponent, nMantissa, nBias, bIEEE754=True): # Bias is currently unused
typeName = "S%dE%dM%dB%d" % (nSign, nExponent, nMantissa, nBias)
if typeName == "S1E11M52B0":
typeName += "(FP64)"
elif typeName == "S1E8M23B0":
typeName += "(FP32)"
elif typeName == "S1E8M10B0":
typeName += "(TF32)"
elif typeName == "S1E5M10B0":
typeName += "(FP16)"
elif typeName == "S1E8M7B0":
typeName += "(BF16)"
elif typeName == "S1E5M2B0":
typeName += "(FP8e5m2)"
elif typeName == "S1E4M3B0":
typeName += "(FP8e4m3)"
print("Build %s" % typeName)
if nExponent < 2:
print("nExponent should be equal or greater than 2!")
ss = ""
def numberToString(x):
return ("%6.4e" % x).split("e")
with open(typeName + ".md", "w") as f:
ss += "# %s - %s\n" % (typeName, ("" if bIEEE754 else "not ") + "IEEE754")
ss += "\n"
ss += "+ SignBit ($s$): %d\n" % nSign
ss += "+ Exponent ($k$): %d\n" % nExponent
ss += "+ Mantissa ($n$): %d\n" % nMantissa
ss += "+ Bias ($b$): %d\n" % nBias
ss += "\n"
ss += "+ Special value\n"
ss += "| Mantissa | all 0 | not all 0 |\n"
ss += "| :-: | :-: | :-: |\n"
ss += "| $e = %s_2$ | Signed Zero | Subnormal Value |\n" % ("0" * nExponent)
ss += "| $e = %s_2$ | Signed Infinity | NaN |\n" % ("1" * nExponent)
ss += "\n"
ss += "+ Normal value ($%s1_2 \le e_2 \le %s0_2$)\n" % ("0" * (nExponent - 1), "1" * (nExponent - 1))
ss += "$$\n"
ss += "\\begin{equation}\n"
ss += "\\begin{aligned}\n"
ss += "E &= e_{10} - \left( 2^{k-1} - 1 \\right) \\\\\n"
ss += "M &= f_{10} \cdot 2^{-n} \\\\\n"
ss += "value &= \left(-1\\right)^{s}2^{E}\left(1+M\\right)\n"
ss += "\end{aligned}\n"
ss += "\end{equation}\n"
ss += "$$\n"
ss += "\n"
ss += "+ Subnormal value ($e_2 = %s_2$)\n" % ("0" * nExponent)
ss += "$$\n"
ss += "\\begin{equation}\n"
ss += "\\begin{aligned}\n"
ss += "E &= 2 - 2^{k-1} = %d \\\\\n" % (2 - 2 ** (nExponent - 1))
ss += "M &= f_{10} \cdot 2^{-n} \\\\\n"
ss += "value &= \left(-1\\right)^{s}2^{E}M\n"
ss += "\end{aligned}\n"
ss += "\end{equation}\n"
ss += "$$\n"
ss += "\n"
ss += "+ Examples\n"
# yapf:disable
ss += "| Number($\color{#FF0000}{Sign}\color{#007F00}{Exponent}\color{#0000FF}{Mantissa}$) | value | comment |\n"
ss += "| :-: | :-: | :-: |\n"
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $+0$ | |\n" % ("0" * nExponent, "0" * nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s1}$ | $%s\\times10^{%s}$ | Minimum subnormal |\n" % ("0" * nExponent, "0" * (nMantissa - 1), *numberToString( 2 ** (2 - 2 ** (nExponent - 1)) * (0 + (1) / (2 ** nMantissa))))
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $%s\\times10^{%s}$ | Maximum subnormal |\n" % ("0" * nExponent, "1" * nMantissa, *numberToString( 2 ** (2 - 2 ** (nExponent - 1)) * (0 + (2 ** nMantissa - 1) / (2 ** nMantissa))))
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s1}\color{#0000FF}{%s}$ | $%s\\times10^{%s}$ | Minimum normal |\n" % ("0" * (nExponent - 1), "0" * nMantissa, *numberToString( 2 ** (2 - 2 ** (nExponent - 1)) * (1 + (0) / (2 ** nMantissa))))
if nExponent >= 3:
ss += "| $\color{#FF0000}{0}\color{#007F00}{0%s0}\color{#0000FF}{%s}$ | $1 - 2^{-%d}$ | largest number < 1 |\n" % ("1" * (nExponent - 2), "1" * nMantissa, nMantissa + 1)
elif nExponent == 2: # the largest number < 1 is a subnormal number when nExponent == 2
ss += "| $\color{#FF0000}{0}\color{#007F00}{00}\color{#0000FF}{%s}$ | $1 - 2^{-%d}$ | largest number < 1 |\n" % ( "1" * nMantissa, nMantissa)
else:
# nExponent == 1
ss += "| $\color{#FF0000}{0}\color{#007F00}{0}\color{#0000FF}{%s}$ | $1 - 2^{-%d}$ | largest number < 1 |\n" % ( "1" * nMantissa, nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{0%s}\color{#0000FF}{%s}$ | $1$ | |\n" % ("1" * (nExponent - 1), "0" * nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{0%s}\color{#0000FF}{%s1}$ | $1 + 2^{-%d}$ | smallest number > 1 |\n" % ("1" * (nExponent - 1), "0" * (nMantissa - 1), nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{1%s}\color{#0000FF}{%s}$ | $2$ | |\n" % ("0" * (nExponent - 1), "0" * nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{1%s}\color{#0000FF}{1%s}$ | $3$ | |\n" % ("0" * (nExponent - 1), "0" * (nMantissa - 1))
if nExponent >= 3:
# 4 can only be represented when nExponent >= 3
ss += "| $\color{#FF0000}{0}\color{#007F00}{1%s1}\color{#0000FF}{%s}$ | $4$ | |\n" % ("0" * (nExponent - 2), "0" * nMantissa)
if nMantissa >= 2:
# 5 and 6 can only be represented when nExponent >= 3 and nMantissa >= 2
ss += "| $\color{#FF0000}{0}\color{#007F00}{1%s1}\color{#0000FF}{01%s}$ | $5$ | |\n" % ("0" * (nExponent - 2), "0" * (nMantissa - 2))
ss += "| $\color{#FF0000}{0}\color{#007F00}{1%s1}\color{#0000FF}{10%s}$ | $6$ | |\n" % ("0" * (nExponent - 2), "0" * (nMantissa - 2))
if bIEEE754:
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s0}\color{#0000FF}{%s}$ | $%s\\times10^{%s}$ | Maximum |\n" % ("1" * (nExponent - 1), "1" * nMantissa, *numberToString( 2 ** (2 ** (nExponent - 1) - 1) * (1 + (2 ** nMantissa - 1) / (2 ** nMantissa))))
ss += "| $\color{#FF0000}{1}\color{#007F00}{%s0}\color{#0000FF}{%s}$ | $%s\\times10^{%s}$ | Maximum negtive |\n" % ("1" * (nExponent - 1), "1" * nMantissa, *numberToString((-1) * 2 ** (2 ** (nExponent - 1) - 1) * (1 + (2 ** nMantissa - 1) / (2 ** nMantissa))))
else:
# an exponent of all 1s is a normal value here, in which case the mantissa cannot be all 1s
ss += "| $\color{#FF0000}{0}\color{#007F00}{\\bold{%s}}\color{#0000FF}{\\bold{%s0}}$ | $%s\\times10^{%s}$ | Maximum |\n" % ("1" * nExponent, "1" * (nMantissa - 1), *numberToString( 2 ** (2 ** (nExponent - 1)) * (1 + (2 ** nMantissa - 2) / (2 ** nMantissa))))
ss += "| $\color{#FF0000}{1}\color{#007F00}{\\bold{%s}}\color{#0000FF}{\\bold{%s0}}$ | $%s\\times10^{%s}$ | Maximum negtive |\n" % ("1" * nExponent, "1" * (nMantissa - 1), *numberToString((-1) * 2 ** (2 ** (nExponent - 1)) * (1 + (2 ** nMantissa - 2) / (2 ** nMantissa))))
ss += "| $\color{#FF0000}{1}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $-0$ | |\n" % ("0" * nExponent, "0" * nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $+\infty$ | positive infinity |\n" % ("1" * nExponent, "0" * nMantissa)
ss += "| $\color{#FF0000}{1}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $-\infty$ | negative infinity |\n" % ("1" * nExponent, "0" * nMantissa)
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s1}$ | $NaN$ | sNaN |\n" % ("1" * nExponent, "0" * (nMantissa - 1))
if nMantissa >= 2:
    # both kinds of NaN can be represented when nMantissa >= 2
    ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{1%s1}$ | $NaN$ | qNaN |\n" % ("1" * nExponent, "0" * (nMantissa - 2))
ss += "| $\color{#FF0000}{0}\color{#007F00}{%s}\color{#0000FF}{%s}$ | $NaN$ | other alternative NaN |\n" % ("1" * nExponent, "1" * nMantissa)
if nExponent >= 3:
    # 1/3 can be represented as a normal value when nExponent >= 3
    ss += "| $\color{#FF0000}{0}\color{#007F00}{0%s01}\color{#0000FF}{%s}$ | $\\frac{1}{3}$ | |\n" % ("1" * (nExponent - 3), "01" * (nMantissa // 2) + ("1" if nMantissa % 2 == 1 else ""))
# yapf:enable
f.write(ss)
return
def convertBinaryToNumber(x, nSign, nExponent, nMantissa, nBias, bIEEE754):
    # Minimal decode sketch, assuming the same IEEE754-style layout documented by buildMD above (nBias is ignored, as above)
    if len(x) != nSign + nExponent + nMantissa:
        print("Invalid input x")
        return None
    y = x
    if nSign > 0:
        sign = int(y[0])
        y = y[1:]
    else:
        sign = 0
    exponent = y[:nExponent]
    mantissa = y[nExponent:]
    e = int(exponent, 2)
    f = int(mantissa, 2) if nMantissa > 0 else 0
    if bIEEE754 and e == 2 ** nExponent - 1:  # INF (mantissa all 0) or NaN (otherwise)
        if f == 0:
            return float("-inf") if sign else float("inf")
        return float("nan")
    if e == 0:  # Subnormal value (or signed zero when the mantissa is also 0)
        E = 2 - 2 ** (nExponent - 1)
        value = 2.0 ** E * (f / 2 ** nMantissa)
    else:  # Normal value
        E = e - (2 ** (nExponent - 1) - 1)
        value = 2.0 ** E * (1 + f / 2 ** nMantissa)
    return -value if sign else value
def convertNumberToBinary(x, sign, exponent, mantissa, bias, bIEEE754):
# TODO
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sign", "-s", type=int, default=1, choices=[0, 1], help="count of sign bit, 0 or 1")
parser.add_argument("--exponent", "-e", type=int, default=8, help="count of exponent bit, >= 2")
parser.add_argument("--mantissa", "-m", type=int, default=23, help="count of mantissa bit, >= 1")
parser.add_argument("--bias", "-b", type=int, default=0, help="bias")
parser.add_argument("--bIEEE754", "-S", type=str, default="True", choices=["True", "False"], help="standard IEEE754")
args = parser.parse_args()
buildMD(args.sign, args.exponent, args.mantissa, args.bias, args.bIEEE754 == "True")
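# e.g. "python buildDataTypeMD.py -s 1 -e 5 -m 10" writes S1E5M10B0(FP16).md; the defaults (-e 8 -m 23) produce S1E8M23B0(FP32).md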
# ===== trt-samples-for-hackathon-cn-master | cookbook/51-Uncategorized/Number/buildDataTypeMD.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import loadMnistData
nTrain = int(sys.argv[1]) if len(sys.argv) > 1 and sys.argv[1].isdigit() else 3000
nTest = int(sys.argv[2]) if len(sys.argv) > 2 and sys.argv[2].isdigit() else 500
mnist = loadMnistData.MnistData("./", isOneHot=False)
mnist.saveImage(nTrain, "./train/", True) # 60000 images in total
mnist.saveImage(nTest, "./test/", False) # 10000 images in total
# ===== trt-samples-for-hackathon-cn-master | cookbook/00-MNISTData/extractMnistData.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://yann.lecun.com/exdb/mnist/, https://storage.googleapis.com/cvdf-datasets/mnist/
import gzip
import cv2
import numpy as np
class MnistData():
def __init__(self, dataPath, isOneHot=False, randomSeed=97):
with open(dataPath + "train-images-idx3-ubyte.gz", "rb") as f:
self.trainImage = self.extractImage(f)
with open(dataPath + "train-labels-idx1-ubyte.gz", "rb") as f:
self.trainLabel = self.extractLabel(f)
with open(dataPath + "t10k-images-idx3-ubyte.gz", "rb") as f:
self.testImage = self.extractImage(f)
with open(dataPath + "t10k-labels-idx1-ubyte.gz", "rb") as f:
self.testLabel = self.extractLabel(f, isOneHot=isOneHot)
self.isOneHot = isOneHot
if self.isOneHot:
self.trainLabel = self.convertToOneHot(self.trainLabel)
self.testLabel = self.convertToOneHot(self.testLabel)
else:
self.trainLabel = self.trainLabel.astype(np.float32)
self.testLabel = self.testLabel.astype(np.float32)
np.random.seed(randomSeed)
def getBatch(self, batchSize, isTrain):
if isTrain:
index = np.random.choice(len(self.trainImage), batchSize, True)
return self.trainImage[index], self.trainLabel[index]
else:
index = np.random.choice(len(self.testImage), batchSize, True)
return self.testImage[index], self.testLabel[index]
def read4Byte(self, byteStream):
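# MNIST headers store the magic number and element counts as 4-byte big-endian integers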
dt = np.dtype(np.uint32).newbyteorder(">")
return np.frombuffer(byteStream.read(4), dtype=dt)[0]
def extractImage(self, f):
print("Extracting", f.name)
with gzip.GzipFile(fileobj=f) as byteStream:
if self.read4Byte(byteStream) != 2051:
raise ValueError("Failed reading file!")
nImage = self.read4Byte(byteStream)
rows = self.read4Byte(byteStream)
cols = self.read4Byte(byteStream)
buf = byteStream.read(rows * cols * nImage)
return np.frombuffer(buf, dtype=np.uint8).astype(np.float32).reshape(nImage, rows, cols, 1) / 255
def extractLabel(self, f, isOneHot=False, nClass=10):
print("Extracting", f.name)
with gzip.GzipFile(fileobj=f) as byteStream:
if self.read4Byte(byteStream) != 2049:
raise ValueError("Failed reading file!")
nLabel = self.read4Byte(byteStream)
buf = byteStream.read(nLabel)
return np.frombuffer(buf, dtype=np.uint8)
def convertToOneHot(self, labelIndex, nClass=10):
nLabel = labelIndex.shape[0]
res = np.zeros((nLabel, nClass), dtype=np.float32)
offset = np.arange(nLabel) * nClass
res.flat[offset + labelIndex] = 1
return res
def saveImage(self, count, outputPath, isTrain):
if self.isOneHot:
return
image, label = ([self.testImage, self.testLabel], [self.trainImage, self.trainLabel])[isTrain]
for i in range(min(count, 10000)):
cv2.imwrite(outputPath + str(i).zfill(5) + "-" + str(label[i]) + ".jpg", (image[i] * 255).astype(np.uint8))
# ===== trt-samples-for-hackathon-cn-master | cookbook/00-MNISTData/loadMnistData.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shapeLayer = network.add_shape(inputT0)
#------------------------------------------------------------------------------- Network
network.mark_output(shapeLayer.get_output(0))
engine = builder.build_engine(network, config) # use the deprecated engine-building API
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
print("nInput = %d, nOutput = %d" % (nInput, nOutput))
bufferH = [] # no need to bind the input tensor (the engine already contains the shape information of the input tensor)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(len(bufferH)): # len(bufferD) matches bufferH here, which is smaller than engine.num_bindings
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute(nB, bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
for buffer in bufferD:
cudart.cudaFree(buffer)
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/ShapeLayer-TRT8/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30 # Deprecated in TensorRT 8.4
#config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30) # use this since TensorRT 8.0
inputTensor = network.add_input("inputT0", trt.float32, [-1, 32, 1, 1])
profile.set_shape(inputTensor.name, [1, 32, 1, 1], [4, 32, 1, 1], [16, 32, 1, 1])
config.add_optimization_profile(profile)
weight = trt.Weights(np.ones([32, 64], dtype=np.float32))
bias = trt.Weights(np.ones([64], dtype=np.float32))
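# Note: add_fully_connected (deprecated since TensorRT 8.4, superseded by MatrixMultiply) flattens the trailing CxHxW dims, hence the [-1, 32, 1, 1] input shape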
identityLayer = network.add_fully_connected(inputTensor, 64, weight, bias)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/FullyConnectedLayer-TRT8.4/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 3, 4, 7
nHidden = 5
data = np.ones(nC * nH * nW, dtype=np.float32).reshape(nC, nH, nW)
weight = np.ascontiguousarray(np.ones((nHidden, nW + nHidden), dtype=np.float32)) # weight matrix, X and H concatenated together
bias = np.ascontiguousarray(np.zeros(nHidden * 2, dtype=np.float32)) # bias, bX and bH concatenated together
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network() # implicit batch mode is required
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
inputT0 = network.add_input("inputT0", trt.float32, (nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0) # shuffle to (nH, nC, nW) first
shuffleLayer.first_transpose = (1, 0, 2)
fakeWeight = trt.Weights(np.random.rand(nHidden, nW + nHidden).astype(np.float32))
fakeBias = trt.Weights(np.random.rand(nHidden * 2).astype(np.float32))
rnnLayer = network.add_rnn(shuffleLayer.get_output(0), 1, nHidden, nH, trt.RNNOperation.RELU, trt.RNNInputMode.LINEAR, trt.RNNDirection.UNIDIRECTION, fakeWeight, fakeBias)
rnnLayer.weights = trt.Weights(weight) # reset the RNN weights
rnnLayer.bias = trt.Weights(bias) # reset the RNN bias
#------------------------------------------------------------------------------- Network
network.mark_output(rnnLayer.get_output(0))
network.mark_output(rnnLayer.get_output(1))
engine = builder.build_engine(network, config)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute(nB, bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
for buffer in bufferD:
cudart.cudaFree(buffer)
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/RNNLayer-TRT8/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
import torch
import torch.nn as nn
import torch.nn.functional as F
from cuda import cudart
np.random.seed(31193)
onnxFile = "./model.onnx"
trtFile = "./model.plan"
testInputShape = [1, 3, 64, 64]
testInputData = np.random.rand(np.prod(testInputShape)).astype(np.float32).reshape(testInputShape) * 2 - 1
os.system("rm -rf ./*.onnx ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
class Pad(nn.Module): # original Pad node
def __init__(self):
super(Pad, self).__init__()
def forward(self, input):
out = F.pad(input, (0, 1, 0, 2), "reflect")
return out
class Interpolate(nn.Module): # Use Interpolate node to replace Pad node
def __init__(self):
super(Interpolate, self).__init__()
def forward(self, input):
h, w = input.shape[2:]
out = F.interpolate(input, size=[h + 2, w + 1], mode="bilinear")
return out
inputTensor = torch.from_numpy(testInputData).cuda()
model0 = Pad().cuda()
torchOut = model0(inputTensor).detach().cpu().numpy()
model1 = Interpolate().cuda() # use the Interpolate node when exporting the model into ONNX
torch.onnx.export(
model0, # the error message produced when exporting the ReflectPad node is recorded in output.txt
inputTensor,
onnxFile,
input_names=["input"],
output_names=["output"],
verbose=True,
keep_initializers_as_inputs=True,
opset_version=13,
dynamic_axes={"input": {
0: "batch_size",
2: "height",
3: "width"
}}
)
print("Succeeded convert model into ONNX!")
# Parse network, rebuild network and do inference in TensorRT ------------------
#os.system("trtexec --onnx=%s --saveEngine=%s --shapes=input:1x3x64x64 --buildOnly" % (onnxFile, trtFile)) # equivalent method using trtexec
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
parser = trt.OnnxParser(network, logger)
if not os.path.exists(onnxFile):
print("Failed finding ONNX file!")
exit()
print("Succeeded finding ONNX file!")
with open(onnxFile, "rb") as model:
if not parser.parse(model.read()):
print("Failed parsing .onnx file!")
for error in range(parser.num_errors):
print(parser.get_error(error))
exit()
print("Succeeded parsing .onnx file!")
inputTensor = network.get_input(0)
profile.set_shape(inputTensor.name, [1, 3, 64, 64], [1, 3, 80, 80], [1, 3, 120, 120])
config.add_optimization_profile(profile)
"""
# find the layer of Resize
for i in range(network.num_layers):
layer = network.get_layer(i)
print(i, "%s,in=%d,out=%d,%s" % (str(layer.type)[10:], layer.num_inputs, layer.num_outputs, layer.name))
for j in range(layer.num_inputs):
tensor = layer.get_input(j)
if tensor == None:
print("\tInput %2d:" % j, "None")
else:
print("\tInput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
for j in range(layer.num_outputs):
tensor = layer.get_output(j)
if tensor == None:
print("\tOutput %2d:" % j, "None")
else:
print("\tOutput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
"""
for i in range(network.num_layers): # Replace Resize layer with Slice layer
layer = network.get_layer(i)
if layer.name == "Resize_22":
sliceLayer = network.add_slice(layer.get_input(0), (0, 0, 0, 0), (1, 1, 1, 1), (1, 1, 1, 1))
sliceLayer.set_input(2, layer.get_input(1)) # set the new shape
sliceLayer.mode = trt.SliceMode.REFLECT
network.unmark_output(layer.get_output(0)) # replace the output tensor of the network
network.mark_output(sliceLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, testInputShape)
inputH0 = np.ascontiguousarray(testInputData.reshape(-1))
outputH0 = np.empty(context.get_binding_shape(1), dtype=trt.nptype(engine.get_binding_dtype(1)))
_, inputD0 = cudart.cudaMalloc(inputH0.nbytes)
_, outputD0 = cudart.cudaMalloc(outputH0.nbytes)
cudart.cudaMemcpy(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2([int(inputD0), int(outputD0)])
cudart.cudaMemcpy(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
cudart.cudaFree(inputD0)
cudart.cudaFree(outputD0)
print("Succeeded running model in TensorRT!")
#printArrayInformation(testInputData)
printArrayInformation(torchOut, "torch")
printArrayInformation(outputH0, "tensorrt")
check(torchOut, outputH0, True)
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/ErrorWhenParsePadNode-TRT-8.4/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
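# the plugin .so must be loaded before building or deserializing the engine so that the "AddScalar" creator is registered in the plugin registry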
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
builder.max_batch_size = 32
network = builder.create_network()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape[1:])
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO): # allocate host buffers for all I/O tensors (bufferH[0] is overwritten with the real input data below)
bufferH.append(np.empty((shape[0], ) + tuple(context.get_tensor_shape(lTensorName[i])), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute(shape[0], bufferD)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([32], 1)
run([32, 32], 1)
run([16, 16, 16], 1)
run([8, 8, 8, 8], 1)
run([32], 1)
run([32, 32], 1)
run([16, 16, 16], 1)
run([8, 8, 8, 8], 1)
print("Test all finish!")
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/UsePluginV2IOExt-TRT8.6/testAddScalarPlugin.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
factorShape = data.transpose(0, 1, 3, 2).shape
constantLayer = network.add_constant(factorShape, trt.Weights(np.ascontiguousarray(np.ones(factorShape, dtype=np.float32))))
matrixMultiplyLayer = network.add_matrix_multiply_deprecated(inputT0, True, constantLayer.get_output(0), True)
matrixMultiplyLayer.transpose0 = False # reset whether the operands are transposed
matrixMultiplyLayer.transpose1 = False
#------------------------------------------------------------------------------- Network
network.mark_output(matrixMultiplyLayer.get_output(0))
engine = builder.build_engine(network, config)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
for buffer in bufferD:
cudart.cudaFree(buffer)
# ===== trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/MatrixMultiplyDeprecatedLayer-TRT8/main.py =====
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from datetime import datetime as dt
from glob import glob
import calibrator
import cv2
import numpy as np
import tensorrt as trt
import torch as t
import torch.nn.functional as F
from cuda import cudart
from torch.autograd import Variable
from torch.utils import data
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
sys.path.append(dataPath)
import loadMnistData
nTrainBatchSize = 128
ptFile = "./model.pt"
onnxFile = "./model.onnx"
trtFile = "./model.plan"
calibrationDataPath = dataPath + "test/"
cacheFile = "./int8.cache"
nCalibration = 1
inferenceImage = dataPath + "8.png"
nHeight = 28
nWidth = 28
os.system("rm -rf ./*.pt ./*.onnx ./*.plan ./*.cache")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
# Create network in pyTorch --------------------------------------------------------------
class Net(t.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = t.nn.Conv2d(1, 32, (5, 5), padding=(2, 2), bias=True)
self.conv2 = t.nn.Conv2d(32, 64, (5, 5), padding=(2, 2), bias=True)
self.fc1 = t.nn.Linear(64 * 7 * 7, 1024, bias=True)
self.fc2 = t.nn.Linear(1024, 10, bias=True)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.reshape(-1, 7 * 7 * 64)
x = F.relu(self.fc1(x))
y = self.fc2(x)
z = F.softmax(y, dim=1)
z = t.argmax(z, dim=1)
return y, z
class MyData(data.Dataset):
def __init__(self, path=dataPath, isTrain=True, nTrain=0, nTest=0):
if isTrain:
if len(glob(dataPath + "train/*.jpg")) == 0:
mnist = loadMnistData.MnistData(path, isOneHot=False)
mnist.saveImage([60000, nTrain][int(nTrain > 0)], path + "train/", True) # 60000 images in total
self.data = glob(path + "train/*.jpg")
else:
if len(glob(dataPath + "test/*.jpg")) == 0:
mnist = loadMnistData.MnistData(path, isOneHot=False)
mnist.saveImage([10000, nTest][int(nTest > 0)], path + "test/", False) # 10000 images in total
self.data = glob(path + "test/*.jpg")
def __getitem__(self, index):
imageName = self.data[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
index = int(imageName[-7])
label[index] = 1
return t.from_numpy(data.reshape(1, nHeight, nWidth).astype(np.float32)), label
def __len__(self):
return len(self.data)
model = Net().cuda()
ceLoss = t.nn.CrossEntropyLoss()
opt = t.optim.Adam(model.parameters(), lr=0.001)
#trainDataset = tv.datasets.MNIST(root=".",train=True,transform=tv.transforms.ToTensor(),download=True)
#testDataset = tv.datasets.MNIST(root=".",train=False,transform=tv.transforms.ToTensor(),download=True)
trainDataset = MyData(isTrain=True, nTrain=600)
testDataset = MyData(isTrain=False, nTest=100)
trainLoader = t.utils.data.DataLoader(dataset=trainDataset, batch_size=nTrainBatchSize, shuffle=True)
testLoader = t.utils.data.DataLoader(dataset=testDataset, batch_size=nTrainBatchSize, shuffle=True)
for epoch in range(40):
for i, (xTrain, yTrain) in enumerate(trainLoader):
xTrain = Variable(xTrain).cuda()
yTrain = Variable(yTrain).cuda()
opt.zero_grad()
y_, z = model(xTrain)
loss = ceLoss(y_, yTrain)
loss.backward()
opt.step()
if not (epoch + 1) % 10:
print("%s, epoch %d, loss = %f" % (dt.now(), epoch + 1, loss.data))
acc = 0
model.eval()
for xTest, yTest in testLoader:
xTest = Variable(xTest).cuda()
yTest = Variable(yTest).cuda()
y_, z = model(xTest)
acc += t.sum(z == t.matmul(yTest, t.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to("cuda:0"))).cpu().numpy()
print("test acc = %f" % (acc / len(testLoader) / nTrainBatchSize))
t.save(model, ptFile)
print("Succeeded building model in pyTorch!")
# Export the model as an .onnx file ---------------------------------------------------------
t.onnx.export(model, t.randn(1, 1, nHeight, nWidth, device="cuda"), onnxFile, input_names=["x"], output_names=["y", "z"], do_constant_folding=True, verbose=True, keep_initializers_as_inputs=True, opset_version=12, dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})
print("Succeeded converting model into ONNX!")
# Load the .onnx file and build the engine in TensorRT ----------------------------------------------
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = calibrator.MyCalibrator(calibrationDataPath, nCalibration, (1, 1, nHeight, nWidth), cacheFile)
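# the calibrator feeds batches from calibrationDataPath and caches the computed scales in int8.cache, so later builds can skip recalibration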
#config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 3 << 30)
config.max_workspace_size = 3 << 30
parser = trt.OnnxParser(network, logger)
if not os.path.exists(onnxFile):
print("Failed finding ONNX file!")
exit()
print("Succeeded finding ONNX file!")
with open(onnxFile, "rb") as model:
if not parser.parse(model.read()):
print("Failed parsing .onnx file!")
for error in range(parser.num_errors):
print(parser.get_error(error))
exit()
print("Succeeded parsing .onnx file!")
inputTensor = network.get_input(0)
profile.set_shape(inputTensor.name, [1, 1, 28, 28], [4, 1, 28, 28], [16, 1, 28, 28])
config.add_optimization_profile(profile)
for i in range(network.num_layers):
layer = network.get_layer(i)
print(i, "%s,in=%d,out=%d,%s" % (str(layer.type)[10:], layer.num_inputs, layer.num_outputs, layer.name))
for j in range(layer.num_inputs):
tensor = layer.get_input(j)
if tensor == None:
print("\tInput %2d:" % j, "None")
else:
print("\tInput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
for j in range(layer.num_outputs):
tensor = layer.get_output(j)
if tensor == None:
print("\tOutput %2d:" % j, "None")
else:
print("\tOutput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
network.unmark_output(network.get_output(0)) # remove output tensor "y"
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, [1, 1, 28, 28])
_, stream = cudart.cudaStreamCreate()
print("EngineBinding0->", engine.get_binding_shape(0), engine.get_binding_dtype(0))
print("EngineBinding1->", engine.get_binding_shape(1), engine.get_binding_dtype(1))
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32)
inputH0 = np.ascontiguousarray(data.reshape(-1))
outputH0 = np.empty(context.get_binding_shape(1), dtype=trt.nptype(engine.get_binding_dtype(1)))
_, inputD0 = cudart.cudaMallocAsync(inputH0.nbytes, stream)
_, outputD0 = cudart.cudaMallocAsync(outputH0.nbytes, stream)
cudart.cudaMemcpyAsync(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
cudart.cudaMemcpyAsync(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
print("inputH0 :", data.shape)
#print(data)
print("outputH0:", outputH0.shape)
print(outputH0)
cudart.cudaStreamDestroy(stream)
cudart.cudaFree(inputD0)
cudart.cudaFree(outputD0)
print("Succeeded running model in TensorRT!")
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/FullyConnectedLayerWhenUsingParserTRT-8.4/pyTorchToTensorRT.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
builder.max_batch_size = 32
network = builder.create_network()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape[1:])
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO):  # allocate host buffers for inputs as well as outputs
bufferH.append(np.empty((shape[0], ) + tuple(context.get_tensor_shape(lTensorName[i])), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute(shape[0], bufferD)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([32], 1)
run([32, 32], 1)
run([16, 16, 16], 1)
run([8, 8, 8, 8], 1)
run([32], 1)
run([32, 32], 1)
run([16, 16, 16], 1)
run([8, 8, 8, 8], 1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/UsePluginV2Ext-TRT8.5/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 3, 4, 5
nOut, nCOut, nHOut, nWOut = 2, 3, 6, 10 # output tensor shape (CHW)
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
resizeLayer = network.add_resize(inputT0)
resizeLayer.shape = (nB, nCOut, nHOut, nWOut)
resizeLayer.resize_mode = trt.ResizeMode.LINEAR # use linear interpolation
resizeLayer.align_corners = True # align the corner pixels; default value is False
#------------------------------------------------------------------------------- Network
network.mark_output(resizeLayer.get_output(0))
#engineString = builder.build_serialized_network(network, config)
#engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
engine = builder.build_engine(network, config)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
for buffer in bufferD:
cudart.cudaFree(buffer)
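# Hedged sketch (TensorRT 8.x assumption; not part of the original sample):
# align_corners is deprecated, and the same behavior is expressed through the
# coordinate-transformation setting of the resize layer:
#   resizeLayer.resize_mode = trt.ResizeMode.LINEAR
#   resizeLayer.coordinate_transformation = trt.ResizeCoordinateTransformation.ALIGN_CORNERS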
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/ResizeLayer-TRT8/Align_corners.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1, -1])
profile.set_shape(inputT1.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputT0) # only inputT0 is actually used
network.mark_output(identityLayer.get_output(0))
#engineString = builder.build_serialized_network(network, config)
#engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
engine = builder.build_engine(network, config)
context = engine.create_execution_context()
context.set_binding_shape(0, [3, 4, 5]) # only set the shape of binding 0
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
print("nBinding=%d, nInput=%d,nOutput=%d" % (engine.num_bindings, nInput, nOutput))
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/BindingEliminate-TRT8/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30 # Deprecated in TensorRT 8.4
#config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30) # use this since TensorRT 8.0
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
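# Minimal usage sketch for the replacement API named in the comment above
# (assumes TensorRT >= 8.4):
#   config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
#   print(config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE))  # -> 1073741824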
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/max_workspace_size-TRT8.4/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
nBatchSize, nHiddenSize = 4, 2
data = np.random.rand(nBatchSize * nHiddenSize).astype(np.float32).reshape(nBatchSize, nHiddenSize)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
inputT0 = network.add_input("inputT0", trt.float32, [-1, nHiddenSize])
layer = network.add_unary(inputT0, trt.UnaryOperation.NEG)
network.mark_output(layer.get_output(0))
profile.set_shape(inputT0.name, [1, nHiddenSize], [nBatchSize, nHiddenSize], [nBatchSize * 2, nHiddenSize])
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config)
contextList = []
for i in range(4):
context = engine.create_execution_context()
context.active_optimization_profile = 0
context.set_binding_shape(0, [i + 1, nHiddenSize])
contextList.append(context)
print("Context binding all? %s" % (["No", "Yes"][int(context.all_binding_shapes_specified)]))
for i in range(engine.num_bindings):
print(i, "Input " if engine.binding_is_input(i) else "Output", engine.get_binding_shape(i), context.get_binding_shape(i))
inputH0 = np.ascontiguousarray(data.reshape(-1))
outputH0 = np.empty(context.get_binding_shape(1), dtype=trt.nptype(engine.get_binding_dtype(1)))
_, inputD0 = cudart.cudaMalloc(inputH0.nbytes)
_, outputD0 = cudart.cudaMalloc(outputH0.nbytes)
for i in range(4):
cudart.cudaMemcpy(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
contextList[i].execute_v2([int(inputD0), int(outputD0)])
cudart.cudaMemcpy(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
print("check result:", np.all(outputH0[:i, ...] == -inputH0.reshape(outputH0.shape)[:i, ...]))
cudart.cudaFree(inputD0)
cudart.cudaFree(outputD0)
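# Hedged sketch (assumption, not from the original sample): contexts that must
# run concurrently each need their own optimization profile; with K profiles the
# bindings are duplicated per profile (index = profileIndex * bindingsPerProfile + j):
#   for k in range(4):
#       p = builder.create_optimization_profile()
#       p.set_shape(inputT0.name, [1, nHiddenSize], [nBatchSize, nHiddenSize], [nBatchSize * 2, nHiddenSize])
#       config.add_optimization_profile(p)
#   engine = builder.build_engine(network, config)
#   for k in range(4):
#       ctx = engine.create_execution_context()
#       ctx.active_optimization_profile = k
#       ctx.set_binding_shape(k * 2, [k + 1, nHiddenSize])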
| trt-samples-for-hackathon-cn-master | cookbook/52-Deprecated/MultiContext-TRT8/main.py |
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
gofmt round will fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
"""Replace a cmd.Flags().String("...", "", "...") with an internationalization
"""
sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
LONG_DESC_MATCH = MatchHandler(r'(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler(r'(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
"""Given a file and a set of matchers, run those matchers
across the file and replace it with the results.
"""
# Run all the matchers
line_number = 0
for line in fileinput.input(filename, inplace=True):
line_number += 1
matched = False
for matcher in matchers:
match = matcher.regex.match(line)
if match:
matcher.replace_fn(match, filename, line_number)
matched = True
break
if not matched:
sys.stdout.write(line)
sys.stdout.flush()
with open(filename, 'r') as datafile:
content = datafile.read()
for matcher in multiline_matchers:
match = matcher.regex.search(content)
while match:
rep = matcher.replace_fn(match, filename, 0)
# Escape back references in the replacement string
# (And escape for Python)
# (And escape for regex)
rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
content = matcher.regex.sub(rep, content, 1)
match = matcher.regex.search(content)
sys.stdout.write(content)
# gofmt the file again
from subprocess import call
call(["goimports", "-w", filename])
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
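# Illustration (hypothetical input): given a source line such as
#     Short:   "Apply a configuration to a resource by filename or stdin",
# SHORT_MATCH rewrites it in place to
#     Short:   i18n.T("Apply a configuration to a resource by filename or stdin"),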
| k8s-operator-libs-main | vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py |
# transformer_main.py
import argparse
import os
import sys
import time
import math
import random
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from fp16 import FP16_Module, FP16_Optimizer
import data
import model as m
from model import DistributedDataParallel as DDP
from reparameterization import apply_weight_norm, remove_weight_norm
from configure_data import configure_data
from learning_rates import AnnealingLR, WarmupLR, SlantedTriangularLR
from arguments import add_general_args, add_model_args, add_unsupervised_data_args
rnn_model = None
def setup_model_and_optim(args, train_data, tokenizer):
ntokens = args.data_size
if args.model.lower() == 'transformer':
embed_tokens = m.Embedding(ntokens, args.decoder_embed_dim, padding_idx=tokenizer.command_name_map['pad'].Id)
model = m.TransformerModel(m.DecoderPreprocessor(args, embed_tokens),
m.TransformerDecoder(args, embed_tokens))
else:
model = m.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
global rnn_model
rnn_model = model
LR_Warmer = None
print('* number of parameters: %d' % sum([p.nelement() for p in model.parameters()]))
if args.cuda:
model.cuda()
optim = None
if args.load is not None and args.load != '':
sd = torch.load(args.load, map_location='cpu')
if args.load_optim:
optim_sd = torch.load(os.path.join(os.path.dirname(args.load), 'optim.pt'), map_location='cpu')
rng = torch.load(os.path.join(os.path.dirname(args.load), 'rng.pt'))
torch.cuda.set_rng_state(rng[0])
torch.set_rng_state(rng[1])
try:
model.load_state_dict(sd)
except:
if hasattr(model, 'rnn'):
apply_weight_norm(model.rnn, hook_child=False)
else:
apply_weight_norm(model, hook_child=False)
model.load_state_dict(sd)
remove_weight_norm(model)
if not args.no_weight_norm:
if hasattr(model, 'rnn'):
apply_weight_norm(model.rnn, hook_child=False)
else:
apply_weight_norm(model, hook_child=False)
if optim is None:
optim_choice = 'Adam' if args.stlr_cut_frac else args.optim
if args.fp16:
model = FP16_Module(model)
optim = eval('torch.optim.' + optim_choice)(model.parameters(), lr=args.lr) # honor optim_choice (Adam is forced when using slanted triangular LR)
optim = FP16_Optimizer(optim,
static_loss_scale=args.loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale)
else:
optim = eval('torch.optim.' + optim_choice)(model.parameters(), lr=args.lr)
if args.load_optim:
optim.load_state_dict(optim_sd)
# add linear learning rate scheduler
if train_data is not None:
if args.constant_decay:
num_iters = args.constant_decay
else:
num_iters = args.train_iters * args.epochs
init_step = -1
if args.load_optim:
#TODO: this no longer makes sense given the new data loaders
init_step = optim_sd['iter']-optim_sd['skipped_iter']
train_data.batch_sampler.start_iter = (optim_sd['iter'] % len(train_data)) + 1
warmup_iter = args.warmup * num_iters
if args.stlr_cut_frac is not None:
LR = SlantedTriangularLR(optim, cut_frac=args.stlr_cut_frac, num_iters=num_iters)
else:
LR = AnnealingLR(optim, start_lr=args.lr, warmup_iter=warmup_iter, num_iters=num_iters, decay_style=args.decay_style)
if args.warmup != 0:
LR_Warmer = WarmupLR(optim, warmup_iter, last_iter=init_step)
# wrap model for distributed training
if args.world_size > 1:
model = DDP(model)
criterion = nn.CrossEntropyLoss(reduction='none')
return model, optim, LR, LR_Warmer, criterion
###############################################################################
# Training code
###############################################################################
# get_batch subdivides the source data into chunks of length args.seq_length.
# If source is equal to the example output of the data loading example, with
# a seq_length limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivision of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the data loader. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM. A Variable representing an appropriate
# shard reset mask of the same dimensions is also returned.
def get_batch(data, args):
reset_mask_batch = data[1].long()
padding_mask_batch = data[2].float()
data = data[0].long()
if args.cuda:
data = data.cuda()
reset_mask_batch = reset_mask_batch.cuda()
padding_mask_batch = padding_mask_batch.cuda()
text_batch = Variable(data[:,:-1].t().contiguous(), requires_grad=False)
target_batch = Variable(data[:,1:].t().contiguous(), requires_grad=False)
reset_mask_batch = Variable(reset_mask_batch[:,:text_batch.size(0)].t().contiguous(), requires_grad=False)
padding_mask_batch = Variable(padding_mask_batch[:,:text_batch.size(0)].t().contiguous(), requires_grad=False)
return text_batch, target_batch, reset_mask_batch, padding_mask_batch
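# Worked example (illustrative): if data[0] holds token ids of shape
# (batch, seq_len + 1), then text = data[:, :-1].t() and target = data[:, 1:].t()
# both come out with shape (seq_len, batch), so target[t] is the character that
# follows text[t]; the reset and padding masks are sliced and transposed to match.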
def init_hidden(args):
if rnn_model is not None:
rnn_model.rnn.init_hidden(args.batch_size)
def evaluate(data_source, model, criterion, args):
# Turn on evaluation mode which disables dropout.
model.eval()
init_hidden(args)
total_loss = 0
ntokens = args.data_size
max_iters = args.eval_iters
with torch.no_grad():
data_iter = iter(data_source)
i = 0
while i < max_iters:
batch = next(data_iter)
data, targets, reset_mask, padding_mask = get_batch(batch, args)
output, hidden = model(data, reset_mask=reset_mask)
losses = criterion(output.view(-1, ntokens).contiguous().float(), targets.view(-1).contiguous())
padding_mask = padding_mask.view(-1)
portion_unpadded = padding_mask.sum() / padding_mask.size(0)
loss = portion_unpadded * torch.mean(losses * (padding_mask.view(-1).float()))
if isinstance(model, DDP):
torch.distributed.all_reduce(loss.data)
loss.data /= args.world_size
total_loss += loss.data.float()
i+=1
return (total_loss / max_iters).item()
def train(epoch, model, optim, train_data, LR, LR_Warmer, criterion, args, total_iters=0, skipped_iters=0, elapsed_time=False):
# Turn on training mode which enables dropout.
model.train()
init_hidden(args)
total_loss = 0
start_time = time.time()
t0 = start_time
ntokens = args.data_size
curr_loss = 0.
distributed = isinstance(model, DDP)
max_iters = args.train_iters
def log(epoch, i, lr, ms_iter, total_time, loss, scale):
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:.2E} | ms/batch {:.3E} | total time {:.3E} | loss {:.2E} | ppl {:8.2f} | loss scale {:8.2f}'.format(
epoch, i, max_iters, lr,
ms_iter, total_time, loss, math.exp(min(loss, 20)), scale
)
)
i = 0
data_iter = iter(train_data)
while i < max_iters:
batch = next(data_iter)
data, targets, reset_mask, padding_mask = get_batch(batch, args)
optim.zero_grad()
output, _ = model(data, reset_mask=reset_mask, chkpt_grad=args.chkpt_grad)
losses = criterion(output.view(-1, ntokens).contiguous().float(), targets.view(-1).contiguous())
padding_mask = padding_mask.view(-1)
portion_unpadded = padding_mask.sum() / padding_mask.size(0)
loss = portion_unpadded * torch.mean(losses * (padding_mask.view(-1).float()))
total_loss += loss.data.float()
if args.fp16:
optim.backward(loss, update_master_grads=False)
else:
loss.backward()
if distributed:
torch.distributed.all_reduce(loss.data)
loss.data = loss.data/args.world_size
model.allreduce_params()
# clipping gradients helps prevent the exploding gradient problem in RNNs / LSTMs.
if args.clip > 0:
if not args.fp16:
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
else:
optim.clip_master_grads(clip=args.clip)
if args.fp16:
optim.update_master_grads()
optim.step()
# step learning rate and log training progress
lr = optim.param_groups[0]['lr']
if not args.fp16:
LR.step()
if args.warmup != 0:
LR_Warmer.step()
else:
# if fp16 optimizer skips gradient step due to explosion do not step lr
if not optim.overflow:
LR.step()
if args.warmup != 0:
LR_Warmer.step()
else:
skipped_iters += 1
if ((i+1) % args.log_interval == 0):
cur_loss = total_loss.item() / args.log_interval
cur_time = time.time()
elapsed = cur_time - start_time
total_elapsed = cur_time - t0 + elapsed_time
log(epoch, i+1, lr, elapsed * 1000 / args.log_interval, total_elapsed,
cur_loss, args.loss_scale if not args.fp16 else optim.loss_scale)
total_loss = 0
start_time = cur_time
sys.stdout.flush()
# save current model progress. If distributed only save from worker 0
if args.save_iters and total_iters % (args.save_iters) == 0 and total_iters > 0 and args.rank < 1:
if args.rank < 1:
with open(os.path.join(os.path.splitext(args.save)[0], 'e%s.pt'%(str(total_iters),)), 'wb') as f:
torch.save(model.state_dict(), f)
if args.save_optim:
with open(os.path.join(os.path.splitext(args.save)[0], 'optim.pt'), 'wb') as f:
optim_sd = optim.state_dict()
optim_sd['iter'] = total_iters
optim_sd['skipped_iter'] = skipped_iters
torch.save(optim_sd, f)
del optim_sd
with open(os.path.join(os.path.splitext(args.save)[0], 'rng.pt'), 'wb') as f:
torch.save((torch.cuda.get_rng_state(), torch.get_rng_state()),f)
if args.cuda:
torch.cuda.synchronize()
total_iters += 1
i+=1
#final logging
elapsed_iters = max_iters % args.log_interval
if elapsed_iters == 0:
return cur_loss, skipped_iters
cur_time = time.time()
elapsed = cur_time - start_time
total_elapsed = cur_time - t0 + elapsed_time
cur_loss = total_loss.item() / elapsed_iters
log(epoch, max_iters, lr, elapsed * 1000/ elapsed_iters, total_elapsed,
cur_loss, args.loss_scale if not args.fp16 else optim.loss_scale)
return cur_loss, skipped_iters
def main():
parser = argparse.ArgumentParser(description='PyTorch Sentiment-Discovery Language Modeling')
parser = add_general_args(parser)
parser = add_model_args(parser)
data_config, data_parser = add_unsupervised_data_args(parser)
args = parser.parse_args()
torch.backends.cudnn.enabled = False
args.cuda = torch.cuda.is_available()
if args.multinode_init:
args.rank = int(os.getenv('RANK', 0))
args.world_size = int(os.getenv("WORLD_SIZE", 1))
# initialize distributed process group and set device
if args.rank > 0:
torch.cuda.set_device(args.rank % torch.cuda.device_count())
if args.world_size > 1:
init_method='tcp://'
if not args.multinode_init:
init_method+='localhost:6000'
else:
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6666')
init_method+=master_ip+':'+master_port
torch.distributed.init_process_group(backend=args.distributed_backend, world_size=args.world_size,
rank=args.rank, init_method=init_method)
# Set the random seed manually for reproducibility.
if args.seed is not None and args.seed > 0:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.loss_scale != 1 and args.dynamic_loss_scale:
raise RuntimeError("Static loss scale and dynamic loss scale cannot be used together.")
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
args.data_size = tokenizer.num_tokens
model, optim, LR, LR_Warmer, criterion = setup_model_and_optim(args, train_data, tokenizer)
lr = args.lr
best_val_loss = None
# If saving process intermittently create directory for saving
if args.save_iters > 0 and not os.path.exists(os.path.splitext(args.save)[0]) and args.rank < 1:
os.makedirs(os.path.splitext(args.save)[0])
# At any point you can hit Ctrl + C to break out of training early.
try:
total_iters = 0
elapsed_time = 0
skipped_iters = 0
if args.load_optim:
optim_sd = torch.load(os.path.join(os.path.dirname(args.load), 'optim.pt'), map_location='cpu')
total_iters = optim_sd['iter']
skipped_iters = optim_sd['skipped_iter']
for epoch in range(1, args.epochs+1):
if args.rank <= 0:
with open(args.save+'.train_lock', 'wb') as f:
pass
epoch_start_time = time.time()
val_loss, skipped_iters = train(epoch, model, optim, train_data, LR, LR_Warmer, criterion,
args, total_iters, skipped_iters, elapsed_time)
elapsed_time += time.time() - epoch_start_time
total_iters += args.train_iters
if val_data is not None:
print('entering eval')
val_loss = evaluate(val_data, model, criterion, args)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.4f} | '
'valid ppl {:8.4f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(min(val_loss, 20))))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if (not best_val_loss or val_loss < best_val_loss) and args.rank <= 0:
torch.save(model.state_dict(), args.save)
best_val_loss = val_loss
if args.world_size == 1 or torch.distributed.get_rank() == 0:
try:
os.remove(args.save+'.train_lock')
except:
pass
# if args.world_size > 1:
# torch.distributed.barrier()
torch.cuda.synchronize()
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
#while os.path.exists(args.save+'.train_lock'):
# time.sleep(1)
# Load the best saved model.
#if os.path.exists(args.save):
# model.load_state_dict(torch.load(args.save, 'cpu'))
# if not args.no_weight_norm and args.rank <= 0:
# remove_weight_norm(model)
# torch.save(model.state_dict(), args.save)
if test_data is not None:
# Run on test data.
print('entering test')
test_loss = evaluate(test_data, model, criterion, args)
print('=' * 89)
print('| End of training | test loss {:5.4f} | test ppl {:8.4f}'.format(
test_loss, math.exp(min(test_loss, 20))))
print('=' * 89)
if __name__ == "__main__":
main()
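# Illustrative invocation (all flags are defined in arguments.py; values are examples):
#   python pretrain.py --model mLSTM --nhid 4096 --fp16 --dynamic-loss-scale \
#       --train-iters 1000 --epochs 1 --save lang_model.pt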
| sentiment-discovery-master | pretrain.py |
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import os
import math
import argparse
import torch
from torch.autograd import Variable
from apex.reparameterization import apply_weight_norm, remove_weight_norm
import model
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style({'font.family': 'monospace'})
parser = argparse.ArgumentParser(description='PyTorch Sentiment Discovery Generation/Visualization')
# Model parameters.
parser.add_argument('--model', type=str, default='mLSTM',
help='type of recurrent net (RNNTanh, RNNReLU, LSTM, mLSTM, GRU)')
parser.add_argument('--emsize', type=int, default=64,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=4096,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--all_layers', action='store_true',
help='if more than one layer is used, extract features from all layers, not just the last layer')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--load_model', type=str, default='model.pt',
help='model checkpoint to use')
parser.add_argument('--save', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--gen_length', type=int, default=1000,
help='number of tokens to generate')
parser.add_argument('--seed', type=int, default=-1,
help='random seed')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
parser.add_argument('--fp16', action='store_true',
help='run in fp16 mode')
parser.add_argument('--neuron', type=int, default=-1,
help='''specifies which neuron to analyze for visualization or overwriting.
Defaults to maximally weighted neuron during classification steps''')
parser.add_argument('--visualize', action='store_true',
help='generates heatmap of main neuron activation [not working yet]')
parser.add_argument('--overwrite', type=float, default=None,
help='Overwrite value of neuron s.t. generated text reads as a +1/-1 classification')
parser.add_argument('--text', default='',
help='warm up generation with specified text first')
args = parser.parse_args()
args.data_size = 256
args.cuda = torch.cuda.is_available()
# Set the random seed manually for reproducibility.
if args.seed >= 0:
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
#if args.temperature < 1e-3:
# parser.error("--temperature has to be greater or equal 1e-3")
model = model.RNNModel(args.model, args.data_size, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
if args.fp16:
model.half()
with open(args.load_model, 'rb') as f:
sd = torch.load(f)
try:
model.load_state_dict(sd)
except:
apply_weight_norm(model.rnn)
model.load_state_dict(sd)
remove_weight_norm(model)
def get_neuron_and_polarity(sd, neuron):
"""return a +/- 1 indicating the polarity of the specified neuron in the module"""
if neuron == -1:
neuron = None
if 'classifier' in sd:
sd = sd['classifier']
if 'weight' in sd:
weight = sd['weight']
else:
return neuron, 1
else:
return neuron, 1
if neuron is None:
val, neuron = torch.max(torch.abs(weight[0].float()), 0)
neuron = neuron.item()
val = weight[0][neuron]
if val >= 0:
polarity = 1
else:
polarity = -1
return neuron, polarity
def process_hidden(cell, hidden, neuron, mask=False, mask_value=1, polarity=1):
feat = cell.data[:, neuron]
rtn_feat = feat.clone()
if mask:
# feat.fill_(mask_value*polarity)
hidden.data[:, neuron].fill_(mask_value*polarity)
return rtn_feat[0]
def model_step(model, input, neuron=None, mask=False, mask_value=1, polarity=1):
out, _ = model(input)
if neuron is not None:
hidden = model.rnn.rnns[-1].hidden
if len(hidden) > 1:
hidden, cell = hidden
else:
hidden = cell = hidden
feat = process_hidden(cell, hidden, neuron, mask, mask_value, polarity)
return out, feat
return out
def sample(out, temperature):
if temperature == 0:
char_idx = torch.max(out.squeeze().data, 0)[1].item()
else:
word_weights = out.float().squeeze().data.div(temperature).exp().cpu()
char_idx = torch.multinomial(word_weights, 1)[0]
return char_idx
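# Temperature intuition (worked example): dividing logits by T < 1 sharpens the
# softmax while T > 1 flattens it. For logits [2, 1, 0]:
#   T = 0.5 -> softmax([4, 2, 0])   ~ [0.87, 0.12, 0.02]
#   T = 1.0 -> softmax([2, 1, 0])   ~ [0.67, 0.24, 0.09]
#   T = 2.0 -> softmax([1, 0.5, 0]) ~ [0.51, 0.31, 0.19]
# T = 0 falls back to the deterministic argmax branch above.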
def process_text(text, model, input, temperature, neuron=None, mask=False, overwrite=1, polarity=1):
chrs = []
vals = []
for c in text:
input.data.fill_(int(ord(c)))
if neuron is not None:
ch, val = model_step(model, input, neuron, mask, overwrite, polarity)
vals.append(val)
else:
ch = model_step(model, input, neuron, mask, overwrite, polarity)
# ch = sample(ch, temperature)
input.data.fill_(sample(ch, temperature))
chrs = list(text)
# chrs.append(chr(ch))
return chrs, vals
def generate(gen_length, model, input, temperature, neuron=None, mask=False, overwrite=1, polarity=1):
chrs = []
vals = []
for i in range(gen_length):
chrs.append(chr(input.data[0]))
if neuron is not None:
ch, val = model_step(model, input, neuron, mask, overwrite, polarity)
vals.append(val)
else:
ch = model_step(model, input, neuron, mask, overwrite, polarity)
ch = sample(ch, temperature)
input.data.fill_(ch)
# chrs.append(chr(ch))
# chrs.pop()
return chrs, vals
def make_heatmap(text, values, save=None, polarity=1):
cell_height=.325
cell_width=.15
n_limit = 74
text = list(map(lambda x: x.replace('\n', '\\n'), text))
num_chars = len(text)
total_chars = math.ceil(num_chars/float(n_limit))*n_limit
mask = np.array([0]*num_chars + [1]*(total_chars-num_chars))
text = np.array(text+[' ']*(total_chars-num_chars))
values = np.array(values+[0]*(total_chars-num_chars))
values *= polarity
values = values.reshape(-1, n_limit)
text = text.reshape(-1, n_limit)
mask = mask.reshape(-1, n_limit)
num_rows = len(values)
plt.figure(figsize=(cell_width*n_limit, cell_height*num_rows))
hmap=sns.heatmap(values, annot=text, mask=mask, fmt='', vmin=-1, vmax=1, cmap='RdYlGn',
xticklabels=False, yticklabels=False, cbar=False)
plt.tight_layout()
if save is not None:
plt.savefig(save)
# clear plot for next graph since we returned `hmap`
plt.clf()
return hmap
neuron, polarity = get_neuron_and_polarity(sd, args.neuron)
neuron = neuron if args.visualize or args.overwrite is not None else None
mask = args.overwrite is not None
model.eval()
hidden = model.rnn.init_hidden(1)
input = Variable(torch.LongTensor([int(ord('\n'))]))
if args.cuda:
input = input.cuda()
input = input.view(1,1).contiguous()
model_step(model, input, neuron, mask, args.overwrite, polarity)
input.data.fill_(int(ord(' ')))
out = model_step(model, input, neuron, mask, args.overwrite, polarity)
if neuron is not None:
out = out[0]
input.data.fill_(sample(out, args.temperature))
outchrs = []
outvals = []
#with open(args.save, 'w') as outf:
with torch.no_grad():
if args.text != '':
chrs, vals = process_text(args.text, model, input, args.temperature, neuron, mask, args.overwrite, polarity)
outchrs += chrs
outvals += vals
chrs, vals = generate(args.gen_length, model, input, args.temperature, neuron, mask, args.overwrite, polarity)
outchrs += chrs
outvals += vals
outstr = ''.join(outchrs)
print(outstr)
with open(args.save, 'w') as f:
f.write(outstr)
if args.visualize:
make_heatmap(outchrs, outvals, os.path.splitext(args.save)[0]+'.png', polarity)
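# Illustrative invocation (flags defined by the parser above; values are examples):
#   python generate.py --load_model mlstm.pt --visualize --temperature 0.8 \
#       --gen_length 256 --save generated.txt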
| sentiment-discovery-master | generate.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Raul Puri ([email protected])
###############################################################################
from configure_data import configure_data
def add_general_args(parser):
group = parser.add_argument_group('general', 'general purpose arguments')
group.add_argument('--model', type=str, default='mLSTM',
help='type of recurrent net (RNNTanh, RNNReLU, LSTM, mLSTM, GRU)')
group.add_argument('--lr', type=float, default=5e-4,
help='initial learning rate')
group.add_argument('--constant-decay', type=int, default=None,
help='number of iterations to decay LR over,' + \
' None means decay to zero over training')
group.add_argument('--clip', type=float, default=0,
help='gradient clipping')
group.add_argument('--epochs', type=int, default=1,
help='upper epoch limit')
group.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
group.add_argument('--seed', type=int, default=1234,
help='random seed')
group.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
group.add_argument('--save', type=str, default='lang_model.pt',
help='path to save the final model')
group.add_argument('--load', type=str, default=None,
help='path to a previously saved model checkpoint')
group.add_argument('--load-optim', action='store_true',
help='load most recent optimizer to resume training')
group.add_argument('--save-iters', type=int, default=10000, metavar='N',
help='save current model progress interval')
group.add_argument('--save-optim', action='store_true',
help='save most recent optimizer')
group.add_argument('--fp16', action='store_true',
help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
group.add_argument('--dynamic-loss-scale', action='store_true',
help='Dynamically look for loss scalar for fp16 convergance help.')
group.add_argument('--no-weight-norm', action='store_true',
help='Add weight normalization to model.')
group.add_argument('--loss-scale', type=float, default=1,
help='Static loss scaling, positive power of 2 values can improve fp16 convergence.')
group.add_argument('--world-size', type=int, default=1,
help='number of distributed workers')
group.add_argument('--distributed-backend', default='gloo',
help='which backend to use for distributed training. One of [gloo, nccl]')
group.add_argument('--rank', type=int, default=-1,
help='distributed worker rank. Typically set automatically from multiproc.py')
group.add_argument('--optim', default='Adam',
help='One of PyTorch\'s optimizers (Adam, SGD, etc). Default: Adam')
group.add_argument('--chkpt-grad', action='store_true',
help='checkpoint gradients to allow for training with larger models and sequences')
group.add_argument('--multinode-init', action='store_true',
help='initialize multinode. Environment variables should be set as according to https://pytorch.org/docs/stable/distributed.html')
return parser
def add_unsupervised_data_args(parser):
data_config, data_group = configure_data(parser)
# Set unsupervised L2R language modeling option defaults
data_config.set_defaults(data_set_type='L2R', transpose=True)
data_group.set_defaults(split='100,1,1')
# Create unsupervised-L2R-specific options
group = parser.add_argument_group('language modeling data options')
group.add_argument('--seq-length', type=int, default=256,
help="Maximum sequence length to process (for unsupervised rec)")
group.add_argument('--eval-seq-length', type=int, default=256,
help="Maximum sequence length to process for evaluation")
group.add_argument('--lazy', action='store_true',
help='whether to lazy evaluate the data set')
group.add_argument('--persist-state', type=int, default=1,
help='0=reset state after every sample in a shard, 1=reset state after every shard, -1=never reset state')
group.add_argument('--train-iters', type=int, default=1000,
help="""number of iterations per epoch to run training for""")
group.add_argument('--eval-iters', type=int, default=100,
help="""number of iterations per epoch to run validation/test for""")
group.add_argument('--decay-style', type=str, default=None, choices=['constant', 'linear', 'cosine', 'exponential'],
help='one of constant(None), linear, cosine, or exponential')
group.add_argument('--stlr-cut-frac', type=float, default=None,
help='what proportion of iterations to peak the slanted triangular learning rate')
group.add_argument('--warmup', type=float, default=0,
help='percentage of data to warmup on (.03 = 3 percent of all training iters). Default 0')
return data_config, parser
def add_model_args(parser):
args, _ = parser.parse_known_args()
if args.model.lower() == 'transformer':
return add_transformer_args(parser)
else:
return add_recurrent_args(parser)
def add_recurrent_args(parser):
group = parser.add_argument_group('recurrent', 'arguments for building recurrent nets')
group.add_argument('--num-hidden-warmup', type=int, default=0,
help='number of times to conduct hidden state warmup passes through inputs to be used for transfer tasks')
group.add_argument('--emsize', type=int, default=64,
help='size of word embeddings')
group.add_argument('--nhid', type=int, default=4096,
help='number of hidden units per layer')
group.add_argument('--nlayers', type=int, default=1,
help='number of layers')
group.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
group.add_argument('--neural-alphabet', action='store_true',
help='whether to use the neural alphabet encoder structure')
group.add_argument('--alphabet-size', type=int, default=128,
help='number of letters in neural alphabet')
group.add_argument('--ncontext', type=int, default=2,
help='number of context characters used in neural alphabet encoder structure')
group.add_argument('--residuals', action='store_true',
help='whether to implement residual connections between stackedRNN layers')
return parser
def add_transformer_args(parser):
group = parser.add_argument_group('transformer', 'args for specifically building a transformer network')
group.add_argument('--dropout', type=float, default=0.1,
help='dropout probability -- transformer only')
group.add_argument('--attention-dropout', type=float, default=0.0,
help='dropout probability for attention weights -- transformer only')
group.add_argument('--relu-dropout', type=float, default=0.1,
help='dropout probability after ReLU in FFN -- transformer only')
#ignore the encoder args for transformer. That's meant for seq2seq transformer
group.add_argument('--encoder-embed-path', type=str, default=None,
help='path to pre-trained encoder embedding')
group.add_argument('--encoder-embed-dim', type=int, default=64, # originally 512 but 64 for char level
help='encoder embedding dimension')
group.add_argument('--encoder-ffn-embed-dim', type=int, default=256, # originally 2048 but scaled for char level
help='encoder embedding dimension for FFN')
group.add_argument('--encoder-layers', type=int, default=6,
help='num encoder layers')
group.add_argument('--encoder-attention-heads', type=int, default=8,
help='num encoder attention heads')
group.add_argument('--encoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each encoder block')
group.add_argument('--encoder-learned-pos', default=False, action='store_true',
help='use learned positional embeddings in the encoder')
group.add_argument('--decoder-embed-path', type=str, default=None,
help='path to pre-trained decoder embedding')
group.add_argument('--decoder-embed-dim', type=int, default=64, # originally 512 but 64 for char level
help='decoder embedding dimension')
group.add_argument('--decoder-ffn-embed-dim', type=int, default=256, # originally 2048 but scaled for char level
help='decoder embedding dimension for FFN')
group.add_argument('--decoder-layers', type=int, default=6,
help='num decoder layers')
group.add_argument('--decoder-attention-heads', type=int, default=8,
help='num decoder attention heads')
group.add_argument('--decoder-learned-pos', default=False, action='store_true',
help='use learned positional embeddings in the decoder')
group.add_argument('--decoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
group.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
help='share decoder input and output embeddings')
group.add_argument('--share-all-embeddings', default=False, action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
group.add_argument('--use-final-embed', action='store_true',
help='whether to use the final timestep embeddings as output of transformer (in classification)')
return parser
def add_classifier_model_args(parser):
group = parser.add_argument_group('classifier', 'arguments used in training a classifier on top of a language model')
group.add_argument('--max-seq-len', type=int, default=None,
help='maximum sequence length to use for classification. Transformer uses a lot of memory and needs shorter sequences.')
group.add_argument('--classifier-hidden-layers', default=None, nargs='+',
help='sizes of hidden layers for binary classifier on top of language model, so excluding the input layer and final "1"')
group.add_argument('--classifier-hidden-activation', type=str, default='PReLU',
help='[defaults to PReLU] activations used in hidden layers of MLP classifier (ReLU, Tanh, torch.nn module names)')
group.add_argument('--classifier-dropout', type=float, default=0.1,
help='Dropout in layers of MLP classifier')
group.add_argument('--all-layers', action='store_true',
help='if more than one layer is used, extract features from all layers, not just the last layer')
group.add_argument('--concat-max', action='store_true',
help='whether to concatenate max pools onto cell/hidden states of RNNFeaturizer')
group.add_argument('--concat-min', action='store_true',
help='whether to concatenate min pools onto cell/hidden states of RNNFeaturizer')
group.add_argument('--concat-mean', action='store_true',
help='whether to concatenate mean pools onto cell/hidden states of RNNFeaturizer')
group.add_argument('--get-hidden', action='store_true',
help='whether to use the hidden state (as opposed to cell state) as features for classifier')
group.add_argument('--neurons', default=1, type=int,
help='number of neurons to extract as features')
group.add_argument('--heads-per-class', type=int, default=1,
help='set > 1 for multiple heads per class prediction (variance, regularization)')
group.add_argument('--use-softmax', action='store_true', help='use softmax for classification')
group.add_argument('--double-thresh', action='store_true',
help='whether to report all metrics at once')
group.add_argument('--dual-thresh', action='store_true',
help='for 2 columns positive and negative, thresholds classes s.t. positive, negative, neutral labels are available')
group.add_argument('--joint-binary-train', action='store_true',
help='Train with dual thresholded (positive/negative/neutral) classes and other normal binary classes.\
Arguments to non-binary-cols must be passed with positive negative classes first.\
Ex: `--non-binary-cols positive negative <other classes>`')
group.set_defaults(epochs=5)
return parser
def add_sentiment_transfer_args(parser):
data_config, data_group = configure_data(parser)
# Set transfer learning data option defaults
data_group.set_defaults(split='1.', data=['data/binary_sst/train.csv'])
data_group.set_defaults(valid=['data/binary_sst/val.csv'], test=['data/binary_sst/test.csv'])
# Create transfer-learning-specific options
group = parser.add_argument_group('sentiment_transfer', 'arguments used for sentiment_transfer script')
group.add_argument('--mcc', action='store_true',
help='whether to use the matthews correlation coefficient as a measure of accuracy (for CoLA)')
group.add_argument('--save-results', type=str, default='sentiment',
help='path to save intermediate and final results of transfer')
group.add_argument('--no-test-eval', action='store_true',
help='whether to not evaluate the test model (useful when your test set has no labels)')
group.add_argument('--write-results', type=str, default='',
help='write results of model on test (or train if none is specified) data to specified filepath ')
group.add_argument('--use-cached', action='store_true',
help='reuse cached featurizations from a previous run')
group.add_argument('--drop-neurons', action='store_true',
help='drop top neurons instead of keeping them')
return data_config, data_group, group, parser
def add_run_classifier_args(parser):
data_config, data_group = configure_data(parser)
# Set classification data option defaults
data_group.set_defaults(split='1.', data=['data/binary_sst/train.csv'])
data_group.set_defaults(shuffle=False)
# Create classification-specific options
group = parser.add_argument_group('run_classifier', 'arguments used for run classifier script')
group.add_argument('--save_probs', type=str, default='clf_results.npy',
help='path to save numpy of predicted probabilities')
group.add_argument('--write-results', type=str, default='',
help='path to location for CSV -- write results of model on data \
input strings + results and variances. Will not write if empty')
return data_config, data_group, group, parser
def add_finetune_classifier_args(parser):
data_config, data_group = configure_data(parser)
# Set finetuning data option defaults
data_group.set_defaults(split='1.', data=['data/binary_sst/train.csv'])
data_group.set_defaults(valid=['data/binary_sst/val.csv'], test=['data/binary_sst/test.csv'])
data_group.set_defaults(shuffle=True)
# Create finetuning-specific options
parser.set_defaults(get_hidden=True)
data_group.add_argument('--seq-length', type=int, default=256,
help="Maximum sequence length to process (for unsupervised rec)")
data_group.add_argument('--lazy', action='store_true',
help='whether to lazy evaluate the data set')
group = parser.add_argument_group('finetune_classifier', 'arguments used for finetune script')
group.add_argument('--use-logreg', action='store_true',
help='use scikitlearn logistic regression instead of finetuning whole classifier')
group.add_argument('--stlr-cut-frac', type=float, default=None,
help='what proportion of iterations to peak the slanted triangular learning rate')
group.add_argument('--cos-cut-frac', type=float, default=None,
help='what proportion of iterations to peak the cosine learning rate')
group.add_argument('--lr-decay', type=float, default=1.0,
help='amount to multiply lr by to decay every epoch')
group.add_argument('--momentum', type=float, default=0.0,
help='momentum for SGD')
group.add_argument('--weight-decay', type=float, default=0,
help='weight decay for MLP optimization')
group.add_argument('--freeze-lm', action='store_true',
help='keep language model frozen -- don\'t backprop to Transformer/RNN')
group.add_argument('--aux-lm-loss', action='store_true',
help='whether to use language modeling objective as aux loss')
group.add_argument('--aux-lm-loss-weight', type=float, default=1.0,
help='LM model weight -- NOTE: default is 1.0 for backward compatibility. Way too high -- reasonable around 0.02')
group.add_argument('--aux-head-variance-loss-weight', type=float, default=0,
help='Set above 0.0 to force heads to learn different final-layer embeddings. Reasonable value ~10.-100.')
group.add_argument('--use-class-multihead-average', action='store_true',
help='Use average output for multihead per class -- not necessary to use with --class-single-threshold [just average the thresholds]')
group.add_argument('--thresh-test-preds', type=str, default=None,
help='path to thresholds for test outputs')
group.add_argument('--report-metric', type=str, default='f1', choices=['jacc', 'acc', 'f1', 'mcc', 'precision', 'recall', 'var', 'all'],
help='what metric to report performance (save best model)')
group.add_argument('--all-metrics', action='store_true',
help='Overloads report metrics and reports all metrics at once')
group.add_argument('--threshold-metric', type=str, default='f1', choices=['jacc', 'acc', 'f1', 'mcc', 'precision', 'recall', 'var', 'all'],
help='which metric to use when choosing ideal thresholds?')
group.add_argument('--micro', action='store_true',
help='whether to use micro averaging for metrics')
group.add_argument('--global-tweaks', type=int, default=0,
help='HACK: Pass int (1000 for example) to tweak individual thresholds toward best global average [good for SemEval]. Will increase threshold on rare, hard to measure, categories.')
group.add_argument('--save-finetune', action='store_true',
help='save finetuned models at every epoch of finetuning')
group.add_argument('--model-version-name', type=str, default='classifier',
help='space to version model name -- for saving')
group.add_argument('--automatic-thresholding', action='store_true',
help='automatically select classification thresholds based on validation performance. \
(test results are also reported using the thresholds)')
group.add_argument('--no-test-eval', action='store_true',
help='Do not report test metrics, write test and val results to disk instead.')
group.add_argument('--decay-style', type=str, default=None, choices=['constant', 'linear', 'cosine', 'exponential'],
help='Learning rate decay, one of constant(None), linear, cosine, or exponential')
group.add_argument('--warmup-epochs', type=float, default=0.,
help='number of epochs to warm up learning rate over.')
group.add_argument('--decay-epochs', type=float, default=-1,
help='number of epochs to decay for. If -1 decays for all of training')
group.add_argument('--load-finetuned', action='store_true',
help='load not just the language model but a previously finetuned full classifier checkpoint')
return data_config, data_group, group, parser
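# A minimal wiring sketch (toy invocation, mirroring get_data_and_args in
# run_classifier.py; the add_general_args / add_model_args /
# add_classifier_model_args helpers are defined earlier in this module):
#
#   parser = argparse.ArgumentParser(description='PyTorch Sentiment Discovery')
#   parser = add_general_args(parser)
#   parser = add_model_args(parser)
#   parser = add_classifier_model_args(parser)
#   data_config, data_group, group, parser = add_finetune_classifier_args(parser)
#   args = parser.parse_args()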
| sentiment-discovery-master | arguments.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2017, openai. All rights reserved.
###############################################################################
"""
Modified version of openai implementation https://github.com/openai/generating-reviews-discovering-sentiment/blob/master/utils.py
Modified to handle multiple classes, different metrics, thresholding, and dropping neurons.
"""
import collections.abc
import numpy as np
from sklearn.linear_model import LogisticRegression
from metric_utils import update_info_dict, get_metric
from threshold import _binary_threshold, _neutral_threshold_two_output
def train_logreg(trX, trY, vaX=None, vaY=None, teX=None, teY=None, penalty='l1', max_iter=100,
                 C=2**np.arange(-8, 1).astype(float), seed=42, model=None, eval_test=True, neurons=None,
drop_neurons=False, report_metric='acc', automatic_thresholding=False, threshold_metric='acc', micro=False):
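    """Cross-validate scikit-learn logistic regression probes over C on extracted features.

    A minimal call sketch (toy shapes; trX/trY are features and labels produced
    by a featurizer):

        model, scores, preds, c, nnotzero = train_logreg(trX, trY, vaX, vaY, teX, teY)

    Returns the fitted model (or list of per-class models), the [train, val, test]
    scores, the final predictions, the selected C, and the number of nonzero weights.
    """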
# if only integer is provided for C make it iterable so we can loop over
    if not isinstance(C, collections.abc.Iterable):
C = list([C])
# extract features for given neuron indices
if neurons is not None:
if drop_neurons:
all_neurons = set(list(range(trX.shape[-1])))
neurons = set(list(neurons))
neurons = list(all_neurons - neurons)
trX = trX[:, neurons]
if vaX is not None:
vaX = vaX[:, neurons]
if teX is not None:
teX = teX[:, neurons]
# Cross validation over C
n_classes = 1
if len(trY.shape)>1:
n_classes = trY.shape[-1]
    # Shared template for confusion-count dicts; defined up front because it is
    # also needed below when a pre-fit model is passed in and the CV loop is skipped.
    blank_info_dict = {'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
                       'metric' : threshold_metric, 'micro' : micro}
    scores = []
if model is None:
for i, c in enumerate(C):
if n_classes <= 1:
model = LogisticRegression(C=c, penalty=penalty, max_iter=max_iter, random_state=seed)
model.fit(trX, trY)
blank_info_dict = {'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
'metric' : threshold_metric, 'micro' : micro}
if vaX is not None:
info_dict = update_info_dict(blank_info_dict.copy(), vaY, model.predict_proba(vaX)[:, -1])
else:
info_dict = update_info_dict(blank_info_dict.copy(), trY, model.predict_proba(trX)[:, -1])
scores.append(get_metric(info_dict))
print(scores[-1])
del model
else:
info_dicts = []
model = []
for cls in range(n_classes):
_model = LogisticRegression(C=c, penalty=penalty, max_iter=max_iter, random_state=seed)
_model.fit(trX, trY[:, cls])
blank_info_dict = {'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
'metric' : threshold_metric, 'micro' : micro}
if vaX is not None:
info_dict = update_info_dict(blank_info_dict.copy(), vaY[:, cls], _model.predict_proba(vaX)[:, -1])
else:
info_dict = update_info_dict(blank_info_dict.copy(), trY[:, cls], _model.predict_proba(trX)[:, -1])
info_dicts.append(info_dict)
model.append(_model)
scores.append(get_metric(info_dicts))
print(scores[-1])
del model
c = C[np.argmax(scores)]
if n_classes <= 1:
model = LogisticRegression(C=c, penalty=penalty, max_iter=max_iter, random_state=seed)
model.fit(trX, trY)
else:
model = []
for cls in range(n_classes):
_model = LogisticRegression(C=c, penalty=penalty, max_iter=max_iter, random_state=seed)
_model.fit(trX, trY[:, cls])
model.append(_model)
else:
c = model.C
# predict probabilities and get accuracy of regression model on train, val, test as appropriate
# also get number of regression weights that are not zero. (number of features used for modeling)
scores = []
if n_classes == 1:
nnotzero = np.sum(model.coef_ != 0)
preds = model.predict_proba(trX)[:, -1]
train_score = get_metric(update_info_dict(blank_info_dict.copy(), trY, preds), report_metric)
else:
nnotzero = 0
preds = []
info_dicts = []
for cls in range(n_classes):
nnotzero += np.sum(model[cls].coef_ != 0)
_preds = model[cls].predict_proba(trX)[:, -1]
info_dicts.append(update_info_dict(blank_info_dict.copy(), trY[:, cls], _preds))
preds.append(_preds)
nnotzero/=n_classes
train_score = get_metric(info_dicts, report_metric)
preds = np.concatenate([p.reshape((-1, 1)) for p in preds], axis=1)
scores.append(train_score * 100)
if vaX is None:
eval_data = trX
eval_labels = trY
val_score = train_score
else:
eval_data = vaX
eval_labels = vaY
if n_classes == 1:
preds = model.predict_proba(vaX)[:, -1]
val_score = get_metric(update_info_dict(blank_info_dict.copy(), vaY, preds), report_metric)
else:
preds = []
info_dicts = []
for cls in range(n_classes):
_preds = model[cls].predict_proba(vaX)[:, -1]
info_dicts.append(update_info_dict(blank_info_dict.copy(), vaY[:, cls], _preds))
preds.append(_preds)
val_score = get_metric(info_dicts, report_metric)
preds = np.concatenate([p.reshape((-1, 1)) for p in preds], axis=1)
val_preds = preds
val_labels = eval_labels
scores.append(val_score * 100)
eval_score = val_score
threshold = np.array([.5]*n_classes)
    if automatic_thresholding:
        # keep thresholds as an array here; the binary case is collapsed to a
        # scalar below, where the test set is evaluated
        _, threshold, _, _ = _binary_threshold(preds.reshape(-1, n_classes), eval_labels.reshape(-1, n_classes), threshold_metric, micro)
if teX is not None and teY is not None and eval_test:
eval_data = teX
eval_labels = teY
if n_classes == 1:
preds = model.predict_proba(eval_data)[:, -1]
else:
preds = []
for cls in range(n_classes):
_preds = model[cls].predict_proba(eval_data)[:, -1]
preds.append(_preds)
preds = np.concatenate([p.reshape((-1, 1)) for p in preds], axis=1)
if n_classes == 1:
threshold = float(threshold.squeeze())
eval_score = get_metric(update_info_dict(blank_info_dict.copy(), eval_labels, preds, threshold=threshold), report_metric)
else:
info_dicts = []
for cls in range(n_classes):
info_dicts.append(update_info_dict(blank_info_dict.copy(), eval_labels[:, cls], preds[:, cls], threshold=threshold[cls]))
eval_score = get_metric(info_dicts, report_metric)
scores.append(eval_score * 100)
    return model, scores, preds, c, nnotzero
| sentiment-discovery-master | logreg_utils.py |
import torch
import itertools
# At pain of messing up a good thing, also collect standard deviation (total) -- divided by total items for average
def update_info_dict(info_dict, labels, preds, threshold=0.5, std=None):
preds = (torch.tensor(preds) > threshold).long()
labels = (torch.tensor(labels) > threshold).long()
    # For backward compatibility -- if no std is provided, accumulate an exact zero
    if std is not None:
        info_dict['std'] += torch.sum(torch.tensor(std)).float()
    else:
        # (preds == 1) & (preds == 0) is always all-False, so this sums to zero
        info_dict['std'] += torch.sum((preds == 1) & (preds == 0)).float()
info_dict['tp'] += torch.sum((preds == 1) & (labels == 1)).float()
info_dict['tn'] += torch.sum((preds == 0) & (labels == 0)).float()
info_dict['fp'] += torch.sum((preds == 1) & (labels == 0)).float()
info_dict['fn'] += torch.sum((preds == 0) & (labels == 1)).float()
return info_dict
# Misnomer -- returns the mean accumulated standard deviation per class, not the variance.
def get_variance(tp, tn, fp, fn, std):
total = tp + tn + fp + fn
return std / total
# TODO: Also return variance per class (in multihead sense) as a metric
def get_metric(infos, metric=None, micro=False):
"""Essentially a case-switch for getting a metric"""
metrics = {
'acc' : get_accuracy,
'jacc' : get_jaccard_index,
'f1' : get_f1,
'mcc' : get_mcc,
'recall': get_recall,
'precision': get_precision,
'var' : get_variance
}
tp = tn = fp = fn = std = 0
if isinstance(infos, dict):
infos = [infos]
metric = metrics[infos[0].get('metric') or metric]
micro = infos[0].get('micro') or micro
stats = ['tp', 'tn', 'fp', 'fn', 'std']
if micro:
# micro averaging computes the metric after aggregating
# all of the parameters from sets being averaged
for info in infos:
tp += info['tp']
tn += info['tn']
fp += info['fp']
fn += info['fn']
std += info['std']
return metric(tp, tn, fp, fn, std)
else:
# macro averaging computes the metric on each set
# and averages the metrics afterward
individual_metrics = []
for info in infos:
individual_metrics.append(metric(*[info[s].item() for s in stats]))
return sum(individual_metrics) / len(individual_metrics)
# Metrics as functions of true positive, true negative,
# false positive, false negative, standard deviation
def get_precision(tp, tn, fp, fn, std):
if tp == 0:
return 0
return tp / (tp + fp)
def get_recall(tp, tn, fp, fn, std):
if tp == 0:
return 0
return tp / (tp + fn)
def get_jaccard_index(tp, tn, fp, fn, std):
if tp == 0:
return 0
return (tp) / (tp + fp + fn)
def get_accuracy(tp, tn, fp, fn, std):
return (tp + tn) / (tp + tn + fp + fn)
def get_f1(tp, tn, fp, fn, std):
if tp == 0:
return 0
return 2.0 * tp / (2 * tp + fp + fn)
def get_mcc(tp, tn, fp, fn, std):
    total = (tp + tn + fp + fn)
    # normalize the counts to guard against overflow in the products below;
    # MCC itself is invariant to this rescaling
    tp, tn, fp, fn = tp / total, tn / total, fp / total, fn / total
denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
denom = denom if denom > 1e-8 else 1
return (tp * tn - fp * fn) / denom
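# A minimal, self-contained usage sketch (toy values): accumulate confusion
# counts for one binary class, then read out macro F1 at the default threshold.
if __name__ == '__main__':
    _info = {'fp': 0, 'tp': 0, 'fn': 0, 'tn': 0, 'std': 0.,
             'metric': 'f1', 'micro': False}
    _info = update_info_dict(_info, labels=[1, 0, 1], preds=[0.9, 0.2, 0.4])
    print(get_metric(_info))  # 2 * 1 / (2 * 1 + 0 + 1) = 0.666...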
| sentiment-discovery-master | metric_utils.py |
from torch.optim.lr_scheduler import _LRScheduler
import math
class LinearLR(_LRScheduler):
"""
A scheduler for linear learning rate decay to 0 over a specified number of steps.
Args:
optimizer (Optimizer): Wrapped optimizer.
        max_iters (int): number of steps over which the lr decays linearly; at last_iter == max_iters, lr = max(min_lr, 0)
last_iter (int): The index of last iteration step. Default: -1
min_lr (float): smallest allowed learning rate (acts as a clamp to prevent too small learning rates). Default: 1e-8
Example:
>>> # Assuming optimizer also uses lr = 0.0005 for all groups
>>> scheduler = LinearLR(optimizer, max_iters=10, last_iter=-1, min_lr=1e-8)
>>> for iter in range(10):
>>> train(...)
>>> scheduler.step()
>>> validate(...)
"""
def __init__(self, optimizer, max_iters, last_iter=-1, min_lr=1e-8):
self.optimizer = optimizer
self.max_iters = max_iters
self.num_iters = last_iter
self.min_lr = min_lr
self.done = False
if last_iter == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.step(last_iter + 1)
def get_lr(self):
return [self.decay_func(base_lr) for base_lr in self.base_lrs]
def decay_func(self, init_lr):
new_lr = init_lr*((self.max_iters-self.num_iters)/self.max_iters)
return max(new_lr, self.min_lr)
def step(self, epoch=None):
if epoch is None:
epoch = self.num_iters + 1
self.num_iters = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
return self.done
class ConstantLR(_LRScheduler):
def __init__(self, optimizer, lr):
self.optimizer = optimizer
for group in optimizer.param_groups:
group['lr'] = lr
def step(self, step_num=None):
pass
class SlantedTriangularLR(_LRScheduler):
"""
Implements the "slanted triangular learning rate schedule used for ULMFiT as a function of
the number of training iterations" (arxiv.org/pdf/1801.06146.pdf)
Args:
optimizer (Optimizer): Wrapped optimizer.
        lr_ratio (float): ratio of maximum to minimum learning rate (32 in paper)
max_val (float): highest learning rate (attained at peak of slanted triangle - 0.01 in paper)
cut_frac (float): proportion of iterations during which learning rate is increasing (0.1 in paper)
num_iters (int): total number of iterations expected (should be one epoch)
"""
def __init__(self, optimizer, lr_ratio=100, max_val=6.25e-5, cut_frac=0.002, num_iters=1000):
self.optimizer = optimizer
self.min_val = max_val / lr_ratio
self.max_val = max_val
self.peak_iter = num_iters * cut_frac
self.end_triangle_iter = num_iters
self.num_iters = 0
self.lr_func = self.create_lr_func()
for group in optimizer.param_groups:
group['weight_decay'] = 0.01
if 'name' in group.keys() and group['name'] == 'low':
group['lr'] = self.min_val / 2.6
else:
group['lr'] = self.min_val
def create_lr_func(self):
lr_range = self.max_val - self.min_val
up_slope = lr_range / self.peak_iter
up_intercept = self.min_val
down_slope = -lr_range / (self.end_triangle_iter - self.peak_iter)
down_intercept = -down_slope * self.peak_iter + self.max_val
def lr_func():
if self.num_iters <= self.peak_iter:
return up_slope * self.num_iters + up_intercept
else:
return down_slope * self.num_iters + down_intercept
return lr_func
def step(self, step_num=None):
if step_num is None:
step_num = self.num_iters + 1
self.num_iters = step_num
new_lr = self.lr_func()
for group in self.optimizer.param_groups:
if 'name' in group.keys() and group['name'] == 'low':
group['lr'] = new_lr / 2.6
else:
group['lr'] = new_lr
class CosineAnnealingLR(_LRScheduler):
"""Anneals the learning rate from start to zero along a cosine curve."""
def __init__(self, optimizer, start_lr, warmup_iter, num_iters):
self.optimizer = optimizer
self.start_lr = start_lr
self.warmup_iter = warmup_iter
self.num_iters = 0
self.end_iter = num_iters
def get_lr(self):
# https://openreview.net/pdf?id=BJYwwY9ll pg. 4
if self.num_iters <= self.warmup_iter:
return float(self.start_lr) * self.num_iters / self.warmup_iter
else:
return self.start_lr / 2.0 * (math.cos(math.pi * (self.num_iters - self.warmup_iter) / self.end_iter) + 1)
def step(self, step_num=None):
if step_num is None:
step_num = self.num_iters + 1
self.num_iters = step_num
new_lr = self.get_lr()
for group in self.optimizer.param_groups:
group['lr'] = new_lr
class AnnealingLR(_LRScheduler):
"""Anneals the learning rate from start to zero along a cosine curve."""
DECAY_STYLES = ['linear', 'cosine', 'exponential', 'constant', 'None']
def __init__(self, optimizer, start_lr, warmup_iter, num_iters, decay_style=None):
self.optimizer = optimizer
self.start_lr = start_lr
self.warmup_iter = warmup_iter
self.num_iters = 0
self.end_iter = num_iters
self.decay_style = decay_style.lower() if isinstance(decay_style, str) else None
print('decaying', decay_style)
def get_lr(self):
# https://openreview.net/pdf?id=BJYwwY9ll pg. 4
if self.num_iters <= self.warmup_iter:
return float(self.start_lr) * self.num_iters / self.warmup_iter
else:
if self.decay_style == self.DECAY_STYLES[0]:
return self.start_lr*((self.end_iter-(self.num_iters-self.warmup_iter))/self.end_iter)
elif self.decay_style == self.DECAY_STYLES[1]:
return self.start_lr / 2.0 * (math.cos(math.pi * (self.num_iters - self.warmup_iter) / self.end_iter) + 1)
elif self.decay_style == self.DECAY_STYLES[2]:
#TODO: implement exponential decay
return self.start_lr
else:
return self.start_lr
def step(self, step_num=None):
if step_num is None:
step_num = self.num_iters + 1
self.num_iters = step_num
new_lr = self.get_lr()
for group in self.optimizer.param_groups:
group['lr'] = new_lr
class DiscriminativeFinetuneWrapper(object):
def __init__(self, optimizer, layer_lambda, lr_ratio=0.3):
pass
class WarmupLR:
def __init__(self, optimizer, max_iters, last_iter=-1):
self.optimizer = optimizer
self.max_iters = max_iters
self.num_iters = last_iter
self.step(last_iter + 1)
def scale_lr(self, lr):
return (lr * (self.num_iters+1) / self.max_iters)
def step(self, epoch=None):
if epoch is None:
epoch = self.num_iters + 1
self.num_iters = epoch
if self.num_iters >= self.max_iters:
return
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
param_group['lr'] = self.scale_lr(lr)
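# A minimal, self-contained usage sketch for the schedulers above (toy
# one-parameter optimizer; note that WarmupLR rescales the *current* lr each
# step, so its scaling compounds across steps):
if __name__ == '__main__':
    import torch
    _param = torch.nn.Parameter(torch.zeros(1))
    _optim = torch.optim.SGD([_param], lr=1.0)
    # 10 linear warmup steps, then cosine decay over the remaining horizon
    _sched = AnnealingLR(_optim, start_lr=1.0, warmup_iter=10, num_iters=100,
                         decay_style='cosine')
    for _ in range(100):
        _sched.step()
    print(_optim.param_groups[0]['lr'])  # close to zero by step 100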
| sentiment-discovery-master | learning_rates.py |
import argparse
import os
import time
import math
import collections
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import pandas as pd
from reparameterization import apply_weight_norm, remove_weight_norm
from model import SentimentClassifier
from configure_data import configure_data
from arguments import add_general_args, add_model_args, add_classifier_model_args, add_run_classifier_args
def get_data_and_args():
parser = argparse.ArgumentParser(description='PyTorch Sentiment Discovery Classification')
parser = add_general_args(parser)
parser = add_model_args(parser)
parser = add_classifier_model_args(parser)
data_config, data_parser, run_classifier_parser, parser = add_run_classifier_args(parser)
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
args.shuffle=False
    if args.seed != -1:
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
args.data_size = tokenizer.num_tokens
args.padding_idx = tokenizer.command_name_map['pad'].Id
return (train_data, val_data, test_data), tokenizer, args
def get_model(args):
sd = None
model_args = args
if args.load is not None and args.load != '':
sd = torch.load(args.load)
if 'args' in sd:
model_args = sd['args']
if 'sd' in sd:
sd = sd['sd']
ntokens = model_args.data_size
concat_pools = model_args.concat_max, model_args.concat_min, model_args.concat_mean
if args.model == 'transformer':
model = SentimentClassifier(model_args.model, ntokens, None, None, None, model_args.classifier_hidden_layers, model_args.classifier_dropout,
None, concat_pools, False, model_args)
else:
model = SentimentClassifier(model_args.model, ntokens, model_args.emsize, model_args.nhid, model_args.nlayers,
model_args.classifier_hidden_layers, model_args.classifier_dropout, model_args.all_layers, concat_pools, False, model_args)
args.heads_per_class = model_args.heads_per_class
args.use_softmax = model_args.use_softmax
try:
args.classes = list(model_args.classes)
except:
args.classes = [args.label_key]
try:
args.dual_thresh = model_args.dual_thresh and not model_args.joint_binary_train
except:
args.dual_thresh = False
if args.cuda:
model.cuda()
if args.fp16:
model.half()
if sd is not None:
try:
model.load_state_dict(sd)
except:
# if state dict has weight normalized parameters apply and remove weight norm to model while loading sd
if hasattr(model.lm_encoder, 'rnn'):
apply_weight_norm(model.lm_encoder.rnn)
else:
apply_weight_norm(model.lm_encoder)
model.lm_encoder.load_state_dict(sd)
remove_weight_norm(model)
if args.neurons > 0:
print('WARNING. Setting neurons %s' % str(args.neurons))
model.set_neurons(args.neurons)
return model
# uses similar function as transform from transfer.py
def classify(model, text, args):
# Make sure to set *both* parts of the model to .eval() mode.
model.lm_encoder.eval()
model.classifier.eval()
# Initialize data, append results
stds = np.array([])
labels = np.array([])
label_probs = np.array([])
first_label = True
heads_per_class = args.heads_per_class
def get_batch(batch):
text = batch['text'][0]
timesteps = batch['length']
labels = batch['label']
text = Variable(text).long()
timesteps = Variable(timesteps).long()
labels = Variable(labels).long()
if args.max_seq_len is not None:
text = text[:, :args.max_seq_len]
timesteps = torch.clamp(timesteps, max=args.max_seq_len)
if args.cuda:
text, timesteps, labels = text.cuda(), timesteps.cuda(), labels.cuda()
return text.t(), labels, timesteps-1
def get_outs(text_batch, length_batch):
if args.model.lower() == 'transformer':
class_out, (lm_or_encoder_out, state) = model(text_batch, length_batch, args.get_hidden)
else:
model.lm_encoder.rnn.reset_hidden(args.batch_size)
for _ in range(1 + args.num_hidden_warmup):
class_out, (lm_or_encoder_out, state) = model(text_batch, length_batch, args.get_hidden)
if args.use_softmax and args.heads_per_class == 1:
class_out = F.softmax(class_out, -1)
return class_out, (lm_or_encoder_out, state)
tstart = start = time.time()
n = 0
len_ds = len(text)
with torch.no_grad():
for i, data in tqdm(enumerate(text), total=len(text)):
text_batch, labels_batch, length_batch = get_batch(data)
size = text_batch.size(1)
n += size
# get predicted probabilities given transposed text and lengths of text
probs, _ = get_outs(text_batch, length_batch)
# probs = model(text_batch, length_batch)
if first_label:
first_label = False
labels = []
label_probs = []
if heads_per_class > 1:
stds = []
# Save variances, and predictions
# TODO: Handle multi-head [multiple classes out]
if heads_per_class > 1:
_, probs, std, preds = probs
stds.append(std.data.cpu().numpy())
else:
probs, preds = probs
if args.use_softmax:
probs = F.softmax(probs, -1)
labels.append(preds.data.cpu().numpy())
label_probs.append(probs.data.cpu().numpy())
num_char = length_batch.sum().item()
end = time.time()
elapsed_time = end - start
total_time = end - tstart
start = end
s_per_batch = total_time / (i+1)
timeleft = (len_ds - (i+1)) * s_per_batch
ch_per_s = float(num_char) / elapsed_time
if not first_label:
labels = (np.concatenate(labels)) #.flatten())
label_probs = (np.concatenate(label_probs)) #.flatten())
if heads_per_class > 1:
stds = (np.concatenate(stds))
else:
stds = np.zeros_like(labels)
print('%0.3f seconds to transform %d examples' %
(time.time() - tstart, n))
return labels, label_probs, stds
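# Shape note (single-head, no softmax): classify() returns
#   labels      -- thresholded predictions, one row per example
#   label_probs -- predicted probabilities, same shape as labels
#   stds        -- per-head standard deviations (zeros when heads_per_class == 1)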
def make_header(classes, heads_per_class=1, softmax=False, dual_thresh=False):
header = []
if softmax:
header.append('prediction')
for cls in classes:
if not softmax:
header.append(cls + ' pred')
header.append(cls + ' prob')
if heads_per_class > 1:
header.append(cls + ' std')
if dual_thresh:
header.append('neutral pred')
header.append('neutral prob')
return header
def get_row(pred, prob, std, classes, heads_per_class=1, softmax=False, dual_thresh=False):
row = []
if softmax:
row.append(pred[0])
for i in range(len(classes)):
if not softmax:
row.append(pred[i])
row.append(prob[i])
if heads_per_class > 1:
row.append(std[i])
if dual_thresh:
row.append(pred[2])
row.append(prob[2])
return row
def get_writer(preds, probs, stds, classes, heads_per_class=1, softmax=False, dual_thresh=False):
header = make_header(classes, heads_per_class, softmax, dual_thresh)
yield header
for pred, prob, std in zip(preds, probs, stds):
yield get_row(pred, prob, std, classes, heads_per_class, softmax, dual_thresh)
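# A minimal sketch of the rows get_writer yields (toy values, two binary
# classes, one head, no softmax):
#
#   rows = list(get_writer(preds=[[1, 0]], probs=[[0.9, 0.1]], stds=[[0., 0.]],
#                          classes=['pos', 'neg']))
#   # rows[0] == ['pos pred', 'pos prob', 'neg pred', 'neg prob']
#   # rows[1] == [1, 0.9, 0, 0.1]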
def main():
(train_data, val_data, test_data), tokenizer, args = get_data_and_args()
model = get_model(args)
ypred, yprob, ystd = classify(model, train_data, args)
save_root = ''
save_root = os.path.join(save_root, args.save_probs)
print('saving predicted probabilities to '+save_root)
np.save(save_root, ypred)
np.save(save_root+'.prob', yprob)
np.save(save_root+'.std', ystd)
if args.write_results is None or args.write_results == '':
exit()
print('writing results to '+args.write_results)
writer = get_writer(ypred, yprob, ystd, args.classes, args.heads_per_class, args.use_softmax, args.dual_thresh)
train_data.dataset.write(writer, path=args.write_results)
if __name__ == '__main__':
main()
| sentiment-discovery-master | run_classifier.py |
from sklearn import metrics
import itertools
import argparse
import torch
import numpy as np
import pandas as pd
from metric_utils import update_info_dict, get_metric
from collections import defaultdict
from tqdm import tqdm
def binary_threshold(args, labels=None):
preds = pd.read_csv(args.preds_file, header=None).values
labels = pd.read_csv(args.labels_file, header=None).values
avg_metric, best_thresholds, category_metrics, category_best_info_dicts = _binary_threshold(preds, labels, args.metric, args.micro)
print(avg_metric / preds.shape[1])
np.savetxt('best_binary_thresholds_{}_{}.txt'.format('micro' if args.micro else 'macro', args.metric), (best_thresholds))
def _binary_threshold(preds, labels, metric, micro, global_tweaks=1000, debug=False, heads_per_class=1, class_single_threshold=False):
avg_metric = 0
best_thresholds = []
info_dicts = []
category_metrics = []
# Compute threshold per class... *unless* multiple heads per class and one threshold required.
num_categories = labels.shape[1]
for category in range(num_categories):
category_best_threshold = category_best_metric = 0
for threshold in np.linspace(0.005, 1, 200):
if heads_per_class > 1 and class_single_threshold:
info_dict = update_info_dict(defaultdict(int), labels[:, (category * heads_per_class):(category+1)*heads_per_class], preds[:, (category * heads_per_class):(category+1)*heads_per_class], threshold=threshold)
else:
info_dict = update_info_dict(defaultdict(int), labels[:, category], preds[:, category], threshold=threshold)
metric_score = get_metric(info_dict, metric, micro)
if metric_score > category_best_metric or category_best_metric==0:
category_best_metric, category_best_threshold, category_best_info_dict = metric_score, threshold, info_dict
info_dicts.append(category_best_info_dict)
category_metrics.append(category_best_metric)
best_thresholds.append(category_best_threshold)
# HACK -- use micro average here, even if not elsewhere
micro = True
best_metric = get_metric(info_dicts, metric, micro)
# HACK: Attempt to tune thresholds simultaneously... for overall micro average
if num_categories < 2:
global_tweaks = 0
if debug and global_tweaks > 0:
        print('best after individual thresholds (micro %s)' % micro)
print(best_thresholds)
print(get_metric(info_dicts, metric, micro))
for i in range(global_tweaks):
# Choose random category
category = np.random.randint(num_categories)
curr_threshold = best_thresholds[category]
# tweak randomly
new_threshold = curr_threshold + (0.08 * (np.random.random() - 0.5))
if heads_per_class > 1 and class_single_threshold:
info_dict = update_info_dict(defaultdict(int), labels[:, (category * heads_per_class):(category+1)*heads_per_class], preds[:, (category * heads_per_class):(category+1)*heads_per_class], threshold=new_threshold)
else:
info_dict = update_info_dict(defaultdict(int), labels[:, category], preds[:, category], threshold=new_threshold)
old_dict = info_dicts[category]
info_dicts[category] = info_dict
# compute *global* metrics
metric_score = get_metric(info_dicts, metric, micro)
# save new threshold if global metrics improve
if metric_score > best_metric:
# print('Better threshold %.3f for category %d' % (new_threshold, category))
best_thresholds[category] = round(new_threshold, 3)
best_metric = metric_score
else:
info_dicts[category] = old_dict
if debug and global_tweaks > 0:
print('final thresholds')
print(best_thresholds)
print(get_metric(info_dicts, metric, micro))
# OK, now *if* we used multiple heads per class (same threshold) copy these back out to final answers
if heads_per_class > 1 and class_single_threshold:
best_thresholds = np.concatenate([[best_thresholds[i]]*heads_per_class for i in range(num_categories)])
else:
best_thresholds = np.array(best_thresholds)
# print(best_thresholds)
return get_metric(info_dicts, metric, micro), best_thresholds, category_metrics, info_dicts
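# A minimal call sketch (toy predictions/labels for two classes):
#
#   preds = np.array([[0.9, 0.2], [0.3, 0.7]])
#   labels = np.array([[1, 0], [0, 1]])
#   score, thresholds, _, _ = _binary_threshold(preds, labels, 'f1', micro=False)
#   # `thresholds` holds one threshold per class; `score` is the micro-averaged
#   # metric at those thresholds (micro is forced on internally).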
def get_auc(args):
preds = pd.read_csv(args.preds_file, header=None).values
labels = pd.read_csv(args.labels_file, header=None).values.astype(int)
aucs = []
for category in range(preds.shape[1]):
fpr, tpr, thresholds = metrics.roc_curve(labels[:, category], preds[:, category], pos_label=1)
aucs.append(metrics.auc(fpr, tpr))
for idx, auc in enumerate(aucs):
print('{}: {}\n'.format(idx, auc))
def neutral_threshold_scalar_output(args):
preds = pd.read_csv(args.preds_file, header=None, names=['preds'])
labels = pd.read_csv(args.labels_file, header=None, names=['labels'])
assert preds.shape[1] == labels.shape[1] == 1, "Neutral thresholding only available for single category labels"
labels['positive'] = labels['labels'].apply(lambda s: int(s == 1))
labels['negative'] = labels['labels'].apply(lambda s: int(s == 0))
    labels['neutral'] = (labels['positive'] == labels['negative']).astype(int)
labels_vals = labels[['positive', 'negative', 'neutral']].values
best_pos = best_neg = best_acc = 0
for pos, neg in tqdm(itertools.product(np.linspace(0.005, 1, 200), repeat=2), total=200 ** 2, unit='setting'):
if neg > pos:
continue
new_df = pd.DataFrame()
new_df['pos'] = preds['preds'].apply(lambda s: int(s > pos))
new_df['neg'] = preds['preds'].apply(lambda s: int(s < neg))
        new_df['neutral'] = (new_df['pos'] == new_df['neg']).astype(int)
new_df_vals = new_df.values
acc = 0
for new_row, label_row in zip(new_df_vals, labels_vals):
acc += int((new_row == label_row).sum() == 3)
acc /= float(labels.shape[0])
if acc > best_acc:
best_pos, best_neg, best_acc = pos, neg, acc
print("Best acc:", best_acc, "Best pos:", best_pos, "Best neg:", best_neg)
np.savetxt('best_neutral_thresholds.txt', np.array([best_pos, best_neg]))
def neutral_threshold_two_output(args):
preds = pd.read_csv(args.preds_file, header=None, names=['positive', 'negative']) # ordered positive, negative
labels = pd.read_csv(args.labels_file, header=None, names=['positive', 'negative'])
labels['neutral'] = labels['positive'] == labels['negative']
labels_vals = labels.values
best_pos = best_neg = best_acc = 0
for pos, neg in tqdm(itertools.product(np.linspace(0.005, 1, 200), repeat=2), total=200 ** 2, unit='setting'):
new_df = pd.DataFrame()
new_df['pos'] = preds['positive'].apply(lambda s: int(s > pos))
new_df['neg'] = preds['negative'].apply(lambda s: int(s > neg))
new_df['neutral'] = (new_df['pos'] == new_df['neg']).astype(int)
new_df_vals = new_df.values
acc = 0
for new_row, label_row in zip(new_df_vals, labels_vals):
if new_row[0] == new_row[1] == 1:
new_row[0] = new_row[1] = 0
acc += int((new_row == label_row).sum() == 3)
acc /= float(labels.shape[0])
if acc > best_acc:
best_pos, best_neg, best_acc = pos, neg, acc
print("Best acc:", best_acc, "Best pos:", best_pos, "Best neg:", best_neg)
np.savetxt('best_neutral_thresholds.txt', np.array([best_pos, best_neg]))
# NOTE: this redefinition shadows the implementation above; it delegates the
# threshold search to _neutral_threshold_two_output.
def neutral_threshold_two_output(args):
preds = pd.read_csv(args.preds_file, header=None, names=['positive', 'negative']) # ordered positive, negative
labels = pd.read_csv(args.labels_file, header=None, names=['positive', 'negative'])
best_acc, (best_pos, best_neg) = _neutral_threshold_two_output(preds.values, labels.values)
print("Best acc:", best_acc, "Best pos:", best_pos, "Best neg:", best_neg)
np.savetxt('best_neutral_thresholds.txt', np.array([best_pos, best_neg]))
def _neutral_threshold_two_output(preds, labels, threshold_granularity=30):
neutral_labels = (labels[:,0] == labels[:,1]).astype(int).reshape(-1, 1)
labels_vals = np.concatenate([labels[:,:2], neutral_labels], axis=1)
best_0 = best_1 = best_acc = 0
for t0, t1 in tqdm(itertools.product(np.linspace(0.005, 1, threshold_granularity), repeat=2), total=threshold_granularity ** 2, unit='setting'):
t0, t1 = round(t0, 3), round(t1, 3)
new_df = pd.DataFrame()
new_df['0'] = (preds[:,0]>t0).astype(int)
new_df['1'] = (preds[:,1]>t1).astype(int)
new_df['neutral'] = (new_df['0'] == new_df['1']).astype(int)
new_df_vals = new_df[['0','1','neutral']].values
acc = 0
for new_row, label_row in zip(new_df_vals, labels_vals):
if new_row[0] == new_row[1] == 1:
new_row[0] = new_row[1] = 0
acc += int((new_row == label_row).sum() == 3)
acc /= labels_vals.shape[0]
if acc > best_acc:
best_0, best_1, best_acc = t0, t1, acc
return best_acc, (best_0, best_1)
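# A minimal call sketch (toy positive/negative probabilities; a row counts as
# neutral when both thresholded outputs agree):
#
#   preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])
#   labels = np.array([[1, 0], [0, 1], [0, 0]])
#   best_acc, (t_pos, t_neg) = _neutral_threshold_two_output(preds, labels)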
def main():
task_dict = {
'auc' : get_auc,
'binary' : binary_threshold,
'neutral' : neutral_threshold_two_output,
'scalar' : neutral_threshold_scalar_output,
}
parser = argparse.ArgumentParser("Tools for optimizing outputs through ROC/AUC analysis")
parser.add_argument('--task', type=str, required=True, help='what do you want to do?')
parser.add_argument('--preds-file', type=str, help='path to predictions file')
parser.add_argument('--labels-file', type=str, help='path to labels file')
parser.add_argument('--metric', type=str, default='f1', help='which metric to analyze/optimize')
parser.add_argument('--micro', action='store_true', help='whether to micro-average metric')
args = parser.parse_args()
task_dict[args.task](args)
if __name__ == '__main__':
main()
| sentiment-discovery-master | threshold.py |
import argparse
import os
import sys
import time
import math
import random
import collections
import pandas as pd
import pickle as pkl
import json
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from logreg_utils import train_logreg
from fp16 import FP16_Module, FP16_Optimizer
from reparameterization import apply_weight_norm, remove_weight_norm
import model as M
from tqdm import tqdm
from model import DistributedDataParallel as DDP
from configure_data import configure_data
from learning_rates import AnnealingLR, SlantedTriangularLR, ConstantLR
from arguments import add_general_args, add_model_args, add_classifier_model_args, add_finetune_classifier_args
from metric_utils import update_info_dict, get_metric
from threshold import _binary_threshold, _neutral_threshold_two_output
def get_data_and_args():
parser = argparse.ArgumentParser(description='PyTorch Sentiment Discovery Transfer Learning')
parser = add_general_args(parser)
parser = add_model_args(parser)
parser = add_classifier_model_args(parser)
data_config, data_parser, finetune_classifier_parser, parser = add_finetune_classifier_args(parser)
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
    if args.seed != -1:
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
args.data_size = tokenizer.num_tokens
args.padding_idx = tokenizer.command_name_map['pad'].Id
return (train_data, val_data, test_data), tokenizer, args
def get_model_and_optim(args, train_data):
if args.use_softmax:
args.report_no_thresholding = True
ntokens = args.data_size
concat_pools = args.concat_max, args.concat_min, args.concat_mean
if args.model == 'transformer':
model = M.SentimentClassifier(args.model, ntokens, None, None, None, args.classifier_hidden_layers, args.classifier_dropout,
None, concat_pools, args.aux_lm_loss, args)
else:
model = M.SentimentClassifier(args.model, ntokens, args.emsize, args.nhid, args.nlayers,
args.classifier_hidden_layers, args.classifier_dropout, args.all_layers, concat_pools, args.aux_lm_loss, args)
if args.cuda:
model.cuda()
if args.fp16:
model.half()
# load char embedding and recurrent encoder for featurization
if args.load is not None and args.load != '':
with open(args.load, 'rb') as f:
sd = x = torch.load(f, 'cpu')
if 'sd' in sd:
sd = sd['sd']
if not args.load_finetuned:
if 'lm_encoder' in sd:
sd = sd['lm_encoder']
try:
model.lm_encoder.load_state_dict(sd)
except:
# if state dict has weight normalized parameters apply and remove weight norm to model while loading sd
if hasattr(model.lm_encoder, 'rnn'):
apply_weight_norm(model.lm_encoder.rnn)
else:
apply_weight_norm(model.lm_encoder)
model.lm_encoder.load_state_dict(sd)
remove_weight_norm(model)
else:
model.load_state_dict(sd)
if args.thresh_test_preds:
model.set_thresholds(pd.read_csv(args.thresh_test_preds, header=None).values.squeeze(), args.double_thresh, args.dual_thresh and not args.joint_binary_train)
optims = {
'adam' : 'Adam',
'sgd' : 'SGD'
}
    optim = getattr(torch.optim, optims[args.optim.lower()])(model.parameters(), lr=args.lr)
iters_per_epoch = len(train_data)
num_iters = iters_per_epoch * args.epochs
assert not (args.stlr_cut_frac and args.cos_cut_frac)
if args.stlr_cut_frac is not None:
LR = SlantedTriangularLR(optim, max_val=args.lr, cut_frac=args.stlr_cut_frac, num_iters=num_iters)
elif args.cos_cut_frac is not None:
LR = AnnealingLR(optim, start_lr=args.lr, warmup_iter=int(args.cos_cut_frac * num_iters), num_iters=num_iters, decay_style='cosine')
elif args.decay_style is not None:
warmup_iters = int(args.warmup_epochs * iters_per_epoch)
if args.decay_epochs == -1:
decay_iters = int(args.epochs * iters_per_epoch)
else:
decay_iters = int(args.decay_epochs * iters_per_epoch)
if args.decay_style == 'constant':
#TODO: implement
LR = AnnealingLR(optim, start_lr=args.lr, warmup_iter=warmup_iters, num_iters=decay_iters+warmup_iters, decay_style=args.decay_style)
elif args.decay_style == 'linear':
#TODO: implement
LR = AnnealingLR(optim, start_lr=args.lr, warmup_iter=warmup_iters, num_iters=decay_iters+warmup_iters, decay_style=args.decay_style)
elif args.decay_style == 'cosine':
LR = AnnealingLR(optim, start_lr=args.lr, warmup_iter=warmup_iters, num_iters=decay_iters+warmup_iters, decay_style=args.decay_style)
elif args.decay_style == 'exponential':
#TODO: implement
LR = ConstantLR(optim, lr=args.lr)
else:
LR = ConstantLR(optim, lr=args.lr)
else:
LR = ConstantLR(optim, lr=args.lr)
return model, optim, LR
def get_supervised_batch(batch, use_cuda, model, max_seq_len=None, args=None, save_outputs=False, heads_per_class=1):
'''
Process batch and return tuple of (text, text label, text length) long tensors.
Text is returned in column format with (time, batch) dimensions.
'''
text = batch['text'][0]
timesteps = batch['length']
labels = batch['label']
text = Variable(text).long()
timesteps = Variable(timesteps).long()
labels = Variable(labels)
    if max_seq_len is not None:
        text = text[:, :max_seq_len]
        timesteps = torch.clamp(timesteps, max=max_seq_len)
if args.use_softmax:
labels = Variable(labels).view(-1).long()
else:
labels = labels.view(-1, int(model.out_dim/model.heads_per_class)).float()
if use_cuda:
text, timesteps, labels = text.cuda(), timesteps.cuda(), labels.cuda()
return text.t(), labels, timesteps-1
def transform(model, text_batch, labels_batch, length_batch, args, LR=None):
batch_size = text_batch.size(1)
def get_outs():
if args.model.lower() == 'transformer':
class_out, (lm_or_encoder_out, state) = model(text_batch, length_batch, args.get_hidden)
else:
model.lm_encoder.rnn.reset_hidden(args.batch_size)
for _ in range(1 + args.num_hidden_warmup):
class_out, (lm_or_encoder_out, state) = model(text_batch, length_batch, args.get_hidden)
# if args.heads_per_class > 1:
# class_out, mean_out, std_out = class_out
# if args.use_softmax:
# class_out = torch.max(class_out,-1)[1].view(-1,1)
# class_out = class_out.float()
# if args.heads_per_class > 1:
# class_out = class_out, mean_out, std_out
return class_out, (lm_or_encoder_out, state)
if LR is not None and not args.use_logreg:
# doing true finetuning
class_out, lm_or_encoder_out = get_outs()
else:
with torch.no_grad():
class_out, lm_or_encoder_out = get_outs()
# class_out = class_out.float().view(-1, model.out_dim)
return class_out, lm_or_encoder_out
def finetune(model, text, args, val_data=None, LR=None, reg_loss=None, tqdm_desc='nvidia', save_outputs=False,
heads_per_class=1, default_threshold=0.5, last_thresholds=[], threshold_validation=True, debug=False):
'''
Apply featurization `model` to extract features from text in data loader.
Featurization model should return cell state not hidden state.
`text` data loader should return tuples of ((text, text length), text label)
Returns labels and features for samples in text.
'''
    # NOTE: when finetuning (LR is not None), the model must stay in train mode; only switch to .eval() for evaluation passes
if LR is None:
model.lm_encoder.eval()
model.classifier.eval()
else:
# Very important to reset back to train mode for future epochs!
model.lm_encoder.train()
model.classifier.train()
# Optionally, freeze language model (train MLP only)
    # NOTE: un-freeze gradients if they ever need to be tweaked in future iterations
if args.freeze_lm:
for param in model.lm_encoder.parameters():
param.requires_grad = False
# Choose which losses to implement
if args.use_softmax:
if heads_per_class > 1:
clf_loss_fn = M.MultiHeadCrossEntropyLoss(heads_per_class=heads_per_class)
else:
clf_loss_fn = torch.nn.CrossEntropyLoss()
else:
if heads_per_class > 1:
clf_loss_fn = M.MultiHeadBCELoss(heads_per_class=heads_per_class)
else:
clf_loss_fn = torch.nn.BCELoss()
if args.aux_lm_loss:
        aux_loss_fn = torch.nn.CrossEntropyLoss(reduction='none')
else:
aux_loss_fn = None
if args.thresh_test_preds:
thresholds = model.get_thresholds()
elif len(last_thresholds) > 0:
# Re-use previous thresholds, if provided.
# Why? More accurate reporting, and not that slow. Don't compute thresholds on training, for example -- but can recycle val threshold
thresholds = last_thresholds
else:
# Default thresholds -- faster, but less accurate
thresholds = np.array([default_threshold for _ in range(int(model.out_dim/heads_per_class))])
total_loss = 0
total_classifier_loss = 0
total_lm_loss = 0
total_multihead_variance_loss = 0
class_accuracies = torch.zeros(model.out_dim).cuda()
if model.out_dim/heads_per_class > 1 and not args.use_softmax:
keys = list(args.non_binary_cols)
elif args.use_softmax:
keys = [str(m) for m in range(model.out_dim)]
else:
keys = ['']
info_dicts = [{'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0,
'metric' : args.report_metric, 'micro' : args.micro} for k in keys]
# Sanity check -- should do this sooner. Does #classes match expected output?
assert model.out_dim == len(keys) * heads_per_class, "model.out_dim does not match keys (%s) x heads_per_class (%d)" % (keys, heads_per_class)
batch_adjustment = 1. / len(text)
# Save all outputs *IF* small enough, and requested for thresholding -- basically, on validation
#if threshold_validation and LR is not None:
all_batches = []
all_stds = []
all_labels = []
for i, data in tqdm(enumerate(text), total=len(text), unit="batch", desc=tqdm_desc, position=1, ncols=100):
text_batch, labels_batch, length_batch = get_supervised_batch(data, args.cuda, model, args.max_seq_len, args, heads_per_class=args.heads_per_class)
class_out, (lm_out, _) = transform(model, text_batch, labels_batch, length_batch, args, LR)
class_std = None
if heads_per_class > 1:
all_heads, class_out, class_std, clf_out = class_out
classifier_loss = clf_loss_fn(all_heads, labels_batch)
else:
class_out, clf_out = class_out
if args.dual_thresh:
class_out = class_out[:, :-1]
classifier_loss = clf_loss_fn(class_out, labels_batch)
if args.use_softmax:
class_out = F.softmax(class_out, -1)
loss = classifier_loss
classifier_loss = classifier_loss.clone() # save for reporting
# Also compute multihead variance loss -- from classifier [divide by output size since it scales linearly]
if args.aux_head_variance_loss_weight > 0.:
multihead_variance_loss = model.classifier.get_last_layer_variance() / model.out_dim
loss = loss + multihead_variance_loss * args.aux_head_variance_loss_weight
# Divide by # batches? Since we're looking at the parameters here, and should be batch independent.
# multihead_variance_loss *= batch_adjustment
if args.aux_lm_loss:
lm_labels = text_batch[1:]
lm_losses = aux_loss_fn(lm_out[:-1].view(-1, lm_out.size(2)).contiguous().float(),
lm_labels.contiguous().view(-1))
padding_mask = (torch.arange(lm_labels.size(0)).unsqueeze(1).cuda() > length_batch).float()
portion_unpadded = padding_mask.sum() / padding_mask.size(0)
lm_loss = portion_unpadded * torch.mean(lm_losses * (padding_mask.view(-1).float()))
# Scale LM loss -- since it's so big
if args.aux_lm_loss_weight > 0.:
loss = loss + lm_loss * args.aux_lm_loss_weight
# Training
if LR is not None:
LR.optimizer.zero_grad()
loss.backward()
LR.optimizer.step()
LR.step()
# Remove loss from CUDA -- kill gradients and save memory.
total_loss += loss.detach().cpu().numpy()
if args.use_softmax:
labels_batch = onehot(labels_batch.squeeze(), model.out_dim)
class_out = onehot(clf_out.view(-1), int(model.out_dim/heads_per_class))
total_classifier_loss += classifier_loss.detach().cpu().numpy()
if args.aux_lm_loss:
total_lm_loss += lm_loss.detach().cpu().numpy()
if args.aux_head_variance_loss_weight > 0:
total_multihead_variance_loss += multihead_variance_loss.detach().cpu().numpy()
for j in range(int(model.out_dim/heads_per_class)):
std = None
if class_std is not None:
std = class_std[:,j]
info_dicts[j] = update_info_dict(info_dicts[j], labels_batch[:, j], class_out[:, j], thresholds[j], std=std)
# Save, for overall thresholding (not on training)
if threshold_validation and LR is None:
all_labels.append(labels_batch.detach().cpu().numpy())
all_batches.append(class_out.detach().cpu().numpy())
if class_std is not None:
all_stds.append(class_std.detach().cpu().numpy())
if threshold_validation and LR is None:
all_batches = np.concatenate(all_batches)
all_labels = np.concatenate(all_labels)
if heads_per_class > 1:
all_stds = np.concatenate(all_stds)
# Compute new thresholds -- per class
_, thresholds, _, _ = _binary_threshold(all_batches, all_labels, args.threshold_metric, args.micro, global_tweaks=args.global_tweaks)
info_dicts = [{'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
'metric' : args.report_metric, 'micro' : args.micro} for k in keys]
# In multihead case, look at class averages? Why? More predictive. Works especially well when we force single per-class threshold.
for j in range(int(model.out_dim/heads_per_class)):
std = None
if heads_per_class > 1:
std = all_stds[:, j]
info_dicts[j] = update_info_dict(info_dicts[j], all_labels[:, j], all_batches[:, j], thresholds[j], std=std)
# Metrics for all items -- with current best thresholds
total_metrics, class_metric_strs = get_metric_report(info_dicts, args, keys, LR)
# Show losses
if debug:
        tqdm.write('losses -- total / classifier / LM / multihead_variance')
        tqdm.write(str(total_loss * batch_adjustment))
        tqdm.write(str(total_classifier_loss * batch_adjustment))
        tqdm.write(str(total_lm_loss * batch_adjustment))
        tqdm.write(str(total_multihead_variance_loss * batch_adjustment))
return total_loss.item() / (i + 1), total_metrics, class_metric_strs, thresholds
def onehot(sparse, nclasses):
rows = len(sparse)
rtn = torch.zeros(rows, math.floor(nclasses))
rtn[torch.arange(rows), sparse.squeeze().cpu()] = 1
return rtn
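# A minimal sketch of onehot() (toy values): scatters sparse class indices into
# one-hot rows.
#
#   onehot(torch.tensor([2, 0]), 3)
#   # -> tensor([[0., 0., 1.],
#   #            [1., 0., 0.]])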
def get_metric_report(info_dicts, args, keys=['-'], LR=None):
class_metric_strs, total_metrics = [], []
report_metrics = ['jacc', 'acc', 'mcc', 'f1', 'recall', 'precision', 'var'] if args.all_metrics else [args.report_metric]
for m in report_metrics:
for d in info_dicts:
d.update({'metric' : m})
class_metrics = [get_metric(d) for d in info_dicts]
total_metrics.append(get_metric(info_dicts))
if LR is not None:
delim = '-'
else:
delim = {'mcc' : '#', 'f1' : '+', 'jacc' : '=', 'acc' : '>', 'var' : '%', 'recall': '<', 'precision':'~'}[m]
class_metric_strs.append(", ".join('{} {} {:5.2f}'.format(k, delim, f * 100) for k, f in zip(keys, class_metrics)))
return total_metrics, class_metric_strs
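# Note on the report format: in eval mode each metric is printed with its own
# delimiter (mcc '#', f1 '+', jacc '=', acc '>', var '%', recall '<',
# precision '~') so per-metric lines are easy to grep from logs; during
# training a plain '-' is used for every metric.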
def generate_outputs(model, text, args, thresholds=None, debug=False):
model.eval()
collected_outputs = []
collected_labels = []
# Collect category standard deviations, across multiple heads
collected_outputs_std = []
for i, data in tqdm(enumerate(text), total=len(text), unit='batch', desc='predictions', position=1, ncols=100):
text_batch, labels_batch, length_batch = get_supervised_batch(data, args.cuda, model, args.max_seq_len, args, save_outputs=True, heads_per_class=args.heads_per_class)
class_out, (lm_out, _) = transform(model, text_batch, labels_batch, length_batch, args)
# Take the average per-category if requested
if args.heads_per_class > 1:
_, class_out, class_std, clf_out = class_out
else:
class_out, clf_out = class_out
if args.use_softmax:
class_out = F.softmax(class_out, -1)
class_std = torch.zeros(class_out.shape)
if args.thresh_test_preds or thresholds is not None:
class_out = clf_out
if args.use_softmax:
labels_batch = onehot(labels_batch.squeeze(), int(model.out_dim/args.heads_per_class)).cuda()
class_out = onehot(torch.max(clf_out, -1)[1].squeeze(), int(model.out_dim/args.heads_per_class))
collected_outputs.append(torch.tensor(class_out).cuda().float())
collected_labels.append(labels_batch)
collected_outputs_std.append(torch.tensor(class_std).cuda().float())
collected_outputs = torch.cat(collected_outputs, 0)
collected_outputs_std = torch.cat(collected_outputs_std, 0)
collected_labels = torch.cat(collected_labels, 0)
return collected_outputs, collected_labels, collected_outputs_std
def write_results(preds, labels, save):
labels_file = os.path.splitext(save)[0] + '_labels.txt'
# HACK -- handle both tensors and numpy arrays here:
if isinstance(preds, np.ndarray):
np.savetxt(save, preds.astype(int), delimiter=',')
np.savetxt(labels_file, labels.astype(int), delimiter=',')
else:
np.savetxt(save, preds.cpu().numpy().astype(int), delimiter=',')
np.savetxt(labels_file, labels.cpu().numpy().astype(int), delimiter=',')
def main():
(train_data, val_data, test_data), tokenizer, args = get_data_and_args()
# Print args for logging & reproduction. Need to know, including default args
if test_data is None:
test_data = val_data
model, optim, LR = get_model_and_optim(args, train_data)
# save_root = '' if args.load is None else args.load
# save_root = save_root.replace('.current', '')
# save_root = os.path.splitext(save_root)[0]
# save_root += '_transfer'
save_root = os.path.join('', args.model_version_name)
if not os.path.exists(save_root):
os.makedirs(save_root)
print('writing results to '+save_root)
def clf_reg_loss(reg_penalty=.125, order=1):
loss = 0
for p in model.classifier.parameters():
loss += torch.abs(p).sum()*reg_penalty
return loss
reg_loss = clf_reg_loss
init_params = list(model.lm_encoder.parameters())
if args.use_logreg:
def transform_for_logreg(model, data, args, desc='train'):
if data is None:
return None, None
X_out = []
Y_out = []
for i, batch in tqdm(enumerate(data), total=len(data), unit="batch", desc=desc, position=0, ncols=100):
text_batch, labels_batch, length_batch = get_supervised_batch(batch, args.cuda, model, args.max_seq_len, args, heads_per_class=args.heads_per_class)
# if args.non_binary_cols:
# labels_batch = labels_batch[:,0]-labels_batch[:,1]+1
_, (_, state) = transform(model, text_batch, labels_batch, length_batch, args)
X_out.append(state.cpu().numpy())
Y_out.append(labels_batch.cpu().numpy())
X_out = np.concatenate(X_out)
Y_out = np.concatenate(Y_out)
return X_out, Y_out
model.eval()
trX, trY = transform_for_logreg(model, train_data, args, desc='train')
vaX, vaY = transform_for_logreg(model, val_data, args, desc='val')
teX, teY = transform_for_logreg(model, test_data, args, desc='test')
logreg_model, logreg_scores, logreg_preds, c, nnotzero = train_logreg(trX, trY, vaX, vaY, teX, teY, eval_test=not args.no_test_eval,
report_metric=args.report_metric, threshold_metric=args.threshold_metric,
automatic_thresholding=args.automatic_thresholding, micro=args.micro)
print(', '.join([str(score) for score in logreg_scores]), 'train, val, test accuracy for all neuron regression')
print(str(c)+' regularization coefficient used')
print(str(nnotzero) + ' features used in all neuron regression\n')
else:
best_vaY = 0
vaT = [] # Current "best thresholds" so we can get reasonable estimates on training set
for e in tqdm(range(args.epochs), unit="epoch", desc="epochs", position=0, ncols=100):
if args.use_softmax:
vaT = []
save_outputs = False
report_metrics = ['jacc', 'acc','mcc', 'f1', 'recall', 'precision', 'var'] if args.all_metrics else [args.report_metric]
print_str = ""
trXt, trY, trC, _ = finetune(model, train_data, args, val_data=val_data, LR=LR, reg_loss=reg_loss, tqdm_desc='train', heads_per_class=args.heads_per_class, last_thresholds=vaT, threshold_validation=False)
data_str_base = "Train Loss: {:4.2f} Train {:5s} (All): {:5.2f}, Train Class {:5s}: {}"
for idx, m in enumerate(report_metrics):
data_str = data_str_base.format(trXt, m, trY[idx] * 100, m, trC[idx])
print_str += data_str + " " * max(0, 110 - len(data_str)) + "\n"
vaXt, vaY = None, None
if val_data is not None:
vaXt, vaY, vaC, vaT = finetune(model, val_data, args, tqdm_desc='val', heads_per_class=args.heads_per_class, last_thresholds=vaT)
# Take command line, for metric for which to measure best performance against.
# NOTE: F1, MCC, Jaccard are good measures. Accuracy is not -- since so skewed.
selection_metric = ['jacc', 'acc','mcc', 'f1', 'recall', 'precision', 'var'].index(args.threshold_metric)
avg_Y = vaY[selection_metric]
tqdm.write('avg '+args.threshold_metric+' metric '+str(avg_Y))
if avg_Y > best_vaY:
save_outputs = True
best_vaY = avg_Y
elif avg_Y == best_vaY and random.random() > 0.5:
save_outputs = True
best_vaY = avg_Y
data_str_base = "Val Loss: {:4.2f} Val {:5s} (All): {:5.2f}, Val Class {:5s}: {}"
for idx, m in enumerate(report_metrics):
data_str = data_str_base.format(vaXt, m, vaY[idx] * 100, m, vaC[idx])
print_str += data_str + " " * max(0, 110 - len(data_str)) + "\n"
tqdm.write(print_str[:-1])
teXt, teY = None, None
if test_data is not None:
# Hardcode -- enable to always save outputs [regardless of metrics]
# save_outputs = True
if save_outputs:
tqdm.write('performing test eval')
try:
with torch.no_grad():
if not args.no_test_eval:
auto_thresholds = None
dual_thresholds = None
# NOTE -- we manually threshold to F1 [not necessarily good]
V_pred, V_label, V_std = generate_outputs(model, val_data, args)
if args.automatic_thresholding:
if args.dual_thresh:
# get dual threshold (do not call auto thresholds)
# TODO: Handle multiple heads per class
_, dual_thresholds = _neutral_threshold_two_output(V_pred.cpu().numpy(), V_label.cpu().numpy())
model.set_thresholds(dual_thresholds, dual_threshold=args.dual_thresh and not args.joint_binary_train)
else:
# Use args.threshold_metric to choose which category to threshold on. F1 and Jaccard are good options
# NOTE: For multiple heads per class, can threshold each head (default) or single threshold. Little difference once model converges.
auto_thresholds = vaT
# _, auto_thresholds, _, _ = _binary_threshold(V_pred.view(-1, int(model.out_dim/args.heads_per_class)).contiguous(), V_label.view(-1, int(model.out_dim/args.heads_per_class)).contiguous(),
# args.threshold_metric, args.micro, global_tweaks=args.global_tweaks)
model.set_thresholds(auto_thresholds, args.double_thresh)
T_pred, T_label, T_std = generate_outputs(model, test_data, args, auto_thresholds)
if not args.use_softmax and int(model.out_dim/args.heads_per_class) > 1:
keys = list(args.non_binary_cols)
if args.dual_thresh:
if len(keys) == len(dual_thresholds):
tqdm.write('Dual thresholds: %s' % str(list(zip(keys, dual_thresholds))))
keys += ['neutral']
else:
tqdm.write('Class thresholds: %s' % str(list(zip(keys, auto_thresholds))))
elif args.use_softmax:
keys = [str(m) for m in range(model.out_dim)]
else:
tqdm.write('Class threshold: %s' % str([args.label_key, auto_thresholds[0]]))
keys = ['']
info_dicts = [{'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
'metric' : args.report_metric, 'micro' : True} for k in keys]
#perform dual threshold here, adding the neutral labels to T_label, thresholding existing predictions and adding neutral preds to T_Pred
if args.dual_thresh:
if dual_thresholds is None:
dual_thresholds = [.5, .5]
def make_onehot_w_neutral(label):
rtn = [0]*3
rtn[label] = 1
return rtn
def get_label(pos_neg):
thresholded = [pos_neg[0]>=dual_thresholds[0], pos_neg[1]>=dual_thresholds[1]]
if thresholded[0] == thresholded[1]:
return 2
return thresholded.index(1)
def get_new_std(std):
return std[0], std[1], (std[0]+std[1])/2
new_labels = []
new_preds = []
T_std = torch.cat([T_std[:,:2], T_std[:,:2].mean(-1).view(-1, 1)], -1).cpu().numpy()
for j, lab in enumerate(T_label):
pred = T_pred[j]
new_preds.append(make_onehot_w_neutral(get_label(pred)))
new_labels.append(make_onehot_w_neutral(get_label(lab)))
T_pred = np.array(new_preds)
T_label = np.array(new_labels)
# HACK: If dual threshold, hardcoded -- assume positive, negative and neutral -- in that order
                            # It's ok to train with other categories (after positive, neutral) as auxiliary loss -- but they won't be calculated in test
if args.dual_thresh and args.joint_binary_train:
keys = ['positive', 'negative', 'neutral']
info_dicts = [{'fp' : 0, 'tp' : 0, 'fn' : 0, 'tn' : 0, 'std' : 0.,
'metric' : args.report_metric, 'micro' : True} for k in keys]
for j, k in enumerate(keys):
                            update_info_dict(info_dicts[j], T_label[:, j], T_pred[:, j], std=T_std[:, j])
total_metrics, metric_strings = get_metric_report(info_dicts, args, keys)
test_str = ''
test_str_base = "Test {:5s} (micro): {:5.2f}, Test Class {:5s}: {}"
for idx, m in enumerate(report_metrics):
data_str = test_str_base.format(m, total_metrics[idx] * 100, m, metric_strings[idx])
test_str += data_str + " " * max(0, 110 - len(data_str)) + "\n"
tqdm.write(test_str[:-1])
# tqdm.write(str(total_metrics))
# tqdm.write('; '.join(metric_strings))
else:
V_pred, V_label, V_std = generate_outputs(model, val_data, args)
T_pred, T_label, T_std = generate_outputs(model, test_data, args)
val_path = os.path.join(save_root, 'val_results.txt')
tqdm.write('Saving validation prediction results of size %s to %s' % (str(V_pred.shape[:]), val_path))
write_results(V_pred, V_label, val_path)
test_path = os.path.join(save_root, 'test_results.txt')
tqdm.write('Saving test prediction results of size %s to %s' % (str(T_pred.shape[:]), test_path))
write_results(T_pred, T_label, test_path)
except KeyboardInterrupt:
pass
else:
pass
# Save the model, upon request
if args.save_finetune and save_outputs:
# Save model if best so far. Note epoch number, and also keys [what is it predicting], as well as optional version number
# TODO: Add key string to handle multiple runs?
if args.non_binary_cols:
keys = list(args.non_binary_cols)
else:
keys = [args.label_key]
# Also save args
args_save_path = os.path.join(save_root, 'args.txt')
tqdm.write('Saving commandline to %s' % args_save_path)
with open(args_save_path, 'w') as f:
f.write(' '.join(sys.argv[1:]))
# Save and add thresholds to arguments for easy reloading of model config
if not args.no_test_eval and args.automatic_thresholding:
thresh_save_path = os.path.join(save_root, 'thresh'+'_ep'+str(e)+'.npy')
tqdm.write('Saving thresh to %s' % thresh_save_path)
if args.dual_thresh:
np.save(thresh_save_path, list(zip(keys, dual_thresholds)))
args.thresholds = list(zip(keys, dual_thresholds))
else:
np.save(thresh_save_path, list(zip(keys, auto_thresholds)))
args.thresholds = list(zip(keys, auto_thresholds))
else:
args.thresholds = None
args.classes = keys
#save full model with args to restore
clf_save_path = os.path.join(save_root, 'model'+'_ep'+str(e)+'.clf')
tqdm.write('Saving full classifier to %s' % clf_save_path)
torch.save({'sd': model.state_dict(), 'args': args}, clf_save_path)
if __name__ == "__main__":
main()
# python3 finetune.py --data csvs/SemEval-7k-processed-IDs.train.csv --valid csvs/SemEval-7k-processed-IDs.val.csv --test csvs/SemEval-7k-processed-IDs.test.csv --epochs 5 --text_key 32k-ids --ids --optim adam --data_size 32000 --aux-lm-loss --label_key label --all-metrics --automatic-thresholding --batch_size 24 --lr 1.73e-5 --model transformer --decoder-embed-dim 768 --decoder-ffn-embed-dim 3072 --decoder-layers 12 --load /home/adlr-sent.cosmos433/chkpts/tf-768emb-3072ffn-12x8head-learnedpos-32000parts-2cos-300/e170000.pt --decoder-learned-pos --use-final-embed --classifier-hidden-layers 8 --non-binary-cols csvs/cols/plutchik-cols.json --save-finetune
| sentiment-discovery-master | finetune_classifier.py |
import argparse
import os
import time
import math
import collections
import pickle as pkl
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from logreg_utils import train_logreg
from fp16 import FP16_Module, FP16_Optimizer
from reparameterization import apply_weight_norm, remove_weight_norm
from model import RNNFeaturizer, TransformerFeaturizer
from configure_data import configure_data
from arguments import add_general_args, add_model_args, add_classifier_model_args, add_sentiment_transfer_args
def get_data_and_args():
parser = argparse.ArgumentParser(description='PyTorch Sentiment Discovery Transfer Learning')
parser = add_general_args(parser)
parser = add_model_args(parser)
parser = add_classifier_model_args(parser)
data_config, data_parser, sentiment_transfer_parser, parser = add_sentiment_transfer_args(parser)
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
if args.seed != -1:
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
args.data_size = tokenizer.num_tokens
args.padding_idx = tokenizer.command_name_map['pad'].Id
return (train_data, val_data, test_data), tokenizer, args
def get_model(args):
ntokens = args.data_size
concat_pools = [args.concat_max, args.concat_min, args.concat_mean]
if args.model.lower() == 'transformer':
model = TransformerFeaturizer(False, args)
else:
model = RNNFeaturizer(args.model, ntokens, args.emsize, args.nhid, args.nlayers,
0.0, args.all_layers, concat_pools, residuals=args.residuals)
if args.cuda:
model.cuda()
if args.fp16:
model.half()
if args.load is not None and args.load != '':
# load char embedding and recurrent encoder for featurization
with open(args.load, 'rb') as f:
sd = torch.load(f)
if 'sd' in sd:
sd = sd['sd']
if 'lm_encoder' in sd:
sd = sd['lm_encoder']
try:
model.load_state_dict(sd)
except:
# if state dict has weight normalized parameters apply and remove weight norm to model while loading sd
if hasattr(model, 'rnn'):
apply_weight_norm(model.rnn)
else:
apply_weight_norm(model)
model.load_state_dict(sd)
remove_weight_norm(model)
return model
def transform(model, text, args):
'''
Apply featurization `model` to extract features from text in data loader.
Featurization model should return cell state not hidden state.
`text` data loader should return tuples of ((text, text length), text label)
Returns labels and features for samples in text.
'''
model.eval()
features = np.array([])
labels = np.array([])
first_feature = True
def get_batch(batch):
'''
Process batch and return tuple of (text, text label, text length) long tensors.
Text is returned in column format with (time, batch) dimensions.
'''
text = batch['text'][0]
timesteps = batch['length']
labels = batch['label']
text = Variable(text).long()
timesteps = Variable(timesteps).long()
labels = Variable(labels).long()
if args.cuda:
text, timesteps, labels = text.cuda(), timesteps.cuda(), labels.cuda()
return text.t(), labels, timesteps-1
def get_outs(text_batch, length_batch):
if args.model.lower() == 'transformer':
cell_out, lm_or_encoder_out = model(text_batch, length_batch, args.get_hidden)
else:
model.rnn.reset_hidden(args.batch_size)
for _ in range(1 + args.num_hidden_warmup):
cell_out, lm_or_encoder_out = model(text_batch, length_batch, args.get_hidden)
return cell_out, lm_or_encoder_out
tstart = start = time.time()
n = 0
len_ds = len(text)
# Use no grad context for improving memory footprint/speed of inference
with torch.no_grad():
for i, data in tqdm(enumerate(text), total=len_ds, unit="batch", desc="transform", position=1, ncols=100):
text_batch, labels_batch, length_batch = get_batch(data)
# get batch size and reset hidden state with appropriate batch size
batch_size = text_batch.size(1)
n += batch_size
# extract batch of features from text batch
cell, _ = get_outs(text_batch, length_batch)
cell = cell.float()
if first_feature:
features = []
first_feature = False
labels = []
labels.append(labels_batch.data.cpu().numpy())
features.append(cell.data.cpu().numpy())
if not first_feature:
features = (np.concatenate(features))
labels = (np.concatenate(labels))
print('%0.3f seconds to transform %d examples' %
(time.time() - tstart, n))
return features, labels
def score_and_predict(model, X, Y):
'''
Given a binary classification model, predict output classification for numpy features `X`
and evaluate accuracy against labels `Y`. Labels should be numpy array of 0s and 1s.
Returns (accuracy, numpy array of classification probabilities)
'''
probs = model.predict_proba(X)[:, 1]
clf = probs > .5
accuracy = (np.squeeze(Y) == np.squeeze(clf)).mean()
return accuracy, probs
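# Illustrative sketch (hypothetical stub, never called by the pipeline): shows the
# sklearn-style interface `score_and_predict` expects -- any object exposing
# `predict_proba` returning `[n, 2]` class probabilities works.
def _demo_score_and_predict():
    class _StubModel:
        def predict_proba(self, X):
            p = X[:, 0]
            return np.stack([1 - p, p], axis=1)
    X = np.array([[.9], [.2], [.7]])
    Y = np.array([1, 0, 0])
    accuracy, probs = score_and_predict(_StubModel(), X, Y)
    return accuracy, probs  # accuracy == 2/3, probs == [.9, .2, .7]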
def get_top_k_neuron_weights(model, k=1):
"""
Gets the indices of the top weights based on the l1 norm contributions of the weights,
based off of https://rakeshchada.github.io/Sentiment-Neuron.html interpretation of
https://arxiv.org/pdf/1704.01444.pdf (Radford et al.)
Args:
    model: trained classifier whose `coef_.T` is a numpy arraylike of shape `[d, num_classes]`
    k: integer specifying how many rows of weights to select
Returns:
k_indices: numpy arraylike of shape `[k]` specifying indices of the top k rows
"""
weights = model.coef_.T
weight_penalties = np.squeeze(np.linalg.norm(weights, ord=1, axis=1))
if k == 1:
k_indices = np.array([np.argmax(weight_penalties)])
elif k >= np.log(len(weight_penalties)):
# runs O(nlogn)
k_indices = np.argsort(weight_penalties)[-k:][::-1]
else:
# runs O(n+klogk)
k_indices = np.argpartition(weight_penalties, -k)[-k:]
k_indices = (k_indices[np.argsort(weight_penalties[k_indices])])[::-1]
return k_indices
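# Illustrative sketch (hypothetical helper, never called): reproduces the
# O(n + k log k) top-k selection path above on random weights, for clarity.
def _demo_top_k_selection(d=16, num_classes=3, k=4):
    weights = np.random.randn(d, num_classes)      # like model.coef_.T
    penalties = np.squeeze(np.linalg.norm(weights, ord=1, axis=1))
    idx = np.argpartition(penalties, -k)[-k:]      # k largest rows, unordered
    return idx[np.argsort(penalties[idx])][::-1]   # sorted descending by penalty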
def plot_logits(save_root, X, Y_pred, top_neurons):
"""plot logits and save to appropriate experiment directory"""
save_root = os.path.join(save_root,'logit_vis')
if not os.path.exists(save_root):
os.makedirs(save_root)
print('plotting_logits at', save_root)
for i, n in enumerate(top_neurons):
plot_logit_and_save(X, Y_pred, n, os.path.join(save_root, str(i)+'_'+str(n)))
def plot_logit_and_save(logits, labels, logit_index, name):
"""
Plots histogram (wrt to what label it is) of logit corresponding to logit_index.
Saves plotted histogram to name.
Args:
    logits: numpy array of logit values, shape `[n, d]`
    labels: binary labels (or probabilities) used to split the histogram into neg/pos
    logit_index: index of the logit column to plot
    name: file path (without extension) to save the plotted histogram to
"""
logit = logits[:,logit_index]
plt.title('Distribution of Logit Values')
plt.ylabel('# of logits per bin')
plt.xlabel('Logit Value')
plt.hist(logit[labels < .5], bins=25, alpha=0.5, label='neg')
plt.hist(logit[labels >= .5], bins=25, alpha=0.5, label='pos')
plt.legend()
plt.savefig(name+'.png')
plt.clf()
def plot_weight_contribs_and_save(coef, name):
plt.title('Values of Resulting L1 Penalized Weights')
plt.tick_params(axis='both', which='major')
coef = normalize(coef)
plt.plot(range(len(coef[0])), coef.T)
plt.xlabel('Neuron (Feature) Index')
plt.ylabel('Neuron (Feature) weight')
print('saving weight visualization to', name)
plt.savefig(name)
plt.clf()
def normalize(coef):
norm = np.linalg.norm(coef)
coef = coef/norm
return coef
def main():
(train_data, val_data, test_data), tokenizer, args = get_data_and_args()
model = get_model(args)
save_root = '' if args.load is None else args.load
save_root = save_root.replace('.current', '')
save_root = os.path.splitext(save_root)[0]
save_root += '_transfer'
save_root = os.path.join(save_root, args.save_results)
if not os.path.exists(save_root):
os.makedirs(save_root)
print('writing results to '+save_root)
# featurize train, val, test or use previously cached features if possible
print('transforming train')
if not (os.path.exists(os.path.join(save_root, 'trXt.npy')) and args.use_cached):
trXt, trY = transform(model, train_data, args)
np.save(os.path.join(save_root, 'trXt'), trXt)
np.save(os.path.join(save_root, 'trY'), trY)
else:
trXt = np.load(os.path.join(save_root, 'trXt.npy'))
trY = np.load(os.path.join(save_root, 'trY.npy'))
vaXt, vaY = None, None
if val_data is not None:
print('transforming validation')
if not (os.path.exists(os.path.join(save_root, 'vaXt.npy')) and args.use_cached):
vaXt, vaY = transform(model, val_data, args)
np.save(os.path.join(save_root, 'vaXt'), vaXt)
np.save(os.path.join(save_root, 'vaY'), vaY)
else:
vaXt = np.load(os.path.join(save_root, 'vaXt.npy'))
vaY = np.load(os.path.join(save_root, 'vaY.npy'))
teXt, teY = None, None
if test_data is not None:
print('transforming test')
if not (os.path.exists(os.path.join(save_root, 'teXt.npy')) and args.use_cached):
teXt, teY = transform(model, test_data, args)
np.save(os.path.join(save_root, 'teXt'), teXt)
np.save(os.path.join(save_root, 'teY'), teY)
else:
teXt = np.load(os.path.join(save_root, 'teXt.npy'))
teY = np.load(os.path.join(save_root, 'teY.npy'))
# train logistic regression model of featurized text against labels
start = time.time()
metric = 'mcc' if args.mcc else 'acc'
logreg_model, logreg_scores, logreg_probs, c, nnotzero = train_logreg(trXt, trY, vaXt, vaY, teXt, teY, max_iter=args.epochs, eval_test=not args.no_test_eval,
seed=args.seed, report_metric=metric, threshold_metric=metric)
end = time.time()
elapsed_time = end - start
with open(os.path.join(save_root, 'all_neurons_score.txt'), 'w') as f:
f.write(str(logreg_scores))
with open(os.path.join(save_root, 'all_neurons_probs.pkl'), 'wb') as f:
pkl.dump(logreg_probs, f)
with open(os.path.join(save_root, 'neurons.pkl'), 'wb') as f:
pkl.dump(logreg_model.coef_, f)
print('all neuron regression took %s seconds'%(str(elapsed_time)))
print(', '.join([str(score) for score in logreg_scores]), 'train, val, test accuracy for all neuron regression')
print(str(c)+' regularization coefficient used')
print(str(nnotzero) + ' features used in all neuron regression\n')
# save a sentiment classification pytorch model
sd = {}
if not args.fp16:
clf_sd = {'weight': torch.from_numpy(logreg_model.coef_).float(), 'bias': torch.from_numpy(logreg_model.intercept_).float()}
else:
clf_sd = {'weight': torch.from_numpy(logreg_model.coef_).half(), 'bias': torch.from_numpy(logreg_model.intercept_).half()}
sd['classifier'] = clf_sd
model.float().cpu()
sd['lm_encoder'] = model.state_dict()
with open(os.path.join(save_root, 'classifier.pt'), 'wb') as f:
torch.save(sd, f)
model.half()
sd['lm_encoder'] = model.state_dict()
with open(os.path.join(save_root, 'classifier.pt.16'), 'wb') as f:
torch.save(sd, f)
# extract sentiment neuron indices
sentiment_neurons = get_top_k_neuron_weights(logreg_model, args.neurons)
print('using neuron(s) %s as features for regression'%(', '.join([str(neuron) for neuron in list(sentiment_neurons.reshape(-1))])))
# train logistic regression model of features corresponding to sentiment neuron indices against labels
start = time.time()
logreg_neuron_model, logreg_neuron_scores, logreg_neuron_probs, neuron_c, neuron_nnotzero = train_logreg(trXt, trY, vaXt, vaY, teXt, teY, max_iter=args.epochs, eval_test=not args.no_test_eval,
seed=args.seed, neurons=sentiment_neurons, drop_neurons=args.drop_neurons,
report_metric=metric, threshold_metric=metric)
end = time.time()
if args.drop_neurons:
with open(os.path.join(save_root, 'dropped_neurons_score.txt'), 'w') as f:
f.write(str(logreg_neuron_scores))
with open(os.path.join(save_root, 'dropped_neurons_probs.pkl'), 'wb') as f:
pkl.dump(logreg_neuron_probs, f)
print('%d dropped neuron regression took %s seconds'%(args.neurons, str(end-start)))
print(', '.join([str(score) for score in logreg_neuron_scores]), 'train, val, test accuracy for %d dropped neuron regression'%(args.neurons))
print(str(neuron_c)+' regularization coefficient used')
start = time.time()
logreg_neuron_model, logreg_neuron_scores, logreg_neuron_probs, neuron_c, neuron_nnotzero = train_logreg(trXt, trY, vaXt, vaY, teXt, teY, max_iter=args.epochs, eval_test=not args.no_test_eval,
seed=args.seed, neurons=sentiment_neurons, report_metric=metric, threshold_metric=metric)
end = time.time()
print('%d neuron regression took %s seconds'%(args.neurons, str(end-start)))
print(', '.join([str(score) for score in logreg_neuron_scores]), 'train, val, test accuracy for %d neuron regression'%(args.neurons))
print(str(neuron_c)+' regularization coefficient used')
# log model accuracies, predicted probabilities, and weight/bias of regression model
with open(os.path.join(save_root, 'all_neurons_score.txt'), 'w') as f:
f.write(str(logreg_scores))
with open(os.path.join(save_root, 'neurons_score.txt'), 'w') as f:
f.write(str(logreg_neuron_scores))
with open(os.path.join(save_root, 'all_neurons_probs.pkl'), 'wb') as f:
pkl.dump(logreg_probs, f)
with open(os.path.join(save_root, 'neurons_probs.pkl'), 'wb') as f:
pkl.dump(logreg_neuron_probs, f)
with open(os.path.join(save_root, 'neurons.pkl'), 'wb') as f:
pkl.dump(logreg_model.coef_, f)
with open(os.path.join(save_root, 'neuron_bias.pkl'), 'wb') as f:
pkl.dump(logreg_model.intercept_, f)
#Plot feats
use_feats, use_labels = teXt, teY
if use_feats is None:
use_feats, use_labels = vaXt, vaY
if use_feats is None:
use_feats, use_labels = trXt, trY
try:
plot_logits(save_root, use_feats, use_labels, sentiment_neurons)
except:
print('no labels to plot logits for')
plot_weight_contribs_and_save(logreg_model.coef_, os.path.join(save_root, 'weight_vis.png'))
print('results successfully written to ' + save_root)
if args.write_results == '':
exit()
def get_csv_writer(feats, top_neurons, all_proba, neuron_proba):
"""makes a generator to be used in data_utils.datasets.csv_dataset.write()"""
header = ['prob w/ all', 'prob w/ %d neuron(s)'%(len(top_neurons),)]
top_feats = feats[:, top_neurons]
header += ['neuron %s'%(str(x),) for x in top_neurons]
yield header
for i, _ in enumerate(top_feats):
row = []
row.append(all_proba[i])
row.append(neuron_proba[i])
row.extend(list(top_feats[i].reshape(-1)))
yield row
data, use_feats = test_data, teXt
if use_feats is None:
data, use_feats = val_data, vaXt
if use_feats is None:
data, use_feats = train_data, trXt
csv_writer = get_csv_writer(use_feats, sentiment_neurons, logreg_probs[-1], logreg_neuron_probs[-1])
data.dataset.write(csv_writer, path=args.write_results)
if __name__ == '__main__':
main()
| sentiment-discovery-master | transfer.py |
import os
from setuptools import setup, find_packages
import torch
print("torch.__version__ = ", torch.__version__)
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError("Sentiment Discovery requires Pytorch 0.4 or newer.\n" +
"The latest stable release can be obtained from https://pytorch.org/")
print("Building module.")
setup(
name='sentiment_discovery', version='0.4',
# ext_modules=[cuda_ext,],
description='PyTorch Extensions written by NVIDIA',
packages=find_packages(where='.'),
install_requires=[
"numpy",
"pandas",
"scikit-learn",
"matplotlib",
"unidecode",
"seaborn",
"sentencepiece",
"emoji"
]
)
| sentiment-discovery-master | setup.py |
import os
import copy
import data_utils
class DataConfig(object):
def __init__(self, parser, defaults={}):
super(DataConfig,self).__init__()
self.parser = parser
self.defaults = defaults
def apply(self, opt):
print('configuring data')
self.apply_defaults(opt)
return make_loaders(opt)
def set_defaults(self, **kwargs):
for k, v in kwargs.items():
self.defaults[k] = v
def apply_defaults(self, opt):
for k, v in self.defaults.items():
k = k.replace('-', '_')
if not hasattr(opt, k):
setattr(opt, k, v)
def make_loaders(opt):
"""makes training/val/test"""
batch_size = opt.batch_size * opt.world_size
eval_batch_size = opt.eval_batch_size * opt.world_size
seq_length = opt.seq_length
if seq_length < 0:
seq_length = seq_length * opt.world_size
eval_seq_length = opt.eval_seq_length
if opt.eval_seq_length < 0:
eval_seq_length = eval_seq_length * opt.world_size
# data_loader_args = {'num_workers': 0, 'shuffle': opt.shuffle, 'batch_size': batch_size,
data_loader_args = {'num_workers': 4, 'shuffle': opt.shuffle, 'batch_size': batch_size,
# data_loader_args = {'num_workers': 1, 'shuffle': opt.shuffle, 'batch_size': batch_size,
'pin_memory': True, 'transpose': opt.transpose, 'distributed': opt.world_size > 1,
'rank': opt.rank, 'world_size': opt.world_size, 'drop_last': opt.world_size > 1}
if opt.data_set_type == 'L2R':
loader_type = data_utils.ShardLoader
data_loader_args.update({'seq_len': seq_length, 'persist_state': opt.persist_state, 'samples_per_shard': opt.samples_per_shard})
else:
loader_type = data_utils.DataLoader
split = get_split(opt)
data_set_args = {
'path': opt.data, 'seq_length': seq_length, 'lazy': opt.lazy, 'delim': opt.delim,
'text_key': opt.text_key, 'label_key': opt.label_key, 'preprocess': opt.preprocess,
'ds_type': opt.data_set_type, 'split': split, 'loose': opt.loose_json,
'tokenizer_type': opt.tokenizer_type, 'tokenizer_model_path': opt.tokenizer_path,
'vocab_size': opt.vocab_size, 'model_type': opt.tokenizer_model_type,
'non_binary_cols': opt.non_binary_cols, 'process_fn': opt.process_fn}
eval_loader_args = copy.copy(data_loader_args)
eval_set_args = copy.copy(data_set_args)
eval_set_args['split']=[1.]
# if optional eval args were set then replace their equivalent values in the arg dict
if opt.eval_batch_size != 0:
eval_loader_args['batch_size'] = eval_batch_size
if opt.eval_seq_length != 0:
eval_set_args['seq_length'] = eval_seq_length
if opt.data_set_type == 'L2R':
eval_loader_args['seq_len'] = eval_seq_length
if opt.eval_text_key is not None:
eval_set_args['text_key'] = opt.eval_text_key
if opt.eval_label_key is not None:
eval_set_args['label_key'] = opt.eval_label_key
train = None
valid = None
test = None
if opt.data is not None:
train, tokenizer = data_utils.make_dataset(**data_set_args)
if should_split(split):
train, valid, test = train
eval_set_args['tokenizer'] = tokenizer
if opt.valid is not None:
eval_set_args['path'] = opt.valid
valid, _ = data_utils.make_dataset(**eval_set_args)
if test is None and opt.test is not None:
eval_set_args['path'] = opt.test
test, _ = data_utils.make_dataset(**eval_set_args)
if train is not None and opt.batch_size > 0:
train = loader_type(train, **data_loader_args)
if valid is not None:
valid = loader_type(valid, **eval_loader_args)
if test is not None:
test = loader_type(test, **eval_loader_args)
return (train, valid, test), tokenizer
def should_split(split):
return max(split) != 1.
def get_split(opt):
splits = []
if opt.split.find(',') != -1:
splits = [float(s) for s in opt.split.split(',')]
elif opt.split.find('/') != -1:
splits = [float(s) for s in opt.split.split('/')]
else:
splits = [float(opt.split)]
split_total = sum(splits)
if split_total < 1.:
splits.append(1-split_total)
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
if opt.valid is not None:
splits[1] = 0.
if opt.test is not None:
splits[2] = 0.
final_sum = sum(splits)
return [s/final_sum for s in splits]
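# Illustrative sketch (hypothetical options object, never called): how `get_split`
# turns the --split string into three normalized proportions.
def _demo_get_split():
    from argparse import Namespace
    opt = Namespace(split='0.8,0.1', valid=None, test=None)
    # '0.8,0.1' -> [0.8, 0.1] -> remainder appended -> ~[0.8, 0.1, 0.1] after normalizing
    return get_split(opt)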
def configure_data(parser):
"""add cmdline flags for configuring datasets"""
main_parser = parser
group = parser.add_argument_group('data options')
group.add_argument('--data', nargs='+', default=['./data/imdb/unsup.json'],
help="""Filename for training""")
group.add_argument('--valid', nargs='*', default=None,
help="""Filename for validation""")
group.add_argument('--test', nargs='*', default=None,
help="""Filename for testing""")
group.add_argument('--process-fn', type=str, default='process_str', choices=['process_str', 'process_tweet'],
help='what preprocessing function to use to process text. One of [process_str, process_tweet].')
group.add_argument('--batch-size', type=int, default=128,
help='Data Loader batch size')
group.add_argument('--eval-batch-size', type=int, default=0,
help='Data Loader batch size for evaluation datasets')
group.add_argument('--data-size', type=int, default=256,
help='number of tokens in data')
group.add_argument('--loose-json', action='store_true',
help='Use loose json (one json-formatted string per newline), instead of tight json (data file is one json string)')
group.add_argument('--preprocess', action='store_true',
help='force preprocessing of datasets')
group.add_argument('--delim', default=',',
help='delimiter used to parse csv testfiles')
group.add_argument('--non-binary-cols', nargs='*', default=None,
help='labels for columns to non-binary dataset [only works for csv datasets]')
group.add_argument('--split', default='1.',
help='comma-separated list of proportions for training, validation, and test split')
group.add_argument('--text-key', default='sentence',
help='key to use to extract text from json/csv')
group.add_argument('--label-key', default='label',
help='key to use to extract labels from json/csv')
group.add_argument('--eval-text-key', default=None,
help='key to use to extract text from json/csv evaluation datasets')
group.add_argument('--eval-label-key', default=None,
help='key to use to extract labels from json/csv evaluation datasets')
# tokenizer arguments
group.add_argument('--tokenizer-type', type=str, default='CharacterLevelTokenizer', choices=['CharacterLevelTokenizer', 'SentencePieceTokenizer'],
help='what type of tokenizer to use')
group.add_argument('--tokenizer-model-type', type=str, default='bpe', choices=['bpe', 'char', 'unigram', 'word'],
help='Model type to use for sentencepiece tokenization')
group.add_argument('--vocab-size', type=int, default=256,
help='vocab size to use for non-character-level tokenization')
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization models')
# These are options that are relevant to data loading functionality, but are not meant to be exposed to the command line user.
# These options are intended to be set in code by specific scripts.
defaults = {
'world_size': 1,
'rank': -1,
'persist_state': 0,
'lazy': False,
'shuffle': False,
'transpose': False,
'data_set_type': 'supervised',
'seq_length': 256,
'eval_seq_length': 256,
'samples_per_shard': 100
}
return DataConfig(main_parser, defaults=defaults), group
| sentiment-discovery-master | configure_data.py |
import argparse
import os
import sys
import time
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from fp16 import FP16_Module, FP16_Optimizer
import data
import model
from model import DistributedDataParallel as DDP
from apex.reparameterization import apply_weight_norm, remove_weight_norm
from configure_data import configure_data
from learning_rates import LinearLR
parser = argparse.ArgumentParser(description='PyTorch Sentiment-Discovery Language Modeling')
parser.add_argument('--model', type=str, default='mLSTM',
help='type of recurrent net (Tanh, ReLU, LSTM, mLSTM, GRU)')
parser.add_argument('--emsize', type=int, default=64,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=4096,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--lr', type=float, default=5e-4,
help='initial learning rate')
parser.add_argument('--constant_decay', type=int, default=None,
help='number of iterations to decay LR over,' + \
' None means decay to zero over training')
parser.add_argument('--clip', type=float, default=0,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=1,
help='upper epoch limit')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1234,
help='random seed')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='lang_model.pt',
help='path to save the final model')
parser.add_argument('--load', type=str, default='',
help='path to a previously saved model checkpoint')
parser.add_argument('--load_optim', action='store_true',
help='load most recent optimizer to resume training')
parser.add_argument('--save_iters', type=int, default=2000, metavar='N',
help='save current model progress interval')
parser.add_argument('--save_optim', action='store_true',
help='save most recent optimizer')
parser.add_argument('--fp16', action='store_true',
help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
parser.add_argument('--dynamic_loss_scale', action='store_true',
help='Dynamically look for loss scalar for fp16 convergence help.')
parser.add_argument('--no_weight_norm', action='store_true',
help='Add weight normalization to model.')
parser.add_argument('--loss_scale', type=float, default=1,
help='Static loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--world_size', type=int, default=1,
help='number of distributed workers')
parser.add_argument('--distributed_backend', default='gloo',
help='which backend to use for distributed training. One of [gloo, nccl]')
parser.add_argument('--rank', type=int, default=-1,
help='distributed worker rank. Typically set automatically from multiproc.py')
parser.add_argument('--base-gpu', type=int, default=0,
help='base gpu to use as gpu 0')
parser.add_argument('--optim', default='Adam',
help='One of PyTorch\'s optimizers (Adam, SGD, etc). Default: Adam')
parser.add_argument('--tcp-port', type=int, default=6000,
help='tcp port so as to avoid address already in use errors')
# Add dataset args to argparser and set some defaults
data_config, data_parser = configure_data(parser)
data_config.set_defaults(data_set_type='L2R', transpose=True)
data_parser.set_defaults(split='100,1,1')
data_parser = parser.add_argument_group('language modeling data options')
data_parser.add_argument('--seq_length', type=int, default=256,
help="Maximum sequence length to process (for unsupervised rec)")
data_parser.add_argument('--eval_seq_length', type=int, default=256,
help="Maximum sequence length to process for evaluation")
data_parser.add_argument('--lazy', action='store_true',
help='whether to lazy evaluate the data set')
data_parser.add_argument('--persist_state', type=int, default=1,
help='0=reset state after every sample in a shard, 1=reset state after every shard, -1=never reset state')
data_parser.add_argument('--num_shards', type=int, default=102,
help="""number of total shards for unsupervised training dataset. If a `split` is specified,
appropriately portions the number of shards amongst the splits.""")
data_parser.add_argument('--val_shards', type=int, default=0,
help="""number of shards for validation dataset if validation set is specified and not split from training""")
data_parser.add_argument('--test_shards', type=int, default=0,
help="""number of shards for test dataset if test set is specified and not split from training""")
data_parser.add_argument('--train-iters', type=int, default=1000,
help="""number of iterations per epoch to run training for""")
data_parser.add_argument('--eval-iters', type=int, default=100,
help="""number of iterations per epoch to run validation/test for""")
args = parser.parse_args()
torch.backends.cudnn.enabled = False
args.cuda = torch.cuda.is_available()
# initialize distributed process group and set device
if args.rank > 0 or args.base_gpu != 0:
torch.cuda.set_device((args.rank+args.base_gpu) % torch.cuda.device_count())
if args.world_size > 1:
distributed_init_file = os.path.splitext(args.save)[0]+'.distributed.dpt'
torch.distributed.init_process_group(backend=args.distributed_backend, world_size=args.world_size,
init_method='tcp://localhost:{}'.format(args.tcp_port), rank=args.rank)
# init_method='file://'+distributed_init_file, rank=args.rank)
# Set the random seed manually for reproducibility.
if args.seed > 0:
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.loss_scale != 1 and args.dynamic_loss_scale:
raise RuntimeError("Static loss scale and dynamic loss scale cannot be used together.")
###############################################################################
# Load data
###############################################################################
# Starting from sequential data, the unsupervised dataset type loads the corpus
# into rows. With the alphabet as the our corpus and batch size 4, we get
# ┌ a b c d e f ┐
# │ g h i j k l │
# │ m n o p q r │
# └ s t u v w x ┘.
# These rows are treated as independent by the model, which means that the
# dependence of e.g. 'g' on 'f' cannot be learned, but allows more efficient
# batch processing.
#
# The unsupervised dataset further splits the corpus into shards through which
# the hidden state is persisted. The dataset also produces a hidden state
# reset mask that resets the hidden state at the start of every shard. A valid
# mask might look like
# ┌ 1 0 0 0 0 0 ... 0 0 0 1 0 0 ... ┐
# │ 1 0 0 0 0 0 ... 0 1 0 0 0 0 ... │
# │ 1 0 0 0 0 0 ... 0 0 1 0 0 0 ... │
# └ 1 0 0 0 0 0 ... 1 0 0 0 0 0 ... ┘.
# With 1 indicating to reset hidden state at that particular minibatch index
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
###############################################################################
# Build the model
###############################################################################
args.data_size = tokenizer.num_tokens
ntokens = args.data_size
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
print('* number of parameters: %d' % sum([p.nelement() for p in model.parameters()]))
if args.cuda:
model.cuda()
rnn_model = model
optim = None
if args.load != '':
sd = torch.load(args.load, map_location='cpu')
if args.load_optim:
optim_sd = torch.load(os.path.join(os.path.dirname(args.load), 'optim.pt'), map_location='cpu')
rng = torch.load(os.path.join(os.path.dirname(args.load), 'rng.pt'))
torch.cuda.set_rng_state(rng[0])
torch.set_rng_state(rng[1])
try:
model.load_state_dict(sd)
except:
apply_weight_norm(model.rnn, hook_child=False)
model.load_state_dict(sd)
remove_weight_norm(model.rnn)
if not args.no_weight_norm:
apply_weight_norm(model, 'rnn', hook_child=False)
# create optimizer and fp16 models
if args.fp16:
model = FP16_Module(model)
optim = eval('torch.optim.'+args.optim)(model.parameters(), lr=args.lr)
optim = FP16_Optimizer(optim,
static_loss_scale=args.loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale)
else:
optim = eval('torch.optim.'+args.optim)(model.parameters(), lr=args.lr)
if args.load_optim:
    optim.load_state_dict(optim_sd)
# add linear learning rate scheduler
if train_data is not None:
if args.constant_decay:
num_iters = args.constant_decay
else:
num_iters = args.train_iters * args.epochs
init_step = -1
if args.load_optim:
init_step = optim_sd['iter']-optim_sd['skipped_iter']
train_data.batch_sampler.start_iter = (optim_sd['iter'] % len(train_data)) + 1
LR = LinearLR(optim, num_iters, last_iter=init_step)
# wrap model for distributed training
if args.world_size > 1:
model = DDP(model)
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
# get_batch subdivides the source data into chunks of length args.seq_length.
# If source is equal to the example output of the data loading example, with
# a seq_length limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivision of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the data loader. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM. A Variable representing an appropriate
# shard reset mask of the same dimensions is also returned.
def get_batch(data):
reset_mask_batch = data[1].long()
data = data[0].long()
if args.cuda:
data = data.cuda()
reset_mask_batch = reset_mask_batch.cuda()
text_batch = Variable(data[:, :-1].t().contiguous(), requires_grad=False)
target_batch = Variable(data[:, 1:].t().contiguous(), requires_grad=False)
reset_mask_batch = Variable(reset_mask_batch[:,:text_batch.size(0)].t().contiguous(), requires_grad=False)
return text_batch, target_batch, reset_mask_batch
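# Illustrative sketch (hypothetical toy tensors, never called): get_batch shifts the
# sequence by one so target[t] is the token following text[t], and transposes to
# (seq_len, batch) as expected by the recurrent model.
def _demo_get_batch_shapes():
    toy_data = torch.zeros(4, 9).long()   # (batch, seq_length + 1) token ids
    toy_mask = torch.zeros(4, 9).long()   # shard reset mask from the data loader
    text, target, reset = get_batch((toy_data, toy_mask))
    return text.size(), target.size(), reset.size()  # each (8, 4)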
def init_hidden(batch_size):
    return rnn_model.rnn.init_hidden(batch_size)
def evaluate(data_source, max_iters):
# Turn on evaluation mode which disables dropout.
model.eval()
init_hidden(args.batch_size)
total_loss = 0
ntokens = args.data_size
with torch.no_grad():
data_iter = iter(data_source)
i = 0
while i < max_iters:
batch = next(data_iter)
data, targets, reset_mask = get_batch(batch)
output, hidden = model(data, reset_mask=reset_mask)
output_flat = output.view(-1, ntokens).contiguous().float()
loss = criterion(output_flat, targets.view(-1).contiguous())
if isinstance(model, DDP):
torch.distributed.all_reduce(loss.data)
loss.data /= args.world_size
total_loss += loss.data[0]
i += 1
return total_loss / max(max_iters, 1)
def train(max_iters, total_iters=0, skipped_iters=0, elapsed_time=False):
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
t0 = start_time
ntokens = args.data_size
hidden = init_hidden(args.batch_size)
curr_loss = 0.
distributed = isinstance(model, DDP)
def log(epoch, i, lr, ms_iter, total_time, loss, scale):
    print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:.2E} | ms/batch {:.3E} | total time {:.3E} | '
          'loss {:.2E} | ppl {:8.2f} | loss scale {:8.2f}'.format(
              epoch, i, max_iters, lr,
              ms_iter, total_time, loss, math.exp(min(loss, 20)), scale))
data_iter = iter(train_data)
i = 0
while i < max_iters:
batch = next(data_iter)
data, targets, reset_mask = get_batch(batch)
optim.zero_grad()
output, hidden = model(data, reset_mask=reset_mask)
loss = criterion(output.view(-1, ntokens).contiguous().float(), targets.view(-1).contiguous())
total_loss += loss.data.float()
if args.fp16:
optim.backward(loss, update_master_grads=False)
else:
loss.backward()
if distributed:
torch.distributed.all_reduce(loss.data)
loss.data /= args.world_size
model.allreduce_params()
# clipping gradients helps prevent the exploding gradient problem in RNNs / LSTMs.
if args.clip > 0:
if not args.fp16:
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
else:
optim.clip_master_grads(clip=args.clip)
if args.fp16:
optim.update_master_grads()
optim.step()
# step learning rate and log training progress
lr = optim.param_groups[0]['lr']
if not args.fp16:
LR.step()
else:
# if fp16 optimizer skips gradient step due to explosion do not step lr
if not optim.overflow:
LR.step()
else:
skipped_iters += 1
# log current results
if ((i+1) % args.log_interval == 0) and (i != max_iters - 1):
cur_loss = total_loss[0] / args.log_interval
cur_time = time.time()
elapsed = cur_time - start_time
total_elapsed = cur_time - t0 + elapsed_time
log(epoch, i+1, lr, elapsed * 1000 / args.log_interval, total_elapsed,
cur_loss, args.loss_scale if not args.fp16 else optim.loss_scale)
total_loss = 0
start_time = cur_time
sys.stdout.flush()
# save current model progress. If distributed only save from worker 0
if args.save_iters and total_iters % args.save_iters == 0 and total_iters > 0 and args.rank < 1:
if args.rank < 1:
with open(os.path.join(os.path.splitext(args.save)[0], 'e%s.pt'%(str(total_iters),)), 'wb') as f:
torch.save(model.state_dict(), f)
if args.save_optim:
with open(os.path.join(os.path.splitext(args.save)[0], 'optim.pt'), 'wb') as f:
optim_sd = optim.state_dict()
optim_sd['iter'] = total_iters
optim_sd['skipped_iter'] = skipped_iters
torch.save(optim_sd, f)
del optim_sd
with open(os.path.join(os.path.splitext(args.save)[0], 'rng.pt'), 'wb') as f:
torch.save((torch.cuda.get_rng_state(), torch.get_rng_state()),f)
if args.cuda:
torch.cuda.synchronize()
total_iters += 1
i += 1
#final logging
elapsed_iters = max_iters % args.log_interval
if elapsed_iters == 0:
elapsed_iters = args.log_interval
cur_loss = total_loss[0] / elapsed_iters
cur_time = time.time()
elapsed = cur_time - start_time
total_elapsed = cur_time - t0 + elapsed_time
log(epoch, max_iters, lr, elapsed * 1000/ elapsed_iters, total_elapsed,
cur_loss, args.loss_scale if not args.fp16 else optim.loss_scale)
return cur_loss, skipped_iters
# Loop over epochs.
lr = args.lr
best_val_loss = None
# If saving process intermittently create directory for saving
if args.save_iters > 0 and not os.path.exists(os.path.splitext(args.save)[0]) and args.rank < 1:
os.makedirs(os.path.splitext(args.save)[0])
# At any point you can hit Ctrl + C to break out of training early.
try:
total_iters = 0
elapsed_time = 0
skipped_iters = 0
if args.load_optim:
total_iters = optim_sd['iter']
skipped_iters = optim_sd['skipped_iter']
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
val_loss, skipped_iters = train(args.train_iters, total_iters, skipped_iters, elapsed_time)
elapsed_time += time.time() - epoch_start_time
total_iters += args.train_iters
if val_data is not None:
print('entering eval')
val_loss = evaluate(val_data, args.eval_iters)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(min(val_loss, 20))))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if (not best_val_loss or val_loss < best_val_loss) and args.rank <= 0:
torch.save(model.state_dict(), args.save)
best_val_loss = val_loss
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
if os.path.exists(args.save):
model.load_state_dict(torch.load(args.save, 'cpu'))
if not args.no_weight_norm and args.rank <= 0:
remove_weight_norm(rnn_model)
with open(args.save, 'wb') as f:
torch.save(model.state_dict(), f)
if test_data is not None:
# Run on test data.
test_loss = evaluate(test_data, args.eval_iters)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
| sentiment-discovery-master | main.py |
import torch
import sys
import os
import subprocess
argslist = list(sys.argv)[1:]
LOGDIR = 'distributed_logs'
if '--save' in argslist:
savepath = os.path.splitext(os.path.basename(argslist[argslist.index('--save')+1]))[0]
else:
savepath = 'model'
LOGDIR = os.path.join(LOGDIR, savepath)
if not os.path.exists(LOGDIR):
os.makedirs(LOGDIR)
if '--world-size' in argslist:
world_size = int(argslist[argslist.index('--world-size')+1])
else:
world_size = torch.cuda.device_count()
argslist.append('--world-size')
argslist.append(str(world_size))
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank')+1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
#stdout = open(os.path.join(LOGDIR, str(i)+".log"), "w")
stdout = None if i == 0 else open(os.path.join(LOGDIR, str(i)+".log"), "w")
call = subprocess.Popen
if i == world_size-1:
call = subprocess.call
call([str(sys.executable)]+argslist, stdout=stdout)
| sentiment-discovery-master | multiproc.py |
import argparse
import itertools
import sys
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser("Let's run some multihead experiments!")
parser.add_argument('--gpu', type=int, default=0,
help='which gpu to run on')
parser.add_argument('--train', type=str, default='./data/semeval/val.csv',
help='using nvidia training dataset')
parser.add_argument('--val', type=str, default='./data/semeval/val.csv',
help='using nvidia val dataset')
parser.add_argument('--test', type=str, default='./data/semeval/val.csv')
parser.add_argument('--process-fn', type=str, default='process_str', choices=['process_str', 'process_tweet'],
help='what preprocessing function to use to process text. One of [process_str, process_tweet].')
parser.add_argument('--text-key', default='text', type=str)
args = parser.parse_args()
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
plutchik_cols = ' '.join(['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust'])
base_command = "python3 finetune_classifier.py --data {train} --valid {val} --test {test} --warmup-epochs 0.5 --epochs 20 " \
+ "--text-key {text_key} --optim Adam --all-metrics --automatic-thresholding --batch-size 16 --save-finetune --process-fn {proc} " \
+ "--aux-lm-loss --aux-lm-loss-weight 0.02 --classifier-hidden-layers 4096 2048 1024 8 --classifier-dropout 0.3 --non-binary-cols " + plutchik_cols + ' '
transformer_options = "--lr 1e-5 --tokenizer-type SentencePieceTokenizer --tokenizer-path ama_32k_tokenizer.model --vocab-size 32000 --decoder-layers 12 "\
+" --decoder-embed-dim 768 --decoder-ffn-embed-dim 3072 --decoder-learned-pos --model transformer --load transformer.pt --use-final-embed --max-seq-len 150 " \
+" --dropout 0.2 --attention-dropout 0.2 --relu-dropout 0.2 --model-version-name transformer_multihead"
mlstm_options = " --lr 1e-5 --load mlstm.pt --model-version-name mlstm_multihead"
formatted_base_command = base_command.format(train=args.train, val=args.val, test=args.test, text_key=args.text_key, proc=args.process_fn)
transformer_command = formatted_base_command + transformer_options
print('*' * 100)
print("EXPERIMENT: Transformer, {}, {}, {}".format('multihead', args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(transformer_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env)
mlstm_command = formatted_base_command + mlstm_options
print('*' * 100)
print("EXPERIMENT: mLSTM, {}, {}, {}".format('multihead', args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(mlstm_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env) | sentiment-discovery-master | experiments/run_clf_multihead.py |
import argparse
import itertools
import sys
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser("Let's run some sst experiments!")
parser.add_argument('--gpu', type=int, default=0,
help='which gpu to run on')
args = parser.parse_args()
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
base_command = "python3 finetune_classifier.py --warmup-epochs 0.5 --epochs 20 " \
+ "--optim Adam --all-metrics --threshold-metric f1 --automatic-thresholding --batch-size 16 " \
+ "--aux-lm-loss --aux-lm-loss-weight 0.02 --save-finetune "
transformer_options = "--lr 1e-5 --tokenizer-type SentencePieceTokenizer --tokenizer-path ama_32k_tokenizer.model --vocab-size 32000 --decoder-layers 12 "\
+" --decoder-embed-dim 768 --decoder-ffn-embed-dim 3072 --decoder-learned-pos --model transformer --load transformer.pt --use-final-embed --max-seq-len 150 " \
+" --dropout 0.2 --attention-dropout 0.2 --relu-dropout 0.2 --model-version-name transformer_sst_binary"
mlstm_options = " --lr 1e-5 --load mlstm.pt --model-version-name mlstm_sst_binary"
formatted_base_command = base_command
transformer_command = formatted_base_command + transformer_options
print('*' * 100)
print("EXPERIMENT: Transformer, {}, ".format('sst',))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(transformer_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env)
mlstm_command = formatted_base_command + mlstm_options
print('*' * 100)
print("EXPERIMENT: mLSTM, {}, ".format('sst', ))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(mlstm_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env) | sentiment-discovery-master | experiments/run_clf_sst.py |
import argparse
import itertools
import sys
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser("Let's run some singlehead experiments!")
parser.add_argument('--gpu', type=int, default=0,
help='which gpu to run on')
parser.add_argument('--train', type=str, default='./data/semeval/val.csv',
help='using nvidia training dataset')
parser.add_argument('--val', type=str, default='./data/semeval/val.csv',
help='using nvidia val dataset')
parser.add_argument('--test', type=str, default='./data/semeval/val.csv')
parser.add_argument('--process-fn', type=str, default='process_str', choices=['process_str', 'process_tweet'],
help='what preprocessing function to use to process text. One of [process_str, process_tweet].')
parser.add_argument('--text-key', default='text', type=str)
args = parser.parse_args()
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
plutchik_cols = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust']
base_command = "python3 finetune_classifier.py --data {train} --valid {val} --test {test} --warmup-epochs 0.5 --epochs 20 " \
+ "--text-key {text_key} --optim Adam --label-key {label_key} --all-metrics --automatic-thresholding --batch-size 16 --process-fn {proc} " \
+ "--aux-lm-loss --aux-lm-loss-weight 0.02 --classifier-hidden-layers 1 --classifier-dropout 0.3 "
transformer_options = "--lr 1e-5 --tokenizer-type SentencePieceTokenizer --tokenizer-path ama_32k_tokenizer.model --vocab-size 32000 --decoder-layers 12 "\
+" --decoder-embed-dim 768 --decoder-ffn-embed-dim 3072 --decoder-learned-pos --model transformer --load transformer.pt --use-final-embed --max-seq-len 150 "\
+" --dropout 0.2 --attention-dropout 0.2 --relu-dropout 0.2"
mlstm_options = " --lr 1e-5 --load mlstm.pt"
for label_key in plutchik_cols:
formatted_base_command = base_command.format(label_key=label_key, train=args.train, val=args.val, test=args.test, text_key=args.text_key, proc=args.process_fn)
transformer_command = formatted_base_command + transformer_options
print('*' * 100)
print("EXPERIMENT: Transformer, {}, {}, {}".format(label_key, args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(transformer_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env)
mlstm_command = formatted_base_command + mlstm_options
print('*' * 100)
print("EXPERIMENT: mLSTM, {}, {}, {}".format(label_key, args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(mlstm_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env)
| sentiment-discovery-master | experiments/run_clf_single_head.py |
import argparse
import itertools
import sys
import subprocess
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser("Let's run some binary sentiment experiments!")
parser.add_argument('--gpu', type=int, default=0,
help='which gpu to run on')
parser.add_argument('--train', type=str, default='./data/semeval/train.csv',
help='using nvidia training dataset')
parser.add_argument('--val', type=str, default='./data/semeval/val.csv',
help='using nvidia val dataset')
parser.add_argument('--test', type=str, default='./data/semeval/val.csv')
parser.add_argument('--process-fn', type=str, default='process_str', choices=['process_str', 'process_tweet'],
help='what preprocessing function to use to process text. One of [process_str, process_tweet].')
parser.add_argument('--text-key', default='text', type=str)
args = parser.parse_args()
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
binary_cols = ' '.join(['positive', 'negative'])
base_command = "python3 finetune_classifier.py --data {train} --valid {val} --test {test} --warmup-epochs 0.5 --epochs 20 " \
+ "--text-key {text_key} --optim Adam --all-metrics --threshold-metric f1 --automatic-thresholding --batch-size 16 --dual-thresh --process-fn {proc} " \
+ "--aux-lm-loss --aux-lm-loss-weight 0.02 --classifier-hidden-layers 4096 2048 1024 2 --classifier-dropout 0.3 --non-binary-cols " + binary_cols + ' '
transformer_options = "--lr 1e-5 --tokenizer-type SentencePieceTokenizer --tokenizer-path ama_32k_tokenizer.model --vocab-size 32000 --decoder-layers 12 "\
+" --decoder-embed-dim 768 --decoder-ffn-embed-dim 3072 --decoder-learned-pos --model transformer --load transformer.pt --use-final-embed --max-seq-len 150 " \
+" --dropout 0.2 --attention-dropout 0.2 --relu-dropout 0.2"
mlstm_options = " --lr 1e-5 --load mlstm.pt"
formatted_base_command = base_command.format(train=args.train, val=args.val, test=args.test, text_key=args.text_key, proc=args.process_fn)
transformer_command = formatted_base_command + transformer_options
print('*' * 100)
print("EXPERIMENT: Transformer, {}, {}, {}".format('binary', args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(transformer_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env)
mlstm_command = formatted_base_command + mlstm_options
print('*' * 100)
print("EXPERIMENT: mLSTM, {}, {}, {}".format('binary', args.train, args.val))
print('*' * 100)
sys.stdout.flush()
sys.stderr.flush()
subprocess.call(mlstm_command.split(), stdout=sys.stdout, stderr=sys.stderr, env=env) | sentiment-discovery-master | experiments/run_clf_binary.py |
import os
import mmap
import pickle as pkl
import time
from itertools import accumulate
from threading import Lock
import torch
def get_lazy_path(path):
"""
Gets path where lazy files are stored.
"""
return os.path.splitext(path)[0]+'.lazy'
def exists_lazy(path, data_type='data'):
"""
Check if we've already made a lazy version of this file for the `data_type` field.
"""
if not os.path.exists(get_lazy_path(path)):
return False
contents = os.listdir(get_lazy_path(path))
if data_type not in contents:
return False
if data_type+'.len.pkl' not in contents:
return False
return True
def make_lazy(path, strs, data_type='data'):
"""
Make lazy version of `data_type` field of the file.
"""
lazypath = get_lazy_path(path)
if not os.path.exists(lazypath):
os.makedirs(lazypath)
datapath = os.path.join(lazypath, data_type)
lenpath = os.path.join(lazypath, data_type+'.len.pkl')
if not torch.distributed._initialized or torch.distributed.get_rank() == 0:
with open(datapath, 'wb') as f:
str_lens = []
str_cnt = 0
for s in strs:
if isinstance(s, dict):
s = s['text']
encoded = s.encode('utf-8')
f.write(encoded)
str_cnt = len(encoded)
str_lens.append(str_cnt)
pkl.dump(str_lens, open(lenpath, 'wb'))
else:
while not os.path.exists(lenpath):
time.sleep(1)
def split_strings(strings, start, chr_lens):
"""
Split strings based on string lengths and given start.
"""
return [strings[i-start:j-start] for i, j in zip([start]+chr_lens[:-1], chr_lens)]
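# Illustrative sketch (hypothetical values, never called): `ends` from
# itertools.accumulate gives absolute end offsets, which split_strings uses
# to recover the individual strings from the concatenated blob.
def _demo_split_strings():
    lens = [3, 2, 4]                     # per-string lengths, as stored in .len.pkl
    ends = list(accumulate(lens))        # [3, 5, 9]
    blob = 'abcdefghi'                   # concatenation of 'abc', 'de', 'fghi'
    return split_strings(blob, 0, ends)  # ['abc', 'de', 'fghi']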
class ProcessorTokenizer:
def __init__(self, tokenizer, process_fn=None):
self.tokenizer = tokenizer
self.process_fn = process_fn
def __call__(self, string):
return self.tokenizer(string, process_fn=self.process_fn)
class lazy_array_loader(object):
"""
Arguments:
path: path to directory where array entries are concatenated into one big string file
and the .len file are located
data_type (str): Some datasets have multiple fields that are stored in different paths.
`data_type` specifies which of these fields to load in this class
mem_map (boolean): Specifies whether to memory map file `path`
map_fn (callable): Fetched strings are passed through map_fn before being returned.
"""
def __init__(self, path, data_type='data', mem_map=False, map_fn=None):
lazypath = get_lazy_path(path)
datapath = os.path.join(lazypath, data_type)
#get file where array entries are concatenated into one big string
self._file = open(datapath, 'rb')
self.file = self._file
#memory map file if necessary
self.mem_map = mem_map
if self.mem_map:
self.file = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)
lenpath = os.path.join(lazypath, data_type+'.len.pkl')
self.lens = pkl.load(open(lenpath, 'rb'))
self.ends = list(accumulate(self.lens))
self.dumb_ends = list(self.ends)
self.read_lock = Lock()
self.map_fn = map_fn
def SetTokenizer(self, tokenizer):
self.map_fn = ProcessorTokenizer(tokenizer, self.map_fn)
def __getitem__(self, index):
"""read file and splice strings based on string ending array `ends` """
if not isinstance(index, slice):
if index == 0:
start = 0
else:
start = self.ends[index-1]
end = self.ends[index]
rtn = self.file_read(start, end)
if self.map_fn is not None:
return self.map_fn(rtn)
else:
chr_lens = self.ends[index]
if index.start == 0 or index.start is None:
start = 0
else:
start = self.ends[index.start-1]
stop = chr_lens[-1]
strings = self.file_read(start, stop)
rtn = split_strings(strings, start, chr_lens)
if self.map_fn is not None:
return self.map_fn([s for s in rtn])
return rtn
def __len__(self):
return len(self.ends)
def file_read(self, start=0, end=None):
"""read specified portion of file"""
#TODO: Solve race condition
#Seek to start of file read
self.read_lock.acquire()
self.file.seek(start)
##### Getting context-switched here
#read to end of file if no end point provided
if end is None:
rtn = self.file.read()
#else read amount needed to reach end point
else:
rtn = self.file.read(end-start)
self.read_lock.release()
#TODO: @raulp figure out mem map byte string bug
#if mem map'd need to decode byte string to string
# rtn = rtn.decode('utf-8')
if isinstance(rtn, bytes):
    rtn = rtn.decode('utf-8')
else:
    rtn = str(rtn)
return rtn
| sentiment-discovery-master | data_utils/lazy_loader.py |
import os
import re
import html
import unicodedata
import unidecode
import numpy as np
import torch
try:
import emoji
except:
print(Warning("emoji import unavailable"))
HTML_CLEANER_REGEX = re.compile('<.*?>')
def clean_html(text):
"""remove html div tags"""
text = str(text)
return re.sub(HTML_CLEANER_REGEX, ' ', text)
def binarize_labels(labels, hard=True):
"""If hard, binarizes labels to values of 0 & 1. If soft thresholds labels to [0,1] range."""
labels = np.array(labels)
min_label = min(labels)
label_range = max(labels)-min_label
if label_range == 0:
return labels
labels = (labels-min_label)/label_range
if hard:
labels = (labels > .5).astype(int)
return labels
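# Illustrative sketch (hypothetical values, never called): hard binarization
# rescales to [0, 1] then thresholds at .5; soft only rescales.
def _demo_binarize_labels():
    hard = binarize_labels([1, 3, 5])               # -> [0, 0, 1]
    soft = binarize_labels([1, 3, 5], hard=False)   # -> [0., .5, 1.]
    return hard, soft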
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
def process_str(text, front_pad='\n ', end_pad=' ', maxlen=None, clean_markup=True,
clean_unicode=True, encode='utf-8', limit_repeats=3):
"""
Processes utf-8 encoded text according to the criterion specified in section 4 of https://arxiv.org/pdf/1704.01444.pdf (Radford et al.).
We use unidecode to clean unicode text into ascii-readable text.
"""
if clean_markup:
text = clean_html(text)
if clean_unicode:
text = unidecode.unidecode(text)
text = html.unescape(text)
text = text.split()
if maxlen is not None:
len2use = maxlen-len(front_pad)-len(end_pad)
text = text[:len2use]
    if limit_repeats > 0:
        text = remove_repeats(text, limit_repeats, join=False)
text = front_pad+(" ".join(text))+end_pad
if encode is not None:
text = text.encode(encoding=encode)
text = ''.join(chr(c) for c in text)
return text
def remove_repeats(string, n, join=True):
count = 0
output = []
last = ''
for c in string:
if c == last:
count = count + 1
else:
count = 0
last = c
if count < n:
output.append(c)
if join:
return "".join(output)
return output
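# Worked example: with n=3 at most three consecutive copies of a character
# (or of a word, when called on a list with join=False) are kept:
#
#     remove_repeats('aaaaabb', 3)   # -> 'aaabb'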
def tokenize_str_batch(strings, rtn_maxlen=True, process=True, maxlen=None, ids=False, rtn_processed=True):
"""
Tokenizes a list of strings into a ByteTensor
Args:
strings: List of utf-8 encoded strings to tokenize into ByteTensor form
rtn_maxlen: Boolean with functionality specified in Returns.lens
Returns:
batch_tensor: ByteTensor of shape `[len(strings),maxlen_of_strings]`
lens: Length of each string in strings after being preprocessed with `preprocess` (useful for
dynamic length rnns). If `rtn_maxlen` is `True` then max(lens) is returned instead.
"""
if process:
processed_strings = [process_str(x, maxlen=maxlen) for x in strings]
else:
processed_strings = [x.encode('utf-8', 'replace') for x in strings]
tensor_type = torch.ByteTensor
lens, batch_tensor = batch_tokens(processed_strings, tensor_type)
maxlen = max(lens)
rounded_maxlen = max(lens)
rtn = []
if not rtn_maxlen and rtn_maxlen is not None:
rtn = [batch_tensor, lens]
elif rtn_maxlen is None:
rtn = [batch_tensor]
else:
rtn = [batch_tensor, rounded_maxlen]
if rtn_processed:
rtn += [processed_strings]
return tuple(rtn)
def batch_tokens(token_lists, tensor_type=torch.LongTensor, fill_value=0):
lens = list(map(len, token_lists))
batch_tensor = fill_value * torch.ones(len(lens), max(lens)).type(tensor_type)
for i, string in enumerate(token_lists):
_tokenize_str(string, tensor_type, batch_tensor[i])
return batch_tensor, lens
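# Sketch of the padding behavior: token lists of unequal length are packed
# into one [batch, max_len] tensor, right-padded with `fill_value`:
#
#     tensor, lens = batch_tokens([[5, 6, 7], [8]], fill_value=0)
#     lens             # -> [3, 1]
#     tensor.tolist()  # -> [[5, 6, 7], [8, 0, 0]]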
def _tokenize_str(data, tensor_type, char_tensor=None):
    """
    Parses a utf-8 encoded string and assigns to ByteTensor char_tensor.
    If no char_tensor is provided one is created.
    Typically used internally by `tokenize_str_batch`.
    """
    if char_tensor is None:
        if isinstance(data, str):
            # data could either be a string or a list of ids.
            data = data.encode()
        char_tensor = tensor_type(len(data))
    for i, char in enumerate(data):
        # iterating bytes yields ints; iterating a str yields 1-char strings
        char_tensor[i] = char if isinstance(char, int) else ord(char)
    return char_tensor
EMOJI_DESCRIPTION_SCRUB = re.compile(r':(\S+?):')
HASHTAG_BEFORE = re.compile(r'#(\S+)')
BAD_HASHTAG_LOGIC = re.compile(r'(\S+)!!')
FIND_MENTIONS = re.compile(r'@(\S+)')
LEADING_NAMES = re.compile(r'^\s*((?:@\S+\s*)+)')
TAIL_NAMES = re.compile(r'\s*((?:@\S+\s*)+)$')
def process_tweet(s, save_text_formatting=True, keep_emoji=False, keep_usernames=False):
# NOTE: will sometimes need to use Windows encoding here, depending on how CSV is generated.
# All results saved in UTF-8
# TODO: Try to get input data in UTF-8 and don't let it touch windows (Excel). That loses emoji, among other things
# Clean up the text before tokenizing.
# Why is this necessary?
# Unsupervised training (and tokenization) is usually on clean, unformatted text.
# Supervised training/classification may be on tweets -- with non-ASCII, hashtags, emoji, URLs.
# Not obvious what to do. Two options:
# A. Rewrite formatting to something in ASCII, then finetune.
# B. Remove all formatting, keep only the text.
if save_text_formatting:
s = re.sub(r'https\S+', r'xxxx', str(s))
else:
s = re.sub(r'https\S+', r' ', str(s))
s = re.sub(r'x{3,5}', r' ', str(s))
# Try to rewrite all non-ASCII if known printable equivalent
s = re.sub(r'\\n', ' ', s)
s = re.sub(r'\s', ' ', s)
s = re.sub(r'<br>', ' ', s)
    # unescape common HTML entities left over in tweet text
    s = re.sub(r'&amp;', '&', s)
    s = re.sub(r'&#039;', "'", s)
    s = re.sub(r'&gt;', '>', s)
    s = re.sub(r'&lt;', '<', s)
s = re.sub(r'\'', "'", s)
# Rewrite emoji as words? Need to import a function for that.
# If no formatting, just get the raw words -- else save formatting so model can "learn" emoji
# TODO: Debug to show differences?
if save_text_formatting:
s = emoji.demojize(s)
elif keep_emoji:
s = emoji.demojize(s)
# Transliterating directly is ineffective w/o emoji training. Try to shorten & fix
s = s.replace('face_with', '')
s = s.replace('face_', '')
s = s.replace('_face', '')
    # remove emoji formatting (: and _)
# TODO: A/B test -- better to put emoji in parens, or just print to screen?
#s = re.sub(EMOJI_DESCRIPTION_SCRUB, r' (\1) ', s)
s = re.sub(EMOJI_DESCRIPTION_SCRUB, r' \1 ', s)
# TODO -- better to replace '_' within the emoji only...
s = s.replace('(_', '(')
s = s.replace('_', ' ')
# Remove all non-printable and non-ASCII characters, including unparsed emoji
s = re.sub(r"\\x[0-9a-z]{2,3,4}", "", s)
# NOTE: We can't use "remove accents" as long as foreign text and emoji gets parsed as characters. Better to delete it.
# Replace accents with non-accented English letter, if possible.
# WARNING: Will try to parse corrupted text... (as aAAAa_A)
s = remove_accents(s)
# Rewrite or remove hashtag symbols -- important text, but not included in ASCII unsupervised set
if save_text_formatting:
s = re.sub(HASHTAG_BEFORE, r'\1!!', s)
else:
s = re.sub(HASHTAG_BEFORE, r'\1', s)
# bad logic in case ^^ done already
s = re.sub(BAD_HASHTAG_LOGIC, r'\1', s)
# Keep user names -- or delete them if not saving formatting.
# NOTE: This is not an obvious choice -- we could also treat mentions vs replies differently. Or we could sub xxx for user name
# The question is, does name in the @mention matter for category prediction? For emotion, it should not, most likely.
if save_text_formatting:
# TODO -- should we keep but anonymize mentions? Same as we rewrite URLs.
pass
else:
# If removing formatting, either remove all mentions, or just the @ sign.
if keep_usernames:
# quick cleanup extra spaces
s = ' '.join(s.split())
# If keep usernames, *still* remove leading and trailing names in @ mentions (or tail mentions)
# Why? These are not part of the text -- and should not change sentiment
s = re.sub(LEADING_NAMES, r' ', s)
s = re.sub(TAIL_NAMES, r' ', s)
# Keep remaining mentions, as in "what I like about @nvidia drivers"
s = re.sub(FIND_MENTIONS, r'\1', s)
else:
s = re.sub(FIND_MENTIONS, r' ', s)
#s = re.sub(re.compile(r'@(\S+)'), r'@', s)
# Just in case -- remove any non-ASCII and unprintable characters, apart from whitespace
s = "".join(x for x in s if (x.isspace() or (31 < ord(x) < 127)))
# Final cleanup -- remove extra spaces created by rewrite.
s = ' '.join(s.split())
return s
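# Illustrative input/output (hypothetical tweet) when all formatting is
# stripped: the URL and the mention are dropped, and the hashtag keeps its text.
#
#     process_tweet('@user loving the new #drivers https://t.co/x',
#                   save_text_formatting=False)
#     # -> 'loving the new drivers'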
| sentiment-discovery-master | data_utils/preprocess.py |
import collections
import sys
if sys.version_info[0] == 2:
import Queue as queue
string_classes = basestring
else:
import queue
string_classes = (str, bytes)
import threading
import traceback
import math
import time
import torch
from torch.utils import data
import torch.multiprocessing as multiprocessing
import numpy as np
from .preprocess import tokenize_str_batch, batch_tokens
from .samplers import DistributedBatchSampler, BatchSampler, TransposedSampler, RandomShardSampler, DistributedBatchShardSampler, BatchShardSampler
from .tokenization import Tokenization
_use_shared_memory = False
"""Whether to use shared memory in default_collate"""
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
samples = []
def default_collate(batch, maxlen=None, process=False):
"""
normal default collate except for string classes we use our own tokenize_str_batch
function to batch strings
"""
"Puts each data field into a tensor with outer dimension batch size"
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif type(batch[0]).__module__ == 'numpy':
elem = batch[0]
if type(elem).__name__ == 'ndarray':
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == ():
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], Tokenization):
pad = batch[0].pad
tokenization, text, original_text = zip(*([(tokenization.tokenization, tokenization.text, tokenization.original_text) for tokenization in batch]))
return [batch_tokens(tokenization, fill_value=pad)[0], text, original_text]
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return tokenize_str_batch(batch, rtn_maxlen=None, process=process, maxlen=maxlen)
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
.format(type(batch[0]))))
def pin_memory_batch(batch):
if isinstance(batch, torch.Tensor):
return batch.pin_memory()
elif isinstance(batch, string_classes):
return batch
elif isinstance(batch, collections.Mapping):
return {k: pin_memory_batch(sample) for k, sample in batch.items()}
elif isinstance(batch, collections.Sequence):
return [pin_memory_batch(sample) for sample in batch]
else:
return batch
class DataLoader(data.DataLoader):
"""normal data loader except with options for distributed data batch sampling + wrap around"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
transpose=False, world_size=2, rank=-1, distributed=False, wrap_last=False,
timeout=0, worker_init_fn=None):
self.dataset = dataset
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.drop_last = drop_last
self.timeout = timeout
self.worker_init_fn = worker_init_fn
if timeout < 0:
raise ValueError('timeout option should be non-negative')
if batch_sampler is not None:
if batch_size > 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler is mutually exclusive with \
batch_size, shuffle, sampler, and drop_last')
if sampler is not None and shuffle:
raise ValueError('sampler is mutually exclusive with shuffle')
if self.num_workers < 0:
raise ValueError('num_workers cannot be negative; '
'use num_workers=0 to disable multiprocessing.')
if batch_sampler is None:
if sampler is None:
if shuffle:
sampler = data.sampler.RandomSampler(dataset)
else:
if transpose:
sampler = TransposedSampler(dataset, batch_size)
else:
sampler = data.sampler.SequentialSampler(dataset)
if distributed:
batch_sampler = DistributedBatchSampler(sampler, batch_size, drop_last,
world_size=world_size, rank=rank, wrap_last=wrap_last)
else:
batch_sampler = BatchSampler(sampler, batch_size, drop_last, wrap_last=wrap_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
self.last_iter = None
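# Minimal usage sketch (hypothetical dataset and arguments): in the
# distributed case each of `world_size` workers receives a disjoint
# `batch_size // world_size` slice of every global batch.
#
#     loader = DataLoader(my_dataset, batch_size=32, shuffle=True,
#                         distributed=True, world_size=2, rank=0)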
class ShardLoader(object):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
transpose=False, world_size=2, rank=-1, distributed=False, wrap_last=False,
timeout=0, worker_init_fn=None, seq_len=-1, persist_state=0, samples_per_shard=1):
self.dataset = dataset
self.batch_size = batch_size
self.seq_len = seq_len
self.persist_state = persist_state
self.samples_per_shard = samples_per_shard
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.drop_last = drop_last
self.timeout = timeout
self.worker_init_fn = worker_init_fn
if timeout < 0:
raise ValueError('timeout option should be non-negative')
if batch_sampler is not None:
if batch_size > 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler is mutually exclusive with \
batch_size, shuffle, sampler, and drop_last')
if sampler is not None and shuffle:
raise ValueError('sampler is mutually exclusive with shuffle')
if self.num_workers < 0:
raise ValueError('num_workers cannot be negative; '
'use num_workers=0 to disable multiprocessing.')
self.distributed=distributed
self.world_size=world_size
self.rank=rank
if self.distributed:
self.batch_size = math.ceil(self.batch_size/self.world_size)
if batch_sampler is None:
if sampler is None:
sampler = RandomShardSampler(self.dataset, self.samples_per_shard, self.seq_len, self.persist_state)
if self.distributed:
batch_sampler = DistributedBatchShardSampler(sampler, self.batch_size, self.drop_last, world_size=self.world_size, rank=self.rank)
else:
batch_sampler = BatchShardSampler(sampler, self.batch_size, self.drop_last)
else:
sampler = batch_sampler.sampler
self.sampler = sampler
self.batch_sampler = batch_sampler
self.last_iter = None
def set_seq_len(self, seq_len):
self.seq_len = seq_len
self.batch_sampler.set_seq_len(seq_len)
def set_samples_per_shard(self, samples_per_shard):
self.samples_per_shard = samples_per_shard
self.batch_sampler.set_samples_per_shard(samples_per_shard)
def set_persist_state(self, persist_state):
self.persist_state = persist_state
self.batch_sampler.set_persist_state(persist_state)
def __len__(self):
        return len(self.batch_sampler) // self.batch_size
def __iter__(self):
return _ShardLoaderIter(self)
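# Minimal ShardLoader sketch (hypothetical dataset): each batch index streams
# tokens from its own shard queue, so consecutive batches continue where the
# previous ones left off, which is useful for stateful (persistent hidden
# state) RNN training.
#
#     loader = ShardLoader(my_dataset, batch_size=4, seq_len=256,
#                          persist_state=1, samples_per_shard=2)
#     for tokens, reset_mask, loss_mask in loader:
#         ...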
class _ShardLoaderIter(object):
def __init__(self, shardloader):
self.shardloader = shardloader
self.num_workers = self.shardloader.num_workers
self.batch_sampler = self.shardloader.batch_sampler
self.collate_fn = self.shardloader.collate_fn
self.pin_memory = self.shardloader.pin_memory
self.batch_size = self.batch_sampler.batch_size
self.timeout = self.shardloader.timeout
if self.num_workers == 0:
self.queue_manager = (q for q in self.batch_sampler.manage_queues())
else:
self.queue_manager = _ShardLoaderManager(self.batch_sampler, self.num_workers, self.collate_fn, self.pin_memory, self.timeout)
def __iter__(self):
return self
def __next__(self):
if self.num_workers == 0:
return self.collate_fn(next(self.queue_manager))
else:
return next(self.queue_manager)
MP_STATUS_CHECK_INTERVAL = 5.0
class _ShardLoaderManager(object):
def __init__(self, batch_sampler, num_workers, collate_fn, pin_memory=False, timeout=False):
self.batch_sampler = batch_sampler
self.batch_size = self.batch_sampler.batch_size
self.num_workers = num_workers
self.queue_size = num_workers*2
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.timeout = timeout
self.data_queues = []
self.workers = []
indices_per_worker = self.batch_size // self.num_workers
all_indices = list(range(self.batch_size))
for i in range(num_workers):
data_queue = multiprocessing.Queue(self.queue_size)
self.data_queues.append(data_queue)
batch_indices = all_indices[indices_per_worker*i:indices_per_worker*(i+1)]
w = multiprocessing.Process(target=self.batch_sampler.manage_queues_multiproc,
args=(batch_indices, data_queue))
w.daemon = True
w.start()
self.workers.append(w)
self.output_queue = queue.Queue(self.queue_size)
cur_device = -1
if torch.cuda.is_available():
cur_device = torch.cuda.current_device()
self.output_thread = threading.Thread(target=_shardloader_pin_memory_loop,
args=(self.output_queue, self.data_queues,
self.collate_fn, self.pin_memory,
cur_device))
self.output_thread.daemon = True
self.output_thread.start()
def __iter__(self):
return self
def _get_batch(self):
# In the non-timeout case, worker exit is covered by SIGCHLD handler.
# But if `pin_memory=True`, we still need account for the possibility
# that `pin_memory_thread` dies.
if self.timeout > 0:
try:
return self.output_queue.get(timeout=self.timeout)
except queue.Empty:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
elif self.pin_memory:
while self.output_thread.is_alive():
try:
return self.output_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
else:
# while condition is false, i.e., pin_memory_thread died.
raise RuntimeError('Pin memory thread exited unexpectedly')
# In this case, `self.data_queue` is a `queue.Queue`,. But we don't
# need to call `.task_done()` because we don't use `.join()`.
else:
return self.output_queue.get(block=True)
def __next__(self):
return self._get_batch()
def _shardloader_pin_memory_loop(output_queue, data_queues, collate_fn, pin_memory=False, device_id=-1, timeout=0):
queue_results = [list() for _ in data_queues]
output_queue_len = output_queue.maxsize
if device_id >= 0:
torch.cuda.set_device(device_id)
while True:
for i, data_queue in enumerate(data_queues):
try:
res = data_queue.get_nowait()
queue_results[i].append(res)
except queue.Empty:
continue
if sum(len(q)>=1 for q in queue_results) >= len(data_queues):
batch = []
for q in queue_results:
batch.extend(q.pop(0))
batch = collate_fn(batch)
if pin_memory:
batch = pin_memory_batch(batch)
output_queue.put(batch, block=True)
| sentiment-discovery-master | data_utils/loaders.py |
import os
import time
import warnings
from operator import itemgetter
from bisect import bisect_left, bisect_right
import json
from itertools import accumulate
import csv
import collections
import torch
from torch.utils import data
import pandas as pd
import numpy as np
from .preprocess import process_str, binarize_labels
from .lazy_loader import lazy_array_loader, exists_lazy, make_lazy
from .cache import array_cache
from .tokenization import Tokenization
PERSIST_ALL = -1
PERSIST_SHARD = 1
RESET_STATE = 0
def get_processed_path(path, text_key='text', label_key='label'):
filepath, ext = os.path.splitext(path)
return filepath+'.%s.%s'%(text_key, label_key)+ext
def get_load_path_and_should_process(path, text_key='text', label_key='label'):
processed_path = get_processed_path(path, text_key, label_key)
exists = os.path.exists(processed_path)
if not exists:
return path, True
return processed_path, False
def save_preprocessed(ds, text_key='text', label_key='label'):
processed_path = get_processed_path(ds.path, text_key, label_key)
if not torch.distributed._initialized or torch.distributed.get_rank() == 0:
ds.write(path=processed_path)
return processed_path
class ConcatDataset(data.Dataset):
"""
Dataset to concatenate multiple datasets.
Purpose: useful to assemble different existing datasets, possibly
large-scale datasets as the concatenation operation is done in an
on-the-fly manner.
Arguments:
datasets (sequence): List of datasets to be concatenated.
"""
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
l = len(e)
r.append(l + s)
s += l
return r
def __init__(self, datasets, **kwargs):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, 'datasets should not be an empty iterable'
self.datasets = list(datasets)
self.cumulative_sizes = self.cumsum(self.datasets)
self._X = None
self._Y = None
def SetTokenizer(self, tokenizer):
for ds in self.datasets:
ds.SetTokenizer(tokenizer)
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx = bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx][sample_idx]
@property
def X(self):
if self._X is None:
self._X = []
for data in self.datasets:
self._X.extend(data.X)
return self._X
@property
def Y(self):
if self._Y is None:
self._Y = []
for data in self.datasets:
self._Y.extend(list(data.Y))
self._Y = np.array(self._Y)
return self._Y
@property
def cummulative_sizes(self):
warnings.warn("cummulative_sizes attribute is renamed to "
"cumulative_sizes", DeprecationWarning, stacklevel=2)
return self.cumulative_sizes
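# Worked example of the index mapping: with child dataset lengths [3, 4] the
# cumulative sizes are [3, 7]; global index 4 lands in dataset 1 at local
# index 4 - 3 = 1, since bisect_right([3, 7], 4) == 1.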
class SplitDataset(data.Dataset):
"""
Dataset wrapper to access a subset of another dataset.
Purpose: useful to index into existing datasets, possibly
large-scale datasets as the subindexing operation is done in an
on-the-fly manner.
Arguments:
ds (Dataset or array-like): List of datasets to be subindexed
split_inds (1D array-like): List of indices part of subset
"""
def __init__(self, ds, split_inds, **kwargs):
self.split_inds = list(split_inds)
self.wrapped_data = ds
self.is_lazy = isinstance(ds, lazy_array_loader)
if self.is_lazy:
self.lens = itemgetter(*self.split_inds)(list(self.wrapped_data.lens))
self._X = None
self._Y = None
def __len__(self):
return len(self.split_inds)
def __getitem__(self, index):
return self.wrapped_data[self.split_inds[index]]
@property
def X(self):
if self._X is None:
self._X = itemgetter(*self.split_inds)(self.wrapped_data.X)
return self._X
@property
def Y(self):
if self._Y is None:
self._Y = np.array(itemgetter(*self.split_inds)(self.wrapped_data.Y))
return self._Y
def __iter__(self):
for idx in self.split_inds:
yield self.wrapped_data[idx]
def split_ds(ds, split=[.8,.2,.0], shuffle=True):
"""
Split a dataset into subsets given proportions of how
much to allocate per split. If a split is 0% returns None for that split.
Purpose: Useful for creating train/val/test splits
Arguments:
ds (Dataset or array-like): Data to be split.
split (1D array-like): proportions to split `ds`. `sum(splits) != 0`
shuffle (boolean): Randomly split dataset. Default: True
"""
split_sum = sum(split)
if split_sum == 0:
raise Exception('Split cannot sum to 0.')
split = np.array(split)
split /= split_sum
ds_len = len(ds)
inds = np.arange(ds_len)
if shuffle:
np.random.shuffle(inds)
start_idx = 0
residual_idx = 0
rtn_ds = [None]*len(split)
for i, f in enumerate(split):
if f != 0:
proportion = ds_len*split[i]
residual_idx += proportion % 1
split_ = int(int(proportion) + residual_idx)
split_inds = inds[start_idx:start_idx+max(split_, 1)]
rtn_ds[i] = SplitDataset(ds, split_inds)
start_idx += split_
residual_idx %= 1
return rtn_ds
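# Usage sketch (hypothetical dataset of 10 items): a zero-proportion slot
# comes back as None, so an 80/20/0 split unpacks cleanly.
#
#     train, val, test = split_ds(my_dataset, split=[.8, .2, .0])
#     # len(train) == 8, len(val) == 2, test is None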
class csv_dataset(data.Dataset):
"""
Class for loading datasets from csv files.
Purpose: Useful for loading data for unsupervised modeling or transfer tasks
Arguments:
path (str): Path to csv file with dataset.
tokenizer (data_utils.Tokenizer): Tokenizer to use when processing text. Default: None
preprocess_fn (callable): Callable that process a string into desired format.
delim (str): delimiter for csv. Default: ','
binarize_sent (bool): binarize label values to 0 or 1 if they\'re on a different scale. Default: False
drop_unlabeled (bool): drop rows with unlabelled sentiment values. Always fills remaining empty
columns with -1 (regardless if rows are dropped based on sentiment value) Default: False
text_key (str): key to get text from csv. Default: 'sentence'
label_key (str): key to get label from json dictionary. Default: 'label'
Attributes:
X (list): all strings from the csv file
Y (np.ndarray): labels to train against
"""
def __init__(self, path, tokenizer=None, preprocess_fn=None, delim=',',
binarize_sent=False, drop_unlabeled=False, text_key='sentence', label_key='label',
**kwargs):
self.preprocess_fn = preprocess_fn
self.tokenizer = self.SetTokenizer(tokenizer)
self.path = path
self.delim = delim
self.text_key = text_key
self.label_key = label_key
self.drop_unlabeled = drop_unlabeled
if '.tsv' in self.path:
self.delim = '\t'
self.X = []
self.Y = []
try:
cols = [text_key]
if isinstance(label_key, list):
cols += label_key
else:
cols += [label_key]
data = pd.read_csv(self.path, sep=self.delim, usecols=cols, encoding='latin-1')
        except Exception:
data = pd.read_csv(self.path, sep=self.delim, usecols=[text_key], encoding='latin-1')
data = data.dropna(axis=0)
self.X = data[text_key].values.tolist()
try:
self.Y = data[label_key].values
except Exception as e:
self.Y = np.ones(len(self.X))*-1
if binarize_sent:
self.Y = binarize_labels(self.Y, hard=binarize_sent)
def SetTokenizer(self, tokenizer):
self.tokenizer = tokenizer
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""process string and return string,label,and stringlen"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
return {'text': x, 'length': len(x), 'label': y}
def write(self, writer_gen=None, path=None, skip_header=False):
"""
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a csv file
"""
if path is None:
path = self.path+'.results'
print('generating csv at ' + path)
with open(path, 'w') as csvfile:
c = csv.writer(csvfile, delimiter=self.delim)
if writer_gen is not None:
#if first item of generator is a header of what the metrics mean then write header to csv file
if not skip_header:
header = (self.label_key,)+tuple(next(writer_gen))+(self.text_key,)
c.writerow(header)
for i, row in enumerate(writer_gen):
row = (self.Y[i],)+tuple(row)+(self.X[i],)
c.writerow(row)
else:
c.writerow([self.label_key, self.text_key])
for row in zip(self.Y, self.X):
c.writerow(row)
class json_dataset(data.Dataset):
"""
Class for loading datasets from a json dump.
Purpose: Useful for loading data for unsupervised modeling or transfer tasks
Arguments:
path (str): path to json file with dataset.
tokenizer (data_utils.Tokenizer): Tokenizer to use when processing text. Default: None
preprocess_fn (callable): callable function that process a string into desired format.
Takes string, maxlen=None, encode=None as arguments. Default: process_str
text_key (str): key to get text from json dictionary. Default: 'sentence'
label_key (str): key to get label from json dictionary. Default: 'label'
Attributes:
all_strs (list): list of all strings from the dataset
all_labels (list): list of all labels from the dataset (if they have it)
"""
def __init__(self, path, tokenizer=None, preprocess_fn=process_str, binarize_sent=False,
text_key='sentence', label_key='label', loose_json=False, **kwargs):
self.preprocess_fn = preprocess_fn
self.path = path
self.tokenizer = self.SetTokenizer(tokenizer)
self.X = []
self.Y = []
self.text_key = text_key
self.label_key = label_key
self.loose_json = loose_json
for j in self.load_json_stream(self.path):
s = j[text_key]
self.X.append(s)
self.Y.append(j[label_key])
if binarize_sent:
self.Y = binarize_labels(self.Y, hard=binarize_sent)
def SetTokenizer(self, tokenizer):
self.tokenizer = tokenizer
def __getitem__(self, index):
"""gets the index'th string from the dataset"""
x = self.X[index]
if self.tokenizer is not None:
x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
elif self.preprocess_fn is not None:
x = self.preprocess_fn(x)
y = self.Y[index]
return {'text': x, 'length': len(x), 'label': y}
def __len__(self):
return len(self.X)
def write(self, writer_gen=None, path=None, skip_header=False):
"""
given a generator of metrics for each of the data points X_i,
write the metrics, text, and labels to a json file
"""
if path is None:
path = self.path+'.results'
jsons = []
if writer_gen is not None:
#if first item of generator is a header of what the metrics mean then write header to csv file
def gen_helper():
keys = {}
keys[0] = self.label_key
if not skip_header:
for idx, k in enumerate(tuple(next(writer_gen))):
keys[idx+1] = k
for i, row in enumerate(writer_gen):
if i == 0 and skip_header:
for idx, _ in enumerate(row):
keys[idx+1] = 'metric_%d'%(idx,)
j = {}
for idx, v in enumerate((self.Y[i],)+tuple(row)):
k = keys[idx]
j[k] = v
yield j
else:
def gen_helper():
for y in self.Y:
j = {}
j[self.label_key] = y
yield j
def out_stream():
for i, j in enumerate(gen_helper()):
j[self.text_key] = self.X[i]
yield j
self.save_json_stream(path, out_stream())
def save_json_stream(self, save_path, json_stream):
if self.loose_json:
with open(save_path, 'w') as f:
for i, j in enumerate(json_stream):
write_string = ''
if i != 0:
write_string = '\n'
write_string += json.dumps(j)
f.write(write_string)
else:
jsons = [j for j in json_stream]
json.dump(jsons, open(save_path, 'w'), separators=(',', ':'))
def load_json_stream(self, load_path):
if not self.loose_json:
jsons = json.load(open(load_path, 'r'))
generator = iter(jsons)
else:
def gen_helper():
with open(load_path, 'r') as f:
for row in f:
yield json.loads(row)
generator = gen_helper()
for j in generator:
if self.label_key not in j:
j[self.label_key] = -1
yield j
class data_shard(object):
"""
Data Shard of multiple tokenizations.
Purpose: Useful in L2R unsupervised learning. It's stateful and on consecutive
calls to `get` it returns the next sequence of tokens following the last
sequence of tokens returned.
Arguments:
data (Tokenization or list): data comprising the data shard. Either a Tokenization or list of Tokenizations.
seq_len (int): sequence length to sample from shard
persist_state (int): one of -1,0,1 specifying whether to never reset state,
reset after every sentence, or at end of every shard. Default: 0
Attributes:
all_seq (list): list of all tokenizations
        seq_ends (list): cumulative lengths of `all_seq` entries as if they were all
            concatenated together: `itertools.accumulate([len(s) for s in all_seq])`
total_toks (int): `seq_ends[-1]`
num_seq (int): `len(all_seq)`
"""
def __init__(self, data, seq_len=-1, persist_state=0, **kwargs):
self.seq_len = seq_len
self.persist_state = persist_state
if isinstance(data, Tokenization):
self.num_seq = 1
self.all_seq = [data]
self.seq_ends = [len(data)]
else:
self.num_seq = len(data)
self.all_seq = data
self.seq_ends = [len(self.all_seq[0])]
for i in range(1, self.num_seq):
s = self.all_seq[i]
self.seq_ends.append(len(s)+self.seq_ends[-1])
self.pad = self.all_seq[-1].pad
self.total_toks = self.seq_ends[-1]
self.counter = 0
self.seq_counter = 0
self.intra_seq_counter = 0
def set_seq_len(self, val):
self.seq_len = val
def _get(self, seq_len):
"""
Get next sequence and reset mask of `seq_len` length.
"""
rtn_mask = []
rtn = []
if seq_len <= 0:
self.counter = self.total_toks
rtn = []
for seq in self.all_seq:
s = seq[:]
rtn.extend(s)
rtn_mask.extend(self.get_string_mask(s))
self.seq_counter += 1
else:
rtn = []
#add one to the sequence length because we need [0:seq_len] as inputs and [1:seq_len+1] as targets
seq_len += 1
while self.seq_counter < self.num_seq and not len(rtn) >= seq_len:
tokenization = self.all_seq[self.seq_counter]
num_chars = seq_len - len(rtn)
start = self.intra_seq_counter
end = start + num_chars
seq = list(tokenization[start:end])
rtn.extend(seq)
rtn_mask.extend(self.get_string_mask(seq))
seq_complete = len(rtn) == seq_len
self.intra_seq_counter += len(seq)
if self.intra_seq_counter >= len(tokenization):
if seq_complete:
# if sampled seq_len+1 tokens ends on the last token of an example do not advance intra_seq_counter as the last token will be needed for input during next sample
self.intra_seq_counter -= 1
else:
self.seq_counter += 1
self.intra_seq_counter = 0
else:
self.intra_seq_counter -= 1
return rtn, rtn_mask
def get_string_mask(self, s):
"""
Get hidden state reset mask for string being currently sampled.
"""
start_mask = 0
if self.persist_state == PERSIST_SHARD:
start_mask = (self.seq_counter == 0 and self.intra_seq_counter == 0)
elif self.persist_state == RESET_STATE:
start_mask = self.intra_seq_counter == 0
return [start_mask] + [0] * (len(s)-1)
def get(self, seq_len=None):
"""
Get the next sequence from the data shard as well as state reset and padding/loss masks.
        Returns a sequence of seq_len+1 so that `inputs, targets = sequence[:-1], sequence[1:]`
"""
if seq_len is None:
seq_len = self.seq_len
rtn, rtn_mask = self._get(seq_len)
rtn_len = len(rtn)
# returned sequence should be seq_len+1 length since it needs to contain inputs and targets
num_padding = seq_len - (rtn_len-1)
if num_padding > 0:
rtn.extend([self.pad] * num_padding)
rtn_mask.extend([0] * num_padding)
if seq_len > 0:
self.counter += seq_len
# mask all padding + the last valid target token to 0 since they won't be used in loss computation
loss_mask = [1]*(rtn_len-1) + [0]*(num_padding+1)
else:
self.counter = self.total_toks
loss_mask = [1]*rtn_len
return np.array(rtn), np.array(rtn_mask), np.array(loss_mask)
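    # Worked example: with seq_len=4, get() returns 5 tokens so the caller can
    # form `inputs, targets = rtn[:-1], rtn[1:]`. If only 3 valid tokens remain
    # in the shard, 2 pad tokens are appended and the loss mask becomes
    # [1, 1, 0, 0, 0]: zeros cover the padding plus the last valid target slot.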
def is_last(self):
return self.counter >= self.total_toks-self.seq_len -1
def is_done(self):
return self.counter >= self.total_toks-1
def __iter__(self):
self.counter = 0
while self.counter < self.total_toks:
yield self.get(self.seq_len) | sentiment-discovery-master | data_utils/datasets.py |
class array_cache(object):
"""
Arguments:
cache_strs (list-like): List like object with __len__ and __getitem__
cache_block_size (int): number of strings to cache in one cache block. Default: 64
cache_size (int): number of caches blocks to store before removing (LRU). Default: 32
Attributes:
num_strs (int): len(cache_strs)
cache (dict): holds cache blocks
cache_blocks (list): list of keys for blocks stored in caches
"""
def __init__(self, cache_strs, cache_block_size=64, cache_size=32):
super(array_cache, self).__init__()
self.cache_size = cache_size
self.cache_block_size = cache_block_size
self.cache_strs = cache_strs
self.num_strs = len(self.cache_strs)
self.cache = {}
self.cache_blocks = []
    def __getitem__(self, index):
        #get index of cache block of size cache_block_size
        block_ind = index//self.cache_block_size
        if block_ind not in self.cache:
            self.clean_out_cache()
            #cache blocks always start at a multiple of cache_block_size
            block_start = block_ind*self.cache_block_size
            cache_block = self.cache_strs[block_start:min(block_start+self.cache_block_size, self.num_strs)]
            #store cache block in cache
            self.cache[block_ind] = cache_block
            #append key to cache block list
            self.cache_blocks.append(block_ind)
        else:
            cache_block = self.cache[block_ind]
        #get a string's index inside of its cache block
        block_ind_ind = index%self.cache_block_size
        return cache_block[block_ind_ind]
def __len__(self):
return len(self.cache_strs)
def clean_out_cache(self):
"""gets index of oldest cache block. and removes the block from cache and removes the index"""
if len(self.cache_blocks) >= self.cache_size:
block_ind = self.cache_blocks.pop(0)
del self.cache[block_ind]
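# Usage sketch (hypothetical backing store): wraps any indexable collection
# with a small block cache so that sequential reads hit the same cached block.
#
#     cached = array_cache(my_lazy_loader, cache_block_size=64, cache_size=32)
#     doc = cached[1234]   # first access loads block 19 (1234 // 64)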
| sentiment-discovery-master | data_utils/cache.py |
import os
import math
from .samplers import BatchSampler, DistributedBatchSampler, TransposedSampler, RandomShardSampler, BatchShardSampler, DistributedBatchShardSampler
from .loaders import DataLoader, ShardLoader
from .preprocess import tokenize_str_batch, binarize_labels, process_str, process_tweet, batch_tokens
from .datasets import json_dataset, csv_dataset, split_ds, get_processed_path, ConcatDataset, SplitDataset, data_shard
from .lazy_loader import exists_lazy, make_lazy, lazy_array_loader
from .tokenization import Tokenization, CommandToken, Tokenizer, CharacterLevelTokenizer, make_tokenizer
TRAIN_DATA = 0
VAL_DATA = 1
TEST_DATA = 2
def should_split(split):
return max(split)/sum(split) != 1.
def get_ext(path):
return os.path.splitext(path)[1]
def get_dataset(path, **kwargs):
"""gets dataset object based on keyword args and file at `path`"""
ext = get_ext(path)
if ext =='.json':
text = json_dataset(path, **kwargs)
elif ext in ['.csv', '.tsv']:
text = csv_dataset(path, **kwargs)
else:
raise NotImplementedError('data file type %s is not supported'%(ext))
return text
def make_dataset(path, seq_length, text_key, label_key, lazy=False, process_fn=process_str, split=[1.],
                 delim=',', loose=False, binarize_sent=False, drop_unlabeled=False, tokenizer=None,
                 tokenizer_type='CharacterLevelTokenizer', tokenizer_model_path=None, vocab_size=None,
                 model_type='bpe', pad_token=0, character_coverage=1.0, non_binary_cols=None, **kwargs):
if isinstance(process_fn, str):
process_fn = eval(process_fn)
if non_binary_cols is not None:
label_key = non_binary_cols
def get_dataset_from_path(path_):
if lazy:
if not exists_lazy(path_, data_type='data'):
text = get_dataset(path_, text_key=text_key, label_key=label_key, binarize_sent=binarize_sent,
delim=delim, drop_unlabeled=drop_unlabeled, loose_json=loose)
make_lazy(path_, text.X, data_type='data')
text = lazy_array_loader(path_, data_type='data', map_fn=process_fn)
else:
text = get_dataset(path_, text_key=text_key, label_key=label_key, binarize_sent=binarize_sent,
delim=delim, drop_unlabeled=drop_unlabeled, loose_json=loose, preprocess_fn=process_fn)
return text
if isinstance(path, str):
path = [path]
datasets = [get_dataset_from_path(p) for p in path]
if len(datasets) == 1:
ds = datasets[0]
else:
ds = ConcatDataset(datasets)
if tokenizer is None:
        tokenizer = make_tokenizer(tokenizer_type, ds, tokenizer_model_path, vocab_size, model_type,
                                   pad_token, character_coverage)
ds.SetTokenizer(tokenizer)
if should_split(split):
ds = split_ds(ds, split)
return ds, tokenizer
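# Usage sketch (hypothetical paths and keys): builds a lazily loaded dataset,
# trains or loads a tokenizer, and returns train/val/test splits.
#
#     (train, val, test), tokenizer = make_dataset(
#         '/data/amazon.json', seq_length=256, text_key='sentence',
#         label_key='label', lazy=True, loose=True, split=[.9, .05, .05])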
| sentiment-discovery-master | data_utils/__init__.py |
from collections import namedtuple
import random
import os
import sentencepiece as spm
def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_coverage=1.0):
tokenizer_class = tokenizer_type
if isinstance(tokenizer_class, str):
tokenizer_class = eval(tokenizer_class)
return tokenizer_class(corpus=corpus, vocab_size=vocab_size, model_path=model_path, model_type=model_type,
pad_token=pad_token, character_coverage=character_coverage)
class Tokenization(object):
def __init__(self, tokenization, text=None, original_text=None, command_tokens=None, asIds=True):
self.tokenization = tokenization
self.text = text
if self.text is None:
self.text = self.tokenization
self.original_text = original_text
if self.original_text is None:
self.original_text = self.text
self.command_tokens = command_tokens
self.asIds = asIds
self.parse_command_tokens()
def parse_command_tokens(self):
if self.command_tokens is None:
return
for command_token in self.command_tokens:
if self.asIds:
setattr(self, command_token.name, command_token.Id)
else:
setattr(self, command_token.name, command_token.token)
def __getitem__(self, index):
return self.tokenization[index]
def __len__(self):
return len(self.tokenization)
def append(self, other):
if isinstance(other, Tokenization):
self.tokenization.extend(other.tokenization)
self.text += other.text
self.original_text += other.original_text
else:
self.tokenization.append(other)
return self
def extend(self, other):
if isinstance(other, Tokenization):
self.tokenization.extend(other.tokenization)
self.text += other.text
self.original_text += other.original_text
else:
self.tokenization.extend(other)
return self
COMMAND_TUPLE = namedtuple('CommandToken', ('name', 'token', 'Id'))
token_format = "<{0}>"
def prep_command_tokens(tokenlist):
return [CommandToken(tok[0], token_format.format(tok[0]), tok[1]) for tok in tokenlist]
class CommandToken(object):
def __init__(self, name, token, Id):
self.name = name
self.token = token
self.Id = Id
def __str__(self):
return str(COMMAND_TUPLE(self.name, self.token, self.Id))
class Tokenizer(object):
def __init__(self, command_tokens=None):
self.command_tokens = command_tokens
self.command_token_map = {}
self.command_id_map = {}
if command_tokens is not None:
self.command_name_map = {tok.name: tok for tok in command_tokens}
self.command_token_map = {tok.token: tok for tok in command_tokens}
self.command_id_map = {tok.Id: tok for tok in command_tokens}
self.num_command_tokens = len(self.command_tokens)
if not hasattr(self, 'num_text_tokens'):
self.num_text_tokens = 0
if not hasattr(self, 'num_tokens'):
self.num_tokens = self.num_command_tokens + self.num_text_tokens
def __call__(self, text, process_fn=None):
return self.EncodeAsIds(text, process_fn)
@staticmethod
def exists(model_path):
raise NotImplementedError('Tokenizer exists method not implemented')
def Train(self, corpus):
raise NotImplementedError('Tokenizer Train not implemented')
def EncodeAsIds(self, text, process_fn=None):
raise NotImplementedError('Tokenizer EncodeAsIds not implemented')
def EncodeAsTokens(self, text, process_fn=None):
raise NotImplementedError('Tokenizer EncodeAsTokens not implemented')
def IdToToken(self, Id):
raise NotImplementedError('Tokenizer IdToToken not implemented')
def TokenToId(self, token):
raise NotImplementedError('Tokenizer TokenToId not implemented')
def DecodeIds(self, Ids):
raise NotImplementedError('Tokenizer DecodeIds not implemented')
def DecodeTokens(self, Tokens):
raise NotImplementedError('Tokenizer DecodeTokens not implemented')
class CharacterLevelTokenizer(Tokenizer):
def __init__(self, pad_token=0, **kwargs):
self.num_text_tokens = 256
super(CharacterLevelTokenizer, self).__init__(prep_command_tokens([('pad', pad_token)]))
@staticmethod
def exists(model_path):
return True
def Train(self, corpus):
pass
def EncodeAsIds(self, text, process_fn=None):
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
processed_text = str(processed_text)
tokens = [self.TokenToId(c) for c in processed_text]
return Tokenization(tokens, processed_text, text, self.command_tokens)
def EncodeAsTokens(self, text, process_fn=None):
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
processed_text = str(processed_text)
tokens = [c for c in processed_text]
return Tokenization(tokens, processed_text, text, self.command_tokens, asIds=False)
def IdToToken(self, Id):
return chr(Id - self.num_command_tokens)
def TokenToId(self, token):
return ord(token) + self.num_command_tokens
def DecodeIds(self, Ids):
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
return ''.join([self.IdToToken(tok) for tok in Ids])
def DecodeTokens(self, Tokens):
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return ''.join(Tokens)
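# Worked example: ids are byte values offset by the number of command tokens
# (a single pad token here), so 'a' (ord 97) encodes to 98 and decodes back:
#
#     tok = CharacterLevelTokenizer()
#     tok.EncodeAsIds('ab').tokenization   # -> [98, 99]
#     tok.DecodeIds([98, 99])              # -> 'ab'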
def write_corpus_as_lines(dataset, filepath):
"""
Take Dataset or corpus, split it into lines, and write it to `filepath`.
    Return the total number of sentences written and the length of the longest one.
"""
total_sentence_count = 0
maxlen = 0
with open(filepath, 'w') as f:
for entry in dataset:
if isinstance(entry, dict):
entry = entry['text']
lines = entry.strip().split('\n')
for line in lines:
sentences = line.split('.')
total_sentence_count += len(sentences)
            for sentence in sentences:
                maxlen = max(len(sentence), maxlen)
                f.write(sentence+'.\n')
return total_sentence_count, maxlen
MAX_SENTENCEPIECE_SENTENCES = 100000000
class SentencePieceTokenizer(Tokenizer):
def __init__(self, model_type='bpe', vocab_size=None, corpus=None, model_path=None, character_coverage=1.0, pad_token=0, **kwargs):
self.character_coverage = character_coverage
self.model_type = model_type.lower()
self.spm_model = model_path
self.num_text_tokens = vocab_size
make_train = not SentencePieceTokenizer.exists(self.spm_model)
if make_train:
assert corpus is not None and self.num_text_tokens is not None
self.Train(corpus, self.num_text_tokens)
self.load_spm_model()
super(SentencePieceTokenizer, self).__init__(prep_command_tokens([('pad', pad_token)]))
@staticmethod
def exists(model_path):
if model_path is None:
return False
# check if path exists
dne = not os.path.exists(model_path)
# check if path.model exists
if dne and not model_path.endswith('.model'):
dne = not os.path.exists(model_path+'.model')
return not dne
def load_spm_model(self):
if not os.path.exists(self.spm_model) and not self.spm_model.endswith('.model'):
self.spm_model = self.spm_model+'.model'
self.sp = spm.SentencePieceProcessor()
self.sp.Load(self.spm_model)
self.vocab_size = self.num_text_tokens = len(self.sp)
def Train(self, corpus, num_text_tokens):
self.num_text_tokens = num_text_tokens
use_model_path = self.spm_model
random_hash = str(random.randint(0, 2147483647))
if use_model_path is None:
use_model_path = random_hash
if use_model_path.endswith('.model'):
use_model_path = use_model_path[:use_model_path.rfind('.model')]
input_path = use_model_path+'.txt.'+random_hash
print('Writing temporary dataset for tokenization to '+input_path)
line_count, maxlenline = write_corpus_as_lines(corpus, input_path)
line_count = min(line_count, MAX_SENTENCEPIECE_SENTENCES)
print('Training sentencepiece model')
train_string = '--input={file_path} --model_prefix={model_prefix} --vocab_size={vocab_size}' \
+ ' --model_type={model_type} --input_sentence_size={input_sentence_size} --character_coverage={character_coverage} ' \
+ '--max_sentence_length={max_len}'
        train_string = train_string.format(file_path=input_path, model_prefix=use_model_path, vocab_size=num_text_tokens,
                                           model_type=self.model_type, input_sentence_size=int(line_count), character_coverage=self.character_coverage,
                                           max_len=str(maxlenline))
spm.SentencePieceTrainer.Train(train_string)
os.remove(input_path)
self.spm_model = use_model_path+'.model'
print('Sentencepiece model written to '+self.spm_model)
def EncodeAsIds(self, text, process_fn=None):
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
tokens = [tok + self.num_command_tokens for tok in self.sp.EncodeAsIds(processed_text)]
# tokens = self.sp.EncodeAsIds(processed_text)
return Tokenization(tokens, processed_text, text, self.command_tokens)
def EncodeAsTokens(self, text, process_fn=None):
processed_text = text
if process_fn is not None:
processed_text = process_fn(processed_text)
tokens = self.sp.EncodeAsTokens(processed_text)
return Tokenization(tokens, processed_text, text, self.command_tokens, asIds=False)
def IdToToken(self, Id):
return self.sp.IdToToken(Id - self.num_command_tokens)
# return self.sp.IdToToken(Id)
def TokenToId(self, token):
return self.sp.TokenToId(token) + self.num_command_tokens
def DecodeIds(self, Ids):
if isinstance(Ids, Tokenization):
Ids = Ids.tokenization
        return self.sp.DecodeIds([Id - self.num_command_tokens for Id in Ids])
# return self.sp.DecodeIds(Ids)
def DecodeTokens(self, Tokens):
if isinstance(Tokens, Tokenization):
Tokens = Tokens.tokenization
return self.sp.DecodeTokens(Tokens) | sentiment-discovery-master | data_utils/tokenization.py |
import math
import os
import sys
import torch
from torch.utils import data
import numpy as np
from .datasets import data_shard
class DistributedBatchSampler(data.sampler.BatchSampler):
"""
similar to normal implementation of distributed batch sampler, except if sampler is transposed sampler
has option to wrap around instead of not dropping last half batch. This is useful for persisting state
"""
def __init__(self, sampler, batch_size, drop_last, rank=-1, world_size=2, wrap_last=False):
super(DistributedBatchSampler, self).__init__(sampler, batch_size, drop_last)
if rank == -1:
rank = torch.distributed.get_rank()
self.rank = rank
self.world_size = world_size
self.sampler.wrap_around = 0
self.wrap_around = 0
self.wrap_last = wrap_last
self.start_iter = 0
def __iter__(self):
batch = []
last_batch = None
i = 0
for idx in self.data_iterator(self.sampler, wrap_around=False):
batch.append(idx)
if len(batch) == self.batch_size:
tbatch = self._batch(batch)
if i >= self.start_iter:
yield tbatch
self.start_iter = 0
i += 1
last_batch = np.array(list(tbatch))
batch = []
batch_len = len(batch)
if batch_len > 0 and not self.drop_last:
if self.wrap_last:
self.sampler.wrap_around -= (self.batch_size)
self.wrap_around += (len(batch))
self.wrap_around %= self.batch_size
if isinstance(self.sampler, TransposedSampler):
for i, idx in enumerate(self.data_iterator(self.sampler, wrap_around=True)):
if i == 0:
continue
batch.append(idx)
new_batch_len = len(batch)
if len(batch) == self.batch_size:
break
yield self._batch(batch)
if self.wrap_last:
self.sampler.wrap_around += self.batch_size
def data_iterator(self, _iter, wrap_around=False):
"""iterates through data and handles wrap around"""
for i, idx in enumerate(_iter):
if i < self.wrap_around%self.batch_size:
continue
if wrap_around:
self.wrap_around += 1
self.wrap_around %= self.batch_size
yield idx
def _batch(self, batch):
"""extracts samples only pertaining to this worker's batch"""
start = self.rank*self.batch_size//self.world_size
end = (self.rank+1)*self.batch_size//self.world_size
return batch[start:end]
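    # Worked example of `_batch`: with batch_size=8 and world_size=2, rank 0
    # keeps indices [0:4] of every global batch and rank 1 keeps [4:8], so the
    # two workers together consume exactly one global batch per step.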
class BatchSampler(data.sampler.BatchSampler):
"""
Normal implementation of batch sampler, except if sampler is transposed sampler it
has option to wrap around instead of not dropping last half batch.
Useful for persisting state.
"""
def __init__(self, sampler, batch_size, drop_last, wrap_last=False):
super(BatchSampler, self).__init__(sampler, batch_size, drop_last)
self.wrap_around = 0
self.sampler.wrap_around = 0
self.wrap_last = wrap_last
self.start_iter = 0
def __iter__(self):
batch = []
last_batch = None
i = 0
for idx in self.data_iterator(self.sampler, wrap_around=False):
batch.append(idx)
new_batch_len = len(batch)
if new_batch_len == self.batch_size:
if i >= self.start_iter:
yield batch
self.start_iter = 0
i += 1
last_batch = np.array(list(batch))
batch = []
if len(batch) > 0 and (self.wrap_last or not self.drop_last):
if self.wrap_last:
self.sampler.wrap_around -= (self.batch_size)
self.wrap_around += (len(batch))
self.wrap_around %= self.batch_size
if isinstance(self.sampler, TransposedSampler):
for i, idx in enumerate(self.data_iterator(self.sampler, wrap_around=True)):
if i == 0:
continue
batch.append(idx)
if len(batch) == self.batch_size:
break
yield batch
if self.wrap_last:
self.sampler.wrap_around += self.batch_size
def data_iterator(self, _iter, wrap_around=False):
"""iterates through data and handles wrap around"""
for i, idx in enumerate(_iter):
if i < self.wrap_around%self.batch_size:
continue
if wrap_around:
self.wrap_around += 1
self.wrap_around %= self.batch_size
yield idx
class TransposedSampler(data.sampler.Sampler):
"""
Instead of performing sequential sampling, samples array in a transposed fashion given the
batch size to sampled. Instead of generating the following indices for a batch size of 2
1 3 5
2 4 6
It will generate
1 2 3
4 5 6
"""
def __init__(self, data_source, batch_size, data_sampler=None):
self.data_source = data_source
self.batch_size = batch_size
self.len_ds = len(data_source)
self.strat_width = self.len_ds//batch_size
#self.strat_width = math.ceil(self.len_ds/batch_size)
self.data_sampler = data_sampler
self.wrap_around = 0
    def transpose_helper(self, x):
        """computes index corresponding to transpose of index x"""
        return ((x%self.batch_size)*self.strat_width+(x//self.batch_size))%self.len_ds
def __iter__(self):
if self.data_sampler is None:
return iter(map(self.transpose_helper, range(len(self))))
return iter(map(self.transpose_helper, iter(self.data_sampler)))
def __len__(self):
#return self.len_ds
return self.strat_width*self.batch_size
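# Worked example: with len(data_source) == 6 and batch_size == 2, strat_width
# is 3 and transpose_helper maps 0..5 to 0, 3, 1, 4, 2, 5, so consecutive
# batches of two are [0, 3], [1, 4], [2, 5] and each batch row walks the data
# sequentially.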
class RandomShardSampler(object):
"""
Sampler for data shards.
Purpose: Samples data shards used for L2R unsupervised modeling from the `data_source`.
Arguments:
data_source (Dataset or array-like): Dataset of tokenizations to sample data from.
samples_per_shard (int): Number of samples per shard to gather from `data_source`.
seq_len (int): seq_len value to use when creating a data shard. Can be reset later with
`set_seq_len`.
persist_state (int): persist_state value to use when creating a data shard. See
data_utils.data_shard documentation for valid values. Can be reset later with
`set_persist_state`.
random_state (np.RandomState): Random number generator state to use for sampling data. If
no value is supplied it uses numpy's default random state (not thread safe).
"""
def __init__(self, data_source, samples_per_shard, seq_len=-1, persist_state=0):
self.data_source = data_source
self.source_size = len(data_source)
self.samples_per_shard = samples_per_shard
self.seq_len = seq_len
self.persist_state = persist_state
def set_seq_len(self, seq_len):
self.seq_len = seq_len
def set_samples_per_shard(self, samples_per_shard):
self.samples_per_shard = samples_per_shard
def set_persist_state(self, persist_state):
self.persist_state = persist_state
def get(self, random_state, samples_per_shard=None):
"""
Uses either supplied random state or default random state to sample data from
the data source, create a datashard, and return it.
"""
if samples_per_shard is None:
samples_per_shard = self.samples_per_shard
sample_ids = random_state.randint(self.source_size, size=samples_per_shard)
samples = [self.data_source[i] for i in sample_ids]
samples = [sample['text'] if isinstance(sample, dict) else sample for sample in samples]
return data_shard(samples, self.seq_len, self.persist_state)
def __len__(self):
return self.source_size
class BatchShardSampler(object):
"""
Class to manage the random state of and sample a batch of active shards.
Uses one random state per batch index to control sampling of data shards for that batch index.
Purpose: Intended for use with data_utils.ShardLoader to perform L2R unsupervised Learning.
Arguments:
shard_sampler (RandomShardSampler): shard sampler used to sample data shards.
batch_size (int): Batch size to sample.
drop_last (boolean): Pretty much useless. Used to give a fake length.
random_batch (list): List of random states to use.
Attributes:
batch (list): Batch of shard queues (a list that contains shards). Call `.get` and
`.isdone()` on `shard_queue[0]` to get next batch and check if shard is done.
"""
def __init__(self, shard_sampler, batch_size, drop_last, random_batch=None):
self.shard_sampler = shard_sampler
self.batch_size = batch_size
self.drop_last = drop_last
# self.batch = None
self.random_batch = random_batch
if self.random_batch is None:
self.random_batch = [np.random.RandomState(seed) for seed in np.random.randint(batch_size*999, size=batch_size)]
def set_seq_len(self, seq_len):
self.seq_len = seq_len
self.shard_sampler.set_seq_len(seq_len)
def set_samples_per_shard(self, samples_per_shard):
self.samples_per_shard = samples_per_shard
self.shard_sampler.set_samples_per_shard(samples_per_shard)
def set_persist_state(self, persist_state):
self.persist_state = persist_state
self.shard_sampler.set_persist_state(persist_state)
def get_shard(self, b):
return self.shard_sampler.get(random_state=self.random_batch[b])
def iter_queue(self, b):
live_shard = self.get_shard(b)
while True:
if live_shard.is_done():
live_shard = self.get_shard(b)
yield live_shard.get()
def manage_queues(self):
queues = [self.iter_queue(b) for b in range(self.batch_size)]
while True:
yield [next(q) for q in queues]
def manage_queues_multiproc(self, queue_indices=None, output_queue=None):
assert output_queue is not None
if queue_indices is None:
queue_indices = list(range(self.batch_size))
queues = [self.iter_queue(b) for b in queue_indices]
while True:
output_queue.put([next(q) for q in queues], block=True)
def __iter__(self):
return self.manage_queues()
def __len__(self):
if self.drop_last:
return len(self.shard_sampler) // self.batch_size
else:
return (len(self.shard_sampler) + self.batch_size - 1) // self.batch_size
class DistributedBatchShardSampler(BatchShardSampler):
"""
Coordinates random states so that shard sampling for distributed training can be coordinated
without any communication between distributed processes. This is possible since random numbers
are pseudo-deterministic, so if the random states of the global batch are known data loading
can be coordinated without communication with other processes.
Purpose: For use with distributed training of L2R modeling.
Arguments:
shard_sampler (RandomShardSampler): Shard sampler used to sample data shards.
local_batch_size (int): Local batch size to sample.
drop_last (boolean): Pretty much useless. Used to give a fake length.
local_random_batch (list): List of random states to use locally for this worker.
world_size (int): Number of workers in distributed training.
rank (int): Rank of this distributed worker.
batch (list): Batch of shard queues (a list that contains shards). Call `.get` and
`.isdone()` on `shard_queue[0]` to get next batch and check if shard is done.
"""
def __init__(self, shard_sampler, local_batch_size, drop_last, local_random_batch=None, world_size=1, rank=0):
self.global_batch_size = int(local_batch_size*world_size)
if local_random_batch is None:
local_random_batch = [np.random.RandomState(seed) for seed in np.random.randint(self.global_batch_size*999, size=self.global_batch_size)]
local_random_batch = local_random_batch[local_batch_size*rank:local_batch_size*(rank+1)]
super(DistributedBatchShardSampler, self).__init__(shard_sampler, local_batch_size, drop_last, local_random_batch)
    def __len__(self):
        if self.drop_last:
            return len(self.shard_sampler) // self.global_batch_size
        else:
            return (len(self.shard_sampler) + self.global_batch_size - 1) // self.global_batch_size
import torch
from .weight_norm import WeightNorm
from .reparameterization import Reparameterization
def apply_weight_norm(module, name='', dim=0, hook_child=True):
"""
Applies weight normalization to a parameter in the given module.
If no parameter is provided, applies weight normalization to all
parameters in model (except 1-d vectors and scalars).
.. math::
\mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}
Weight normalization is a reparameterization that decouples the magnitude
of a weight tensor from its direction. This replaces the parameter specified
by `name` (e.g. "weight") with two parameters: one specifying the magnitude
(e.g. "weight_g") and one specifying the direction (e.g. "weight_v").
Weight normalization is implemented via a hook that recomputes the weight
tensor from the magnitude and direction before every :meth:`~Module.forward`
call.
By default, with `dim=0`, the norm is computed independently per output
channel/plane. To compute a norm over the entire weight tensor, use
`dim=None`.
See https://arxiv.org/abs/1602.07868
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to compute the norm
hook_child (boolean, optional): adds reparameterization hook to direct parent of the
parameters. If False, it's added to `module` instead. Default: True
Returns:
The original module with the weight norm hook
Example::
>>> m = apply_weight_norm(nn.Linear(20, 40), name='weight')
Linear (20 -> 40)
>>> m.weight_g.size()
torch.Size([40, 1])
>>> m.weight_v.size()
torch.Size([40, 20])
"""
return apply_reparameterization(module, reparameterization=WeightNorm, hook_child=hook_child,
name=name, dim=dim)
def remove_weight_norm(module, name='', remove_all=False):
"""
Removes the weight normalization reparameterization of a parameter from a module.
If no parameter is supplied then all weight norm parameterizations are removed.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
Example:
>>> m = apply_weight_norm(nn.Linear(20, 40))
>>> remove_weight_norm(m)
"""
return remove_reparameterization(module, reparameterization=WeightNorm,
name=name, remove_all=remove_all)
def get_module_and_name(module, name):
"""
Recursively fetches the (possibly nested) child module and the name of the weight to be reparameterized.
"""
module2use = module
if name == '':
return module, name
names = name.split('.')
for i, n in enumerate(names):
param_or_module = getattr(module2use, n)
if i == len(names)-1:
if isinstance(param_or_module, torch.nn.Parameter):
return module2use, n
else:
return param_or_module, ''
else:
module2use = param_or_module
def apply_reparameterization(module, reparameterization=None, name='', dim=0, hook_child=True):
"""
Applies a given weight reparameterization (such as weight normalization) to
a parameter in the given module. If no parameter is given, applies the reparameterization
to all parameters in the model (except 1-d vectors and scalars).
Args:
module (nn.Module): containing module
reparameterization (Reparameterization): reparameterization class to apply
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to perform reparameterization op
hook_child (boolean, optional): adds reparameterization hook to direct parent of the
parameters. If False, it's added to `module` instead. Default: True
Returns:
The original module with the reparameterization hook
Example::
>>> m = apply_reparameterization(nn.Linear(20, 40), WeightNorm)
Linear (20 -> 40)
"""
assert reparameterization is not None
module2use, name2use = get_module_and_name(module, name)
if name2use != '':
Reparameterization.apply(module, name, dim, reparameterization, hook_child)
else:
names = [n for n, _ in module2use.named_parameters()]
# name2use is '' on this branch, so only the caller-supplied `name` prefix can apply
if name != '':
    names = [name + '.' + n for n in names]
for name in names:
apply_reparameterization(module, reparameterization, name, dim, hook_child)
return module
def remove_reparameterization(module, reparameterization=Reparameterization,
name='', remove_all=False):
"""
Removes the given reparameterization of a parameter from a module.
If no parameter is supplied then all reparameterizations are removed.
Args:
module (nn.Module): containing module
reparameterization (Reparameterization): reparameterization class to remove
name (str, optional): name of weight parameter
remove_all (bool, optional): if True, remove all reparameterizations of given type. Default: False
Example:
>>> m = apply_reparameterization(nn.Linear(20, 40),WeightNorm)
>>> remove_reparameterization(m)
"""
if name != '' or remove_all:
to_remove = []
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, reparameterization) and (hook.name == name or remove_all):
hook.remove(module)
to_remove.append(k)
if len(to_remove) > 0:
for k in to_remove:
del module._forward_pre_hooks[k]
return module
if not remove_all:
raise ValueError("reparameterization of '{}' not found in {}"
.format(name, module))
else:
modules = [module]+[x for x in module.modules()]
for m in modules:
remove_reparameterization(m, reparameterization=reparameterization, remove_all=True)
return module
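# Minimal round-trip sketch (an assumption for illustration, not part of the library API):
# applying weight norm swaps `weight` for (`weight_g`, `weight_v`); removing it restores a
# single `weight` Parameter with the same value, since g is initialized to ||v||.
if __name__ == '__main__':
    import torch.nn as nn
    m = nn.Linear(20, 40)
    w_before = m.weight.data.clone()
    m = apply_weight_norm(m, name='weight')
    m(torch.randn(2, 20))                      # forward pre-hook materializes `weight`
    m = remove_weight_norm(m, name='weight')
    assert torch.allclose(m.weight.data, w_before, atol=1e-6)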
| sentiment-discovery-master | reparameterization/__init__.py |
import torch
from torch.nn.parameter import Parameter
#from ..utils import FusedNorm
import time
from .reparameterization import Reparameterization
def _norm(p, dim):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
return _norm(p.transpose(0, dim), 0).transpose(0, dim)
HALF_TYPES = (torch.cuda.HalfTensor, torch.HalfTensor)
class WeightNorm(Reparameterization):
"""
Weight normalization is a reparameterization that decouples the magnitude
of a weight tensor from its direction. This replaces the parameter specified
by `name` (e.g. "weight") with two parameters: one specifying the magnitude
(e.g. "weight_g") and one specifying the direction (e.g. "weight_v").
Weight normalization is implemented via a hook that recomputes the weight
tensor from the magnitude and direction before every :meth:`~Module.forward`
call.
.. math::
\mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}
By default, with `dim=0`, the norm is computed independently per output
channel/plane. To compute a norm over the entire weight tensor, use
`dim=None`.
"""
def compute_weight(self, module=None, name=None):
"""
Computes weight normalized weight value to assign value to module attribute
with name `name`.
Arguments:
module (nn.Module): module with weight we'd like to reparameterize
Returns:
w (Tensor): Tensor object containing value of reparameterized weight
"""
if module is None:
module = self.module
if name is None:
name = self.name
module, name = Reparameterization.get_module_and_name(module, name)
g = getattr(module, name + '_g')
v = getattr(module, name + '_v')
w = (v * (g / _norm(v, self.dim)))
#fused_norm = FusedNorm.apply
#v = v.contiguous()
#w = g*fused_norm(v)
return w
def reparameterize(self, name, weight, dim):
"""
Creates Parameters v and g to be used for weight normalization
and creates the names of the module attributes these Parameters
will correspond to. The parameters will be registered according
to the names provided.
Arguments:
module (nn.Module): module with weight we'd like to reparameterize
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to compute parameterization
Returns:
names (list, str): names of Parameters to be used for reparameterization
params (list, Parameter): Parameters to be used for reparameterization
"""
names = [name + '_g', name + '_v']
params = [Parameter(_norm(weight, dim).data), Parameter(weight.data)]
return names, params
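# Quick numerical sketch (illustrative only): with dim=0, `weight_g` is initialized to the
# per-output-channel norm of `weight_v`, so g * v / ||v|| reproduces the original weight.
if __name__ == '__main__':
    w = torch.randn(40, 20)
    g = _norm(w, 0)                            # shape (40, 1): one norm per output row
    assert torch.allclose(w * (g / _norm(w, 0)), w, atol=1e-6)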
| sentiment-discovery-master | reparameterization/weight_norm.py |
import torch
from torch.nn.parameter import Parameter
import sys
class Reparameterization(object):
"""
Class interface for performing weight reparameterizations
Arguments:
name (str): name of weight parameter
dim (int): dimension over which to compute the norm
module (nn.Module): parent module to which param `name` is registered to
retain_forward (bool, optional): if False, deletes weight on call to
    module.backward. Used to avoid memory leaks with DataParallel. Default: True
Attributes:
reparameterization_names (list, str): contains names of all parameters
needed to compute reparameterization.
backward_hook_key (int): torch.utils.hooks.RemovableHandle.id for hook used in module backward pass.
"""
def __init__(self, name, dim, module, retain_forward=True):
self.name = name
self.dim = dim
self.evaluated = False
self.retain_forward = retain_forward
self.reparameterization_names = []
self.backward_hook_key = None
self.module = module
def compute_weight(self, module=None, name=None):
"""
Computes reparameterized weight value to assign value to module attribute
with name `name`.
See WeightNorm class for example.
Arguments:
module (nn.Module): module with weight we'd like to reparameterize
Returns:
w (Tensor): Tensor object containing value of reparameterized weight
"""
raise NotImplementedError
def reparameterize(self, name, weight, dim):
"""
Creates Parameters to be used for reparameterization and creates the names of
the module attributes these Parameters will correspond to.
The parameters will be registered according to the names provided.
See WeightNorm class for example.
Arguments:
module (nn.Module): module with weight we'd like to reparameterize
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to compute parameterization
Returns:
names (list, str): names of Parameters to be used for reparameterization
params (list, Parameter): Parameters to be used for reparameterization
"""
raise NotImplementedError
@staticmethod
def apply(module, name, dim, reparameterization=None, hook_child=True):
"""
Applies reparameterization to module's `name` parameter and modifies instance attributes as appropriate.
`hook_child` adds reparameterization hook to direct parent of the parameters. If False, it's added to `module` instead.
"""
if reparameterization is None:
reparameterization = Reparameterization
module2use, name2use = Reparameterization.get_module_and_name(module, name)
# does not work on sparse
if name2use is None or isinstance(module2use, (torch.nn.Embedding, torch.nn.EmbeddingBag)):
return
if hook_child:
fn = reparameterization(name2use, dim, module2use)
else:
fn = reparameterization(name, dim, module)
weight = getattr(module2use, name2use)
if weight.dim() <= 1:
return
# remove weight from parameter list
del module2use._parameters[name2use]
# add parameters of reparameterization of parameter to module
names, params = fn.reparameterize(name2use, weight, dim)
for n, p in zip(names, params):
module2use.register_parameter(n, p)
# add parameters to reparameterization so they can be removed later
fn.reparameterization_names = names
setattr(module2use, name2use, None)
hook_module = module2use
if not hook_child:
hook_module = module
# recompute weight before every forward()
hook_module.register_forward_pre_hook(fn)
# remove weight during backward
handle = hook_module.register_backward_hook(fn.backward_hook)
# get hook key so we can delete it later
fn.backward_hook_key = handle.id
return fn
@staticmethod
def get_module_and_name(module, name):
"""
Recursively fetches the (possibly nested) child module and the name of the weight to be reparameterized.
"""
name2use = None
module2use = None
names = name.split('.')
if len(names) == 1 and names[0] != '':
name2use = names[0]
module2use = module
elif len(names) > 1:
module2use = module
name2use = names[0]
for i in range(len(names)-1):
module2use = getattr(module2use, name2use)
name2use = names[i+1]
return module2use, name2use
def get_params(self, module):
"""gets params of reparameterization based on known attribute names"""
return [getattr(module, n) for n in self.reparameterization_names]
def remove(self, module):
"""removes reparameterization and backward hook (does not remove forward hook)"""
module2use, name2use = Reparameterization.get_module_and_name(module, self.name)
for p in self.get_params(module2use):
p.requires_grad = False
weight = self.compute_weight(module2use, name2use)
delattr(module2use, name2use)
for n in self.reparameterization_names:
del module2use._parameters[n]
module2use.register_parameter(name2use, Parameter(weight.data))
del module._backward_hooks[self.backward_hook_key]
def __call__(self, module, inputs):
"""callable hook for forward pass"""
module2use, name2use = Reparameterization.get_module_and_name(module, self.name)
_w = getattr(module2use, name2use)
if not self.evaluated or _w is None:
if _w is not None:
delattr(module2use, name2use)
w = self.compute_weight(module2use, name2use)
setattr(module2use, name2use, w)
self.evaluated = True
def backward_hook(self, module, grad_input, grad_output):
"""callable hook for backward pass"""
module2use, name2use = Reparameterization.get_module_and_name(module, self.name)
wn = getattr(module2use, name2use)
self.evaluated = False
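# Hedged sketch of the subclass contract (hypothetical class, for illustration only):
# a concrete reparameterization supplies `reparameterize` (split one Parameter into
# several) and `compute_weight` (recombine them before each forward), e.g.:
#
#     class ScaledWeight(Reparameterization):
#         def compute_weight(self, module=None, name=None):
#             module, name = module or self.module, name or self.name
#             module, name = Reparameterization.get_module_and_name(module, name)
#             return getattr(module, name + '_s') * getattr(module, name + '_w')
#         def reparameterize(self, name, weight, dim):
#             return [name + '_s', name + '_w'], [Parameter(torch.ones(1)), Parameter(weight.data)]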
| sentiment-discovery-master | reparameterization/reparameterization.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2016, Facebook, inc (Adam Paszke). All rights reserved.
###############################################################################
'''
Code adapted from https://github.com/pytorch/pytorch/blob/master/torch/utils/checkpoint.py
Introduced RNG management in order to get correct results with random layers (e.g. dropout)
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import warnings
def detach_variable(inputs):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
x = inp.detach()
x.requires_grad = inp.requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
def check_backward_validity(inputs):
if not any(inp.requires_grad for inp in inputs):
warnings.warn("None of the inputs have requires_grad=True. Gradients will be None")
class CheckpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, run_function, *args):
check_backward_validity(args)
ctx.run_function = run_function
ctx.random_state = torch.cuda.get_rng_state()
ctx.save_for_backward(*args)
with torch.no_grad():
outputs = run_function(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
inputs = ctx.saved_tensors
detached_inputs = detach_variable(inputs)
backward_rng = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(ctx.random_state)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
torch.cuda.set_rng_state(backward_rng)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
return (None,) + tuple(inp.grad for inp in detached_inputs)
def checkpoint(function, *args):
r"""Checkpoint a model or part of the model
Checkpointing works by trading compute for memory. Rather than storing all
intermediate activations of the entire computation graph for computing
backward, the checkpointed part does **not** save intermediate activations,
and instead recomputes them in backward pass. It can be applied on any part
of a model.
Specifically, in the forward pass, :attr:`function` will run in
:func:`torch.no_grad` manner, i.e., not storing the intermediate
activations. Instead, the forward pass saves the inputs tuple and the
:attr:`function` parameter. In the backward pass, the saved inputs and
:attr:`function` are retrieved, and the forward pass is computed on
:attr:`function` again, now tracking the intermediate activations, and then
the gradients are calculated using these activation values.
.. warning::
Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
with :func:`torch.autograd.backward`.
.. warning::
If :attr:`function` invocation during backward does anything different
than the one during forward, e.g., due to some global variable, the
checkpointed version won't be equivalent, and unfortunately it can't be
detected.
.. warning::
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
args: tuple containing inputs to the :attr:`function`
Returns:
Output of running :attr:`function` on :attr:`*args`
"""
return CheckpointFunction.apply(function, *args)
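# Hedged usage sketch (assumes a CUDA device, since this variant saves/restores the CUDA
# RNG state so random layers such as dropout replay identically when recomputed):
#
#     block = nn.Sequential(nn.Linear(16, 16), nn.Dropout(0.5)).cuda()
#     x = torch.randn(4, 16, device='cuda', requires_grad=True)
#     y = checkpoint(block, x)     # forward runs under no_grad; only inputs are saved
#     y.sum().backward()           # forward re-runs with the saved RNG state, then backprops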
def checkpoint_sequential(functions, segments, *inputs):
r"""A helper function for checkpointing sequential models.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a model in various segments
and checkpoint each segment. All segments except the last will run in
:func:`torch.no_grad` manner, i.e., not storing the intermediate
activations. The inputs of each checkpointed segment will be saved for
re-running the segment in the backward pass.
See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
.. warning::
Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
with :func:`torch.autograd.backward`.
.. warning::
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or
functions (comprising the model) to run sequentially.
segments: Number of chunks to create in the model
inputs: tuple of Tensors that are inputs to :attr:`functions`
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_sequential(model, chunks, input_var)
"""
def run_function(start, end, functions):
def forward(*inputs):
input = inputs[0]
for j in range(start, end + 1):
input = functions[j](input)
return input
return forward
if isinstance(functions, torch.nn.Sequential):
functions = list(functions.children())
segment_size = len(functions) // segments
# the last chunk has to be non-volatile
end = -1
for start in range(0, segment_size * (segments - 1), segment_size):
end = start + segment_size - 1
inputs = checkpoint(run_function(start, end, functions), *inputs)
if not isinstance(inputs, tuple):
inputs = (inputs,)
return run_function(end + 1, len(functions) - 1, functions)(*inputs) | sentiment-discovery-master | model/checkpoint.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2017, Facebook, inc. All rights reserved.
###############################################################################
'''
Code adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/transformer.py
Introduced optimal gradient checkpointing for intermediate layers in ./transformer.py
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self._mask = None
self.in_proj_weight = nn.Parameter(torch.Tensor(3*embed_dim, embed_dim))
if bias:
self.in_proj_bias = nn.Parameter(torch.Tensor(3*embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj.bias, 0.)
def forward(self, query, key, value, mask_future_timesteps=False,
key_padding_mask=None, incremental_state=None,
need_weights=True, static_kv=False):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
# self-attention
q, k, v = self.in_proj_qkv(query)
elif kv_same:
# encoder-decoder attention
q = self.in_proj_q(query)
if key is None:
assert value is None
# this will allow us to concat it with the previous value and
# just get the previous value
k = v = q.new(0)
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if saved_state is not None:
if 'prev_key' in saved_state:
k = torch.cat((saved_state['prev_key'], k), dim=0)
if 'prev_value' in saved_state:
v = torch.cat((saved_state['prev_value'], v), dim=0)
saved_state['prev_key'] = k
saved_state['prev_value'] = v
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
# only apply masking at training time (when incremental state is None)
if mask_future_timesteps and incremental_state is None:
assert query.size() == key.size(), \
'mask_future_timesteps only applies to self-attention'
attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.float().masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
).type_as(attn_weights) # FP16 support: cast to float and back
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
# average attention weights over heads
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
else:
attn_weights = None
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2*self.embed_dim)
def _in_proj(self, input, start=None, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
if end is not None:
weight = weight[:end, :]
if bias is not None:
bias = bias[:end]
if start is not None:
weight = weight[start:, :]
if bias is not None:
bias = bias[start:]
return F.linear(input.type_as(weight), weight, bias)
def buffered_mask(self, tensor):
dim = tensor.size(-1)
if self._mask is None:
self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._mask.size(0) < dim:
self._mask = torch.triu(fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)
return self._mask[:dim, :dim]
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return get_incremental_state(
self,
incremental_state,
'attn_state',
) or {}
def _set_input_buffer(self, incremental_state, buffer):
set_incremental_state(
self,
incremental_state,
'attn_state',
buffer,
)
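# Shape sketch (illustrative, not from the original file): inputs are time-major.
#
#     mha = MultiheadAttention(embed_dim=512, num_heads=8)
#     x = torch.randn(tgt_len, bsz, 512)                  # Time x Batch x Channel
#     attn, attn_weights = mha(x, x, x, mask_future_timesteps=True)
#     # attn: (tgt_len, bsz, 512); attn_weights: (bsz, tgt_len, src_len), head-averaged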
class LearnedPositionalEmbedding(nn.Embedding):
"""This module learns positional embeddings up to a fixed maximum size.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx, left_pad):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.left_pad = left_pad
def forward(self, input, incremental_state=None):
"""Input is expected to be of size [bsz x seqlen]."""
if incremental_state is not None:
# positions is the same for every token when decoding a single step
positions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1))
else:
positions = make_positions(input.data, self.padding_idx, self.left_pad)
return super().forward(positions)
def max_positions(self):
"""Maximum number of supported positions."""
return self.num_embeddings - self.padding_idx - 1
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
def __init__(self, embedding_dim, padding_idx, left_pad, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.left_pad = left_pad
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor())
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None):
"""Input is expected to be of size [bsz x seqlen]."""
# recompute/expand embeddings if needed
bsz, seq_len = input.size()
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.type_as(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
return self.weights[self.padding_idx + seq_len, :].expand(bsz, 1, -1)
positions = make_positions(input.data, self.padding_idx, self.left_pad)
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
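# Shape sketch (illustrative): the table concatenates a sin half and a cos half per
# position and zeroes the row for `padding_idx`, so pad tokens get a null position vector.
#
#     emb = SinusoidalPositionalEmbedding.get_embedding(num_embeddings=6,
#                                                       embedding_dim=8, padding_idx=0)
#     # emb: (6, 8); emb[0] is all zeros; columns 0..3 are sin terms, 4..7 are cos terms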
class GeLU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
# tanh approximation of GELU (Hendrycks & Gimpel, 2016); 0.79788456080 approximates sqrt(2/pi)
return 0.5 * x * (1 + torch.tanh(0.79788456080 * (x + 0.044715 * x * x * x)))
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
return m
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad, learned=False):
if learned:
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(embedding_dim, padding_idx, left_pad, num_embeddings)
return m
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = tensor.new()
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
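# Worked example (illustrative): with padding_idx=1 and right padding, non-pad symbols
# are numbered from padding_idx + 1 and pad slots keep the value padding_idx.
#
#     t = torch.tensor([[5, 6, 8, 1],
#                       [7, 9, 1, 1]])
#     make_positions(t, padding_idx=1, left_pad=False)
#     # -> tensor([[2, 3, 4, 1],
#     #            [2, 3, 1, 1]])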
| sentiment-discovery-master | model/transformer_utils.py |
from .distributed import *
from .model import *
from .sentiment_classifier import *
from .transformer import *
from .transformer_utils import * | sentiment-discovery-master | model/__init__.py |
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from .RNN_utils import RNN
from .transformer_utils import Embedding
from .transformer import TransformerDecoder
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.decoder = nn.Linear(nhid, ntoken)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.decoder.bias.data.fill_(0)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input, reset_mask=None, chkpt_grad=False, **kwargs):
emb = self.drop(self.encoder(input))
self.rnn.detach_hidden()
output, hidden = self.rnn(emb, reset_mask=reset_mask)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd = {'encoder': sd}
sd['decoder'] = self.decoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
if 'decoder' in state_dict:
self.decoder.load_state_dict(state_dict['decoder'], strict=strict)
self.encoder.load_state_dict(state_dict['encoder']['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['encoder']['rnn'], strict=strict)
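# Hedged usage sketch (token shapes inferred from the forward signature; 'LSTM' is an
# assumed rnn_type exposed by RNN_utils, not verified here):
#
#     model = RNNModel('LSTM', ntoken=256, ninp=64, nhid=256, nlayers=1)
#     tokens = torch.randint(0, 256, (seq_len, batch))    # time-major token ids
#     logits, hidden = model(tokens)                      # logits: (seq_len, batch, ntoken)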
class RNNFeaturizer(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, all_layers=False, concat_pools=[False] * 3, hidden_warmup=False, residuals=False, get_lm_out=False):
super(RNNFeaturizer, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)#, residuals=residuals)
# self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout, residuals=residuals)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.all_layers = all_layers
self.hidden_warmup = hidden_warmup
self.aux_lm_loss = get_lm_out
if self.aux_lm_loss:
self.decoder = nn.Linear(nhid, ntoken)
self.concat_max, self.concat_min, self.concat_mean = concat_pools
self.output_size = self.nhid if not self.all_layers else self.nhid * self.nlayers
self.output_size *= (1 + sum(concat_pools))
def forward(self, input, seq_len=None, get_hidden=False, chkpt_grad=False, **kwargs):
if not self.hidden_warmup:
self.rnn.reset_hidden(input.size(1))
if self.aux_lm_loss:
outs = []
if seq_len is None:
for i in range(input.size(0)):
emb = self.drop(self.encoder(input[i]))
out, hidden = self.rnn(emb.unsqueeze(0), collect_hidden=True, chkpt_grad=chkpt_grad)
if self.aux_lm_loss:
outs.append(out)
cell = self.get_features(hidden)
if self.concat_max or self.concat_min or self.concat_mean:
    # minimal repair: the original line referenced a nonexistent `self.concat_pools`
    # attribute and concatenated tensors of mismatched rank; pool over the feature
    # dim with keepdim so the concatenation is well-formed
    cell = torch.cat((cell, torch.mean(cell, -1, keepdim=True), torch.max(cell, -1, keepdim=True)[0]), -1)
if get_hidden:
cell = (self.get_features(hidden, get_hidden=True), cell)
else:
last_cell = last_hidden = 0
ops = ['max', 'min', 'add']
maps = {
k : {'last_c' : 0, 'last_h' : 0, 'c' : None, 'h' : None, 'op' : ops[i]}
for i, k in enumerate(['concat_max', 'concat_min', 'concat_mean']) if getattr(self, k)
}
full_emb = self.drop(self.encoder(input))
for i in range(input.size(0)):
emb = full_emb[i]
out, hidden = self.rnn(emb.unsqueeze(0), collect_hidden=True)
if self.aux_lm_loss:
outs.append(out)
# print(hidden) -> [[[tensor(...)]], [[tensor(...)]]]
# print(hidden[0][0][0].size()) -> torch.Size([128, 4096])
cell = self.get_features(hidden)
if i == 0: # instantiate pools for cell
for k, d in maps.items():
d['c'] = cell
if get_hidden:
hidden = self.get_features(hidden, get_hidden=True)
if i == 0: # instantiate pools for hidden
for k, d in maps.items():
d['h'] = hidden
if i > 0:
cell = get_valid_outs(i, seq_len, cell, last_cell)
for k, d in maps.items():
d['c'] = getattr(torch, d['op'])(d['c'], cell)
d['c'] = get_valid_outs(i, seq_len, d['c'], d['last_c'])
if get_hidden:
for k, d in maps.items():
d['h'] = getattr(torch, d['op'])(d['h'], hidden)
d['h'] = get_valid_outs(i, seq_len, d['h'], d['last_h'])
hidden = get_valid_outs(i, seq_len, hidden, last_hidden)
last_cell = cell
for k, d in maps.items():
d['last_c'] = d['c']
if get_hidden:
last_hidden = hidden
for k, d in maps.items():
d['last_h'] = d['h']
# print("Cell dimensions: ", cell.size()) -> torch.Size([128, 4096])
seq_len = seq_len.view(-1, 1).float()
if self.concat_mean:
maps['concat_mean']['c'] /= seq_len
if get_hidden:
maps['concat_mean']['h'] /= seq_len
for k, d in maps.items():
cell = torch.cat([cell, d['c']], -1)
if get_hidden:
hidden = torch.cat([hidden, d['h']], -1)
if get_hidden:
cell = (hidden, cell)
if self.aux_lm_loss:
return cell, self.decoder(torch.cat(outs, 0))
else:
return cell, None
def get_features(self, hidden, get_hidden=False):
if not get_hidden:
cell = hidden[1]
else:
cell = hidden[0]
#get cell state from layers
cell = cell[0]
if self.all_layers:
return torch.cat(cell, -1)
else:
return cell[-1]
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd = {'encoder': sd}
if self.aux_lm_loss:
sd['decoder'] = self.decoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
self.encoder.load_state_dict(state_dict['encoder']['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['encoder']['rnn'], strict=strict)
if self.aux_lm_loss:
self.decoder.load_state_dict(state_dict['decoder'], strict=strict)
def get_valid_outs(timestep, seq_len, out, last_out):
invalid_steps = timestep >= seq_len
if (invalid_steps.long().sum() == 0):
return out
return selector_circuit(out, last_out, invalid_steps)
def selector_circuit(val0, val1, selections):
selections = selections.type_as(val0.data).view(-1, 1).contiguous()
return (val0*(1-selections)) + (val1*selections)
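# Worked example (illustrative): for samples whose sequence has already ended
# (timestep >= seq_len), get_valid_outs freezes the output at its last valid value.
#
#     out      = torch.tensor([[1.0], [2.0]])    # current step, batch of 2
#     last_out = torch.tensor([[9.0], [8.0]])    # previous step
#     seq_len  = torch.tensor([[5], [2]])        # sample 1 ended before timestep 3
#     get_valid_outs(3, seq_len, out, last_out)  # -> tensor([[1.0], [8.0]])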
class TransformerDecoderModel(nn.Module):
"""Base class for encoder-decoder models."""
def __init__(self, args):
super().__init__()
self._is_generation_fast = False
self.encoder = TransformerDecoder(args, Embedding(args.data_size, args.decoder_embed_dim, padding_idx=args.padding_idx))
def forward(self, src_tokens, get_attention=True, chkpt_grad=False, **kwargs):
decoder_out, attn = self.encoder(src_tokens, src_tokens, chkpt_grad=chkpt_grad)
if get_attention:
return decoder_out, attn
return decoder_out
def max_positions(self):
"""Maximum length supported by the model."""
return self.encoder.max_positions()
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample['target']
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.encoder.get_normalized_probs(net_output, log_probs, sample)
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.encoder.max_positions()
def load_state_dict(self, state_dict, strict=True):
"""Copies parameters and buffers from state_dict into this module and
its descendants.
Overrides the method in nn.Module; compared with that method this
additionally "upgrades" state_dicts from old checkpoints.
"""
state_dict = self.upgrade_state_dict(state_dict)
super().load_state_dict(state_dict, strict)
def upgrade_state_dict(self, state_dict):
assert state_dict is not None
def do_upgrade(m):
if m != self and hasattr(m, 'upgrade_state_dict'):
m.upgrade_state_dict(state_dict)
self.apply(do_upgrade)
sd = {}
for k,v in state_dict.items():
if k.startswith('decoder'):
k = k.replace('decoder', 'encoder')
sd[k] = v
return sd
def make_generation_fast_(self, **kwargs):
"""Optimize model for faster generation."""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except ValueError: # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module):
if module != self and hasattr(module, 'make_generation_fast_'):
module.make_generation_fast_(**kwargs)
self.apply(apply_make_generation_fast_)
def train(mode):
if mode:
raise RuntimeError('cannot train after make_generation_fast')
# this model should no longer be used for training
self.eval()
self.train = train
class TransformerFeaturizer(nn.Module):
def __init__(self, get_lm_out, args):
super(TransformerFeaturizer, self).__init__()
args.use_final_embed = True
self.encoder = TransformerDecoderModel(args)
self.aux_lm_loss = get_lm_out
def forward(self, input, seq_len=None, get_hidden=False, chkpt_grad=False, **kwargs):
encoder_out = self.encoder(input, get_attention=get_hidden, chkpt_grad=chkpt_grad, **kwargs)
if get_hidden:
encoder_out = encoder_out[0]
feats = encoder_out[seq_len.squeeze(), torch.arange(seq_len.size(0))]
if get_hidden:
feats = [feats, None]
lm_out = None
if self.aux_lm_loss:
lm_out = F.linear(encoder_out, self.encoder.encoder.embed_out)
return feats, lm_out
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
def load_state_dict(self, state_dict, strict=True):
return self.encoder.load_state_dict(state_dict, strict=strict)
| sentiment-discovery-master | model/model.py |
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from itertools import chain
from .model import RNNFeaturizer, TransformerFeaturizer
from .transformer_utils import GeLU
class BinaryClassifier(nn.Module):
def __init__(self, num_features=4096, **kwargs):
super().__init__()
self.nclasses = 2
self.dense0 = nn.Linear(num_features, 1)
self.neurons = None
self.thresholds = torch.tensor(np.array([.5])).float()
self.final = 1
self.device = -1
print('init BinaryClassifier with %d features' % num_features)
def cuda(self, device=None):
super(BinaryClassifier, self).cuda(device)
self.device = device
self.thresholds = self.thresholds.cuda(device)
def cpu(self):
super(BinaryClassifier, self).cpu()
self.device = -1
self.thresholds = self.thresholds.cpu()
def forward(self, X, **kwargs):
out = torch.sigmoid(self.linear(X)).float()
return threshold_predictions(out, self.thresholds)
#return F.sigmoid(self.linear(X), dim=-1).float()
def linear(self, X):
weight = self.dense0.weight
if self.neurons is not None:
#weight = weight[torch.arange(weight.size(0)).unsqueeze(1), self.neurons].contiguous()
weight = weight[:, self.neurons].contiguous()
if X.size(-1) == self.dense0.weight.size(-1):
X = X[:, self.neurons].contiguous()
torch.cuda.synchronize()
return F.linear(X, weight, self.dense0.bias)
def set_neurons(self, num_neurons=None):
if num_neurons is None:
self.neurons = None
return self.get_neurons()
neurons, values = self.get_neurons(num_neurons=num_neurons)
self.neurons = neurons
return neurons, values
def get_neurons(self, num_neurons=None):
if num_neurons is None:
return self.dense0.weight
values, neurons = torch.topk(self.dense0.weight.abs().float(), num_neurons, 1)
neurons = neurons[0]
values = self.dense0.weight[:, neurons]
return neurons, values
def get_thresholds(self):
return self.thresholds
def set_thresholds(self, thresholds, **kwargs):
if isinstance(thresholds, float):
thresholds = [thresholds]
if isinstance(thresholds, (list, tuple, np.ndarray, np.generic)):
thresholds = torch.tensor(thresholds).float()
if self.device == -1:
thresholds = thresholds.cpu()
else:
thresholds = thresholds.cuda(self.device)
self.thresholds = thresholds
return None
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.dense0.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['neurons'] = self.neurons
sd['thresholds'] = self.get_thresholds()
return sd
def load_state_dict(self, state_dict, strict=True):
if 'neurons' in state_dict:
self.neurons = state_dict['neurons']
if 'thresholds' in state_dict:
self.set_thresholds(state_dict['thresholds'])
sd = {}
for k, v in state_dict.items():
if k != 'neurons' and k != 'thresholds':
sd[k] = v
self.dense0.load_state_dict(sd, strict=strict)
NONLINEARITY_MAP = {
'prelu': nn.PReLU,
'relu': nn.ReLU,
'elu': nn.ELU,
'selu': nn.SELU,
'leaky': nn.LeakyReLU,
'gelu': GeLU
}
class MultiLayerBinaryClassifier(nn.Module):
def __init__(self, input_layer_size, layer_sizes, dropout=0.1, init_dropout=True, heads_per_class=1,
nonlinearity='PReLU', softmax=False, double_threshold=False, dual_threshold=False, **kwargs):
super(MultiLayerBinaryClassifier, self).__init__()
self.heads_per_class = heads_per_class
self.nclasses = int(layer_sizes[-1])
self.thresholds = torch.tensor(np.array([.5]*self.nclasses)).float()
self.double_threshold = double_threshold
self.dual_threshold = dual_threshold
self.device = -1
if self.heads_per_class > 1:
print('Using multiple heads per class: %d' % heads_per_class)
layer_sizes = list(layer_sizes)
layer_sizes[-1] = int(layer_sizes[-1]) * heads_per_class
self.neurons = None
self.layer_sizes = [input_layer_size] + list(map(int, layer_sizes))
self.final = self.layer_sizes[-1]
self.dropout = dropout
assert nonlinearity.lower() in NONLINEARITY_MAP
self.nonlinearity = NONLINEARITY_MAP[nonlinearity.lower()]()
# layer_sizes are sizes of the input and hidden layers, so the final 1 is assumed.
layer_list = []
# Since we receive input from the Transformer bottleneck, it may make sense to apply dropout to that input first
if init_dropout:
layer_list.extend([nn.Dropout(p=self.dropout)])
layer_list.extend(list(chain.from_iterable(
[[nn.Linear(self.layer_sizes[i], self.layer_sizes[i+1]), self.nonlinearity, nn.Dropout(p=self.dropout)] for i in range(len(self.layer_sizes) - 2)]
)))
self.final_layer = nn.Linear(*self.layer_sizes[-2:])
extend_list = [self.final_layer]
if not softmax:
extend_list += [nn.Sigmoid()]
layer_list.extend(extend_list)
self.model = nn.Sequential(*layer_list)
self.neurons = None
self.softmax = softmax
print('init MultiLayerBinaryClassifier with layers %s and dropout %s' % (self.layer_sizes[1:], self.dropout))
def forward(self, X, **kwargs):
out = self.model(X).float()
if self.heads_per_class <= 1:
if self.softmax:
clf_out = torch.max(out, -1, keepdim=True)[1]
else:
out, clf_out = threshold_predictions(out, self.thresholds, double_threshold=self.double_threshold,
dual_threshold=self.dual_threshold)
return out, clf_out
out = out.view(out.size(0), -1, self.heads_per_class)
probs = out
if self.softmax:
probs = F.softmax(probs, 1)
clf_mean = probs.mean(dim=2)
if self.softmax:
clf_out = torch.max(clf_mean, -1, keepdim=True)[1]
else:
clf_mean, clf_out = threshold_predictions(clf_mean, self.thresholds, double_threshold=self.double_threshold,
dual_threshold=self.dual_threshold)
clf_std = probs.std(dim=2)
return out, clf_mean, clf_std, clf_out
# HACK -- parameter to measure *variation* between last layer of the MLP.
# Why? To support multihead -- for the same category, where we want multiple heads to predict with different functions
# (similar to training a mixture of models) -- useful for uncertainty sampling
def get_last_layer_variance(self, eps=.00001):
final_layer_weight = self.final_layer.weight
fl_norm = torch.norm(final_layer_weight,2,1)
final_layer_weight = final_layer_weight * (1.0 / fl_norm).unsqueeze(1)
final_layer_dot = final_layer_weight @ torch.transpose(final_layer_weight, 0, 1)
# Compute matrix of all NxN layers -- in normalized form
final_layer_dot_loss = (torch.norm(final_layer_dot,2,1) - 1.)
final_layer_dot_loss = final_layer_dot_loss/(self.final_layer.weight.shape[0]+eps)
final_layer_dot_loss = torch.sum(final_layer_dot_loss)
# Return the average loss -- per dot comparison.
return final_layer_dot_loss
def cuda(self, device=None):
super(MultiLayerBinaryClassifier, self).cuda(device)
self.device = device
self.thresholds = self.thresholds.cuda(device)
def cpu(self):
super(MultiLayerBinaryClassifier, self).cpu()
self.device = -1
self.thresholds = self.thresholds.cpu()
def get_thresholds(self):
return self.thresholds
def set_thresholds(self, thresholds, double_threshold=False, dual_threshold=False, **kwargs):
self.dual_threshold = dual_threshold
self.double_threshold = double_threshold
if isinstance(thresholds, float):
thresholds = [thresholds]
if isinstance(thresholds, (list, tuple, np.ndarray, np.generic)):
thresholds = torch.tensor(thresholds).float()
if self.device == -1:
thresholds = thresholds.cpu()
else:
thresholds = thresholds.cuda(self.device)
self.thresholds = thresholds
return None
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['sd'] = super(MultiLayerBinaryClassifier, self).state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['thresholds'] = self.get_thresholds()
sd['dual_threshold'] = self.dual_threshold
sd['double_threshold'] = self.double_threshold
return sd
def load_state_dict(self, state_dict, strict=True):
double_threshold = state_dict['double_threshold'] if 'double_threshold' in state_dict else False
dual_threshold = state_dict['dual_threshold'] if 'dual_threshold' in state_dict else False
self.set_thresholds(state_dict['thresholds'], double_threshold, dual_threshold)
return super(MultiLayerBinaryClassifier, self).load_state_dict(state_dict['sd'], strict=strict)
def get_neurons(self, *args, **kwargs):
return None
def set_neurons(self, *args, **kwargs):
return None
def threshold_predictions(predictions, thresholds, double_threshold=False, dual_threshold=False):
if double_threshold:
positive = (predictions > thresholds.max()).float()
neutral = ((1-positive) * (predictions > thresholds.min()).float())*.5
return predictions, (positive+neutral)
preds = (predictions > thresholds).float()
if dual_threshold:
positive = preds[:,0]
negative = preds[:,1]
equals = (positive==negative).float().view(-1,1)
preds = torch.cat([preds*(1-equals), equals.view(-1,1)], dim=-1)
predictions = torch.cat([predictions, XOR(predictions[:,0], predictions[:,1]).view(-1, 1)], dim=-1)
# print(preds, predictions)
# exit()
return predictions, preds
def XOR(A, B):
return A+B-(2*A*B)
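# Worked example (illustrative): with dual_threshold, the two columns are (positive,
# negative) probabilities; when both sides agree after thresholding, the sample is
# routed to a third "neutral" column, with XOR of the probabilities appended.
#
#     p = torch.tensor([[0.9, 0.1],     # clearly positive
#                       [0.6, 0.7]])    # both fire -> neutral
#     probs, preds = threshold_predictions(p, torch.tensor([0.5, 0.5]), dual_threshold=True)
#     # preds -> tensor([[1., 0., 0.],
#     #                  [0., 0., 1.]])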
class MultiHeadBCELoss(torch.nn.BCELoss):
def __init__(self, weight=None, size_average=None, reduce=None, reduction='elementwise_mean', heads_per_class=1):
super(MultiHeadBCELoss, self).__init__(weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)
self.heads_per_class = heads_per_class
def forward(self, input, target):
input = input.permute(0, 2, 1)
target = target.unsqueeze(1).expand(-1, self.heads_per_class, -1)
return super(MultiHeadBCELoss, self).forward(input, target)
class MultiHeadCrossEntropyLoss(torch.nn.CrossEntropyLoss):
def __init__(self, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='elementwise_mean', heads_per_class=1):
    # bug fix: heads_per_class was referenced below but missing from the signature
    super(MultiHeadCrossEntropyLoss, self).__init__(weight=weight, size_average=size_average, reduce=reduce, reduction=reduction, ignore_index=ignore_index)
    self.heads_per_class = heads_per_class
def forward(self, input, target):
input = input.permute(0, 2, 1)
target = target.unsqueeze(1).expand(-1, self.heads_per_class)
return super(MultiHeadCrossEntropyLoss, self).forward(input, target)
class SentimentClassifier(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, model_type, ntoken, ninp, nhid, nlayers, classifier_hidden_layers=None, dropout=0.5, all_layers=False, concat_pools=[False] * 3, get_lm_out=False, args=None):
super().__init__()
self.model_type = model_type
if model_type == 'transformer':
self.lm_encoder = TransformerFeaturizer(get_lm_out, args)
out_size = args.decoder_embed_dim
else:
# NOTE: Dropout is for Classifier. Add separate RNN dropout or via params, if needed.
self.lm_encoder = RNNFeaturizer(model_type, ntoken, ninp, nhid, nlayers, dropout=0.0, all_layers=all_layers,
concat_pools=concat_pools, get_lm_out=get_lm_out, hidden_warmup=args.num_hidden_warmup > 0)
out_size = self.lm_encoder.output_size
self.encoder_dim = out_size
if classifier_hidden_layers is None:
self.classifier = BinaryClassifier(num_features=self.encoder_dim, double_threshold=args.double_thresh, dual_threshold=args.dual_thresh)
else:
self.classifier = MultiLayerBinaryClassifier(self.encoder_dim, classifier_hidden_layers, dropout=dropout, heads_per_class=args.heads_per_class,
softmax=args.use_softmax, double_threshold=args.double_thresh, dual_threshold=args.dual_thresh and not args.joint_binary_train)
self.out_dim = self.classifier.final
self.nclasses = self.classifier.nclasses
self.neurons_ = None
self.thresholds = self.classifier.thresholds
# If we want to output multiple heads, and average the output
self.heads_per_class = args.heads_per_class
def cuda(self, device=None):
self.lm_encoder.cuda(device)
self.classifier.cuda(device)
return self
def cpu(self):
self.lm_encoder.cpu()
self.classifier.cpu()
return self
def forward(self, input, seq_len=None, get_hidden=False):
hidden, lm_out = self.lm_encoder(input, seq_len, get_hidden)
if get_hidden:
hidden = hidden[0]
if self.neurons is not None:
hidden = hidden[:, self.neurons].contiguous()
classifier_in = hidden
class_out = self.classifier(classifier_in)
return class_out, (lm_out, classifier_in)
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['lm_encoder'] = self.lm_encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['classifier'] = self.classifier.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
self.lm_encoder.load_state_dict(state_dict['lm_encoder'], strict=strict)
self.classifier.load_state_dict(state_dict['classifier'], strict=strict)
self.neurons = self.classifier.neurons
self.thresholds = self.classifier.thresholds
def get_thresholds(self):
return self.classifier.get_thresholds()
def set_thresholds(self, thresholds, double_threshold=False, dual_threshold=False):
rtn = self.classifier.set_thresholds(thresholds, double_threshold=double_threshold,
dual_threshold=dual_threshold)
self.thresholds = self.classifier.thresholds
return rtn
def get_neurons(self, **kwargs):
return self.classifier.get_neurons(**kwargs)
def set_neurons(self, num_neurons=None):
rtn = self.classifier.set_neurons(num_neurons=num_neurons)
self.neurons_ = self.classifier.neurons
return rtn
@property
def neurons(self):
return self.neurons_
@neurons.setter
def neurons(self, val):
self.neurons_ = val
self.classifier.neurons = val
| sentiment-discovery-master | model/sentiment_classifier.py |
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.parameters():
if torch.is_tensor(p):
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for name, param in self.module.named_parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
torch.cuda.synchronize()
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
        self.hook_handles = []
        self.hooks = []
        # Hook registration is intentionally disabled below; callers are expected
        # to invoke self.allreduce_params() manually after backward().
        for param in list(self.module.parameters()):
            def allreduce_hook(*unused):
                Variable._execution_engine.queue_callback(allreduce_params)
            # handle = param.register_hook(allreduce_hook)
            #self.hooks.append(allreduce_hook)
            #self.hook_handles.append(handle)
self.allreduce_params = allreduce_params
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
def state_dict(self, destination=None, prefix='', keep_vars=False):
#[h.remove() for h in self.hook_handles]
sd = self.module.state_dict(destination, prefix, keep_vars)
# for handle, hook in zip(self.hook_handles, self.hooks):
# d = handle.hooks_dict_ref()
# d[handle.id] = hook
return sd
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
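# Single-process sketch of the coalesce / reduce / scatter-back pattern used in
# allreduce_params above; dividing by a hard-coded "world size" stands in for the
# real dist.all_reduce (hypothetical demo values).
if __name__ == "__main__":
    grads = [torch.ones(3), torch.full((2, 2), 4.0)]
    coalesced = _flatten_dense_tensors(grads)  # one contiguous buffer, fewer comm calls
    coalesced /= 2.0                           # pretend world_size == 2
    for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
        buf.copy_(synced)
    print(grads[0])  # tensor([0.5000, 0.5000, 0.5000])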
| sentiment-discovery-master | model/distributed.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2017, Facebook, inc. All rights reserved.
###############################################################################
'''
Code adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/transformer.py
Introduced optimal gradient checkpointing for intermediate layers
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .transformer_utils import *
import torch.utils.checkpoint as checkpoint
class TransformerModel(nn.Module):
"""Base class for encoder-decoder models."""
def __init__(self, encoder, decoder):
super().__init__()
self._is_generation_fast = False
self.encoder = encoder
self.decoder = decoder
def forward(self, src_tokens, get_attention=True, **kwargs):
encoder_out = self.encoder(src_tokens)
decoder_out, attn = self.decoder(src_tokens, encoder_out)
if get_attention:
return decoder_out, attn
return decoder_out
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample['target']
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
def load_state_dict(self, state_dict, strict=True):
"""Copies parameters and buffers from state_dict into this module and
its descendants.
Overrides the method in nn.Module; compared with that method this
additionally "upgrades" state_dicts from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
super().load_state_dict(state_dict, strict)
def upgrade_state_dict(self, state_dict):
assert state_dict is not None
def do_upgrade(m):
if m != self and hasattr(m, 'upgrade_state_dict'):
m.upgrade_state_dict(state_dict)
self.apply(do_upgrade)
def make_generation_fast_(self, **kwargs):
"""Optimize model for faster generation."""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except ValueError: # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module):
if module != self and hasattr(module, 'make_generation_fast_'):
module.make_generation_fast_(**kwargs)
self.apply(apply_make_generation_fast_)
        def train(mode=True):
if mode:
raise RuntimeError('cannot train after make_generation_fast')
# this model should no longer be used for training
self.eval()
self.train = train
class DecoderPreprocessor(nn.Module):
def __init__(self, args, embed_tokens, left_pad=True):
super().__init__()
def forward(self, src_tokens):
return {
'encoder_out': src_tokens, # T x B x C
'encoder_padding_mask': None, # B x T
}
class TransformerEncoder(nn.Module):
"""Transformer encoder."""
def __init__(self, args, embed_tokens, left_pad=False):
super().__init__()
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
256, embed_dim, self.padding_idx,
left_pad=left_pad,
learned=args.encoder_learned_pos,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
def forward(self, src_tokens, **kwargs):
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
#x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out_dict, new_order):
if encoder_out_dict['encoder_out'] is not None:
encoder_out_dict['encoder_out'] = \
encoder_out_dict['encoder_out'].index_select(1, new_order)
if encoder_out_dict['encoder_padding_mask'] is not None:
encoder_out_dict['encoder_padding_mask'] = \
encoder_out_dict['encoder_padding_mask'].index_select(0, new_order)
return encoder_out_dict
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions()
def upgrade_state_dict(self, state_dict):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
if 'encoder.embed_positions.weights' in state_dict:
del state_dict['encoder.embed_positions.weights']
if 'encoder.embed_positions._float_tensor' not in state_dict:
state_dict['encoder.embed_positions._float_tensor'] = torch.FloatTensor()
return state_dict
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: dropout -> add residual -> layernorm.
In the tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
dropout -> add residual.
We default to the approach in the paper, but the tensor2tensor approach can
be enabled by setting `normalize_before=True`.
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout,
)
self.dropout = args.dropout
self.relu_dropout = args.relu_dropout
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for i in range(2)])
def forward(self, x, encoder_padding_mask):
residual = x
x = self.maybe_layer_norm(0, x, before=True)
x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(0, x, after=True)
residual = x
x = self.maybe_layer_norm(1, x, before=True)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(1, x, after=True)
return x
def maybe_layer_norm(self, i, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return self.layer_norms[i](x)
else:
return x
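# A note on maybe_layer_norm: with normalize_before=False (the paper's post-norm
# default) the layer norm fires on after=True; with normalize_before=True (the
# tensor2tensor pre-norm variant) it fires on before=True. The XOR
# `after ^ self.normalize_before` encodes exactly this table.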
class TransformerDecoder(nn.Module):
"""Transformer decoder."""
def __init__(self, args, embed_tokens, left_pad=False):
super().__init__()
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
num_tokens = embed_tokens.num_embeddings
embed_dim = embed_tokens.embedding_dim
padding_idx = embed_tokens.padding_idx
if hasattr(args, 'mos') and (args.mos or args.mos_reduce_dim is not None):
assert not args.use_final_embed
self.mos_layer = MixtureOfSoftmax(
input_size=embed_dim, output_size=num_tokens, reduce_dim_size=args.mos_reduce_dim,
num_experts=args.mos_num_experts, dropout=0.1, dropoutl=0.1
)
self.use_final_embed = args.use_final_embed
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
256, embed_dim, padding_idx,
left_pad=left_pad,
learned=args.decoder_learned_pos,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args)
for i in range(args.decoder_layers)
])
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(num_tokens, embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=embed_dim ** -0.5)
def forward(self, prev_output_tokens, encoder_out, incremental_state=None, chkpt_grad=False, **kwargs):
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
#x = x.transpose(0, 1)
def custom(start, end):
def custom_forward(*inputs):
layers = self.layers[start:end]
x_ = inputs[0]
for layer in layers:
x_, attn = layer(x_, None, None, None)
return x_
return custom_forward
if self.training and chkpt_grad:
l = 0
num_layers = len(self.layers)
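            # Checkpoint in ~sqrt(N) chunks of ~sqrt(N) layers each: this balances
            # recompute time against stored activations, the classic O(sqrt(N))
            # memory trade-off for gradient checkpointing.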
chunk_length = math.ceil(math.sqrt(num_layers))
while l < num_layers:
x = checkpoint.checkpoint(custom(l, l+chunk_length), x)
l += chunk_length
attn = None
# decoder layers
else:
for layer in self.layers:
x, attn = layer(x, None, None, None)
# T x B x C -> B x T x C
#x = x.transpose(0, 1)
# project back to size of vocabulary
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
elif not self.use_final_embed:
if hasattr(self, 'mos_layer'):
x = self.mos_layer(x)
else:
x = F.linear(x, self.embed_out)
return x, attn
def get_normalized_probs(self, net_output, log_probs, _):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions()
def upgrade_state_dict(self, state_dict):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
if 'decoder.embed_positions.weights' in state_dict:
del state_dict['decoder.embed_positions.weights']
if 'decoder.embed_positions._float_tensor' not in state_dict:
state_dict['decoder.embed_positions._float_tensor'] = torch.FloatTensor()
return state_dict
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block."""
def __init__(self, args):
super().__init__()
self.GeLU = GeLU()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout,
)
self.dropout = args.dropout
self.relu_dropout = args.relu_dropout
self.normalize_before = args.decoder_normalize_before
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout,
)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for i in range(2)])
def forward(self, x, encoder_out, encoder_padding_mask, incremental_state):
residual = x
x = self.maybe_layer_norm(0, x, before=True)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
mask_future_timesteps=True,
incremental_state=incremental_state,
need_weights=False,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(0, x, after=True)
residual = x
x = self.maybe_layer_norm(1, x, before=True)
x = self.GeLU(self.fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(1, x, after=True)
return x, attn
def maybe_layer_norm(self, i, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return self.layer_norms[i](x)
else:
return x
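# Self-contained sketch of the sqrt-chunk gradient checkpointing used in
# TransformerDecoder.forward above, on hypothetical toy layers (plain Linear
# blocks). Only torch is needed; since this module's relative imports prevent
# running it as a script, read it in place or copy it into a standalone file.
if __name__ == "__main__":
    toy_layers = nn.ModuleList([nn.Linear(32, 32) for _ in range(9)])
    def run_chunk(start, end):
        def custom_forward(x_):
            for layer in toy_layers[start:end]:
                x_ = torch.relu(layer(x_))
            return x_
        return custom_forward
    x = torch.randn(4, 32, requires_grad=True)
    l, chunk = 0, math.ceil(math.sqrt(len(toy_layers)))  # 3 chunks of 3 layers
    while l < len(toy_layers):
        x = checkpoint.checkpoint(run_chunk(l, l + chunk), x)
        l += chunk
    x.sum().backward()  # activations inside each chunk are recomputed here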
| sentiment-discovery-master | model/transformer.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
# This function can incur serious performance penalties if used incorrectly.
# Its uses in the RNN API should be fine. Currently unused in this codebase.
def reverse_dir_tensor(tensor, dim=0):
"""
reverse_dir_tensor stub
"""
chunked = [sub_tensor for sub_tensor in
tensor.chunk(tensor.size(dim), dim)]
chunked = chunked[::-1]
return torch.cat( chunked, dim=dim).view(*tensor.size())
def is_iterable(maybe_iterable):
return isinstance(maybe_iterable, (list, tuple))
def flatten_list(tens_list):
"""
flatten_list stub
"""
if not (is_iterable(tens_list)):
return tens_list
return torch.cat(tens_list, dim=0).view(len(tens_list), *tens_list[0].size() )
#These modules always assumes batch_first
class bidirectionalRNN(nn.Module):
"""
bidirectionalRNN stub
"""
def __init__(self, inputRNN, num_layers=1, dropout = 0):
super(bidirectionalRNN, self).__init__()
self.dropout = dropout
self.fwd = stackedRNN(inputRNN, num_layers=num_layers, dropout = dropout)
self.bckwrd = stackedRNN(inputRNN.new_like(), num_layers=num_layers, dropout = dropout)
self.rnns = nn.ModuleList([self.fwd, self.bckwrd])
#collect hidden option will return all hidden/cell states from entire RNN
def forward(self, input, collect_hidden=False):
"""
forward() stub
"""
seq_len = input.size(0)
bsz = input.size(1)
fwd_out, fwd_hiddens = list(self.fwd(input, collect_hidden = collect_hidden))
bckwrd_out, bckwrd_hiddens = list(self.bckwrd(input, reverse=True, collect_hidden = collect_hidden))
output = torch.cat( [fwd_out, bckwrd_out], -1 )
hiddens = tuple( torch.cat(hidden, -1) for hidden in zip( fwd_hiddens, bckwrd_hiddens) )
return output, hiddens
def reset_parameters(self):
"""
reset_parameters() stub
"""
for rnn in self.rnns:
rnn.reset_parameters()
def init_hidden(self, bsz):
"""
init_hidden() stub
"""
for rnn in self.rnns:
rnn.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden() stub
"""
for rnn in self.rnns:
rnn.detachHidden()
def reset_hidden(self, bsz, reset_mask=None):
"""
reset_hidden() stub
"""
for rnn in self.rnns:
rnn.reset_hidden(bsz, reset_mask=reset_mask)
def init_inference(self, bsz):
"""
init_inference() stub
"""
for rnn in self.rnns:
rnn.init_inference(bsz)
#assumes hidden_state[0] of inputRNN is output hidden state
#constructor either takes an RNNCell or list of RNN layers
class stackedRNN(nn.Module):
"""
stackedRNN stub
"""
def __init__(self, inputRNN, num_layers=1, dropout=0):
super(stackedRNN, self).__init__()
self.dropout = dropout
if isinstance(inputRNN, RNNCell):
self.rnns = [inputRNN]
for i in range(num_layers-1):
self.rnns.append(inputRNN.new_like(inputRNN.output_size))
elif isinstance(inputRNN, list):
assert len(inputRNN) == num_layers, "RNN list length must be equal to num_layers"
self.rnns=inputRNN
else:
raise RuntimeError()
self.nLayers = len(self.rnns)
self.rnns = nn.ModuleList(self.rnns)
'''
Returns output as hidden_state[0] Tensor([sequence steps][batch size][features])
If collect hidden will also return Tuple(
[n_hidden_states][sequence steps] Tensor([layer][batch size][features])
)
    If not collect hidden will also return Tuple(
        [n_hidden_states] Tensor([layer][batch size][features])
    )
    '''
def forward(self, input, collect_hidden=False, reverse=False, reset_mask=None):
"""
forward() stub
"""
seq_len = input.size(0)
bsz = input.size(1)
inp_iter = reversed(range(seq_len)) if reverse else range(seq_len)
hidden_states = [[] for i in range(self.nLayers)]
outputs = []
for seq in inp_iter:
if not reverse and reset_mask is not None:
self.reset_hidden(bsz, reset_mask=reset_mask[seq])
for layer in range(self.nLayers):
if layer == 0:
prev_out = input[seq]
outs = self.rnns[layer](prev_out)
if collect_hidden:
hidden_states[layer].append(outs)
elif seq == seq_len-1:
hidden_states[layer].append(outs)
prev_out = outs[0]
if reverse and reset_mask is not None:
self.reset_hidden(bsz, reset_mask=reset_mask[seq])
outputs.append(prev_out)
if reverse:
outputs = list(reversed(outputs))
'''
At this point outputs is in format:
list( [seq_length] x Tensor([bsz][features]) )
need to convert it to:
list( Tensor([seq_length][bsz][features]) )
'''
output = flatten_list(outputs)
'''
hidden_states at this point is in format:
list( [layer][seq_length][hidden_states] x Tensor([bsz][features]) )
need to convert it to:
For not collect hidden:
list( [hidden_states] x Tensor([layer][bsz][features]) )
For collect hidden:
list( [hidden_states][seq_length] x Tensor([layer][bsz][features]) )
'''
if not collect_hidden:
seq_len = 1
n_hid = self.rnns[0].n_hidden_states
new_hidden = [ [ [ None for k in range(self.nLayers)] for j in range(seq_len) ] for i in range(n_hid) ]
for i in range(n_hid):
for j in range(seq_len):
for k in range(self.nLayers):
new_hidden[i][j][k] = hidden_states[k][j][i]
hidden_states = new_hidden
#Now in format list( [hidden_states][seq_length][layer] x Tensor([bsz][features]) )
#Reverse seq_length if reverse
if reverse:
hidden_states = list( list(reversed(list(entry))) for entry in hidden_states)
#flatten layer dimension into tensor
hiddens = list( list(
flatten_list(seq) for seq in hidden )
for hidden in hidden_states )
#Now in format list( [hidden_states][seq_length] x Tensor([layer][bsz][features]) )
#Remove seq_length dimension if not collect_hidden
        if not collect_hidden:
            hiddens = list(entry[0] for entry in hiddens)
        return output, hiddens
def reset_parameters(self):
"""
reset_parameters() stub
"""
for rnn in self.rnns:
rnn.reset_parameters()
def init_hidden(self, bsz):
"""
init_hidden() stub
"""
for rnn in self.rnns:
rnn.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden() stub
"""
for rnn in self.rnns:
rnn.detach_hidden()
def reset_hidden(self, bsz, reset_mask=None):
"""
reset_hidden() stub
"""
for rnn in self.rnns:
rnn.reset_hidden(bsz, reset_mask=reset_mask)
def init_inference(self, bsz):
"""
init_inference() stub
"""
for rnn in self.rnns:
rnn.init_inference(bsz)
class RNNCell(nn.Module):
"""
RNNCell stub
gate_multiplier is related to the architecture you're working with
For LSTM-like it will be 4 and GRU-like will be 3.
Always assumes input is NOT batch_first.
Output size that's not hidden size will use output projection
Hidden_states is number of hidden states that are needed for cell
if one will go directly to cell as tensor, if more will go as list
"""
def __init__(self, gate_multiplier, input_size, hidden_size, cell, n_hidden_states = 2, bias = False, output_size = None):
super(RNNCell, self).__init__()
self.gate_multiplier = gate_multiplier
self.input_size = input_size
self.hidden_size = hidden_size
self.cell = cell
self.bias = bias
self.output_size = output_size
if output_size is None:
self.output_size = hidden_size
self.gate_size = gate_multiplier * self.hidden_size
self.n_hidden_states = n_hidden_states
self.w_ih = nn.Parameter(torch.Tensor(self.gate_size, self.input_size))
self.w_hh = nn.Parameter(torch.Tensor(self.gate_size, self.output_size))
#Check if there's recurrent projection
if(self.output_size != self.hidden_size):
self.w_ho = nn.Parameter(torch.Tensor(self.output_size, self.hidden_size))
self.b_ih = self.b_hh = None
if self.bias:
self.b_ih = nn.Parameter(torch.Tensor(self.gate_size))
self.b_hh = nn.Parameter(torch.Tensor(self.gate_size))
#hidden states for forward
self.hidden = [ None for states in range(self.n_hidden_states)]
self.reset_parameters()
def new_like(self, new_input_size=None):
"""
new_like() stub
"""
if new_input_size is None:
new_input_size = self.input_size
return type(self)(self.gate_multiplier,
new_input_size,
self.hidden_size,
self.cell,
self.n_hidden_states,
self.bias,
self.output_size)
    # Initialize all parameters uniformly in [-1/sqrt(hidden_size), 1/sqrt(hidden_size)];
    # an alternative scheme (Xavier for weights, uniform for bias) is sketched,
    # commented out, below.
def reset_parameters(self, gain=1):
"""
reset_parameters() stub
"""
stdev = 1.0 / math.sqrt(self.hidden_size)
for param in self.parameters():
param.data.uniform_(-stdev, stdev)
'''
def reset_parameters(self, gain=1):
stdv = 1.0 / math.sqrt(self.gate_size)
self.w_ih.uniform_(-stdv, stdv)
self.w_hh.uniform_(-stdv, stdv)
if self.bias:
self.b_ih.uniform_(-stdv/2, stdv/2)
self.b_hh.uniform_(-stdv/2, stdv/2)
#for param in self.parameters():
# #if (param.dim() > 1):
# # torch.nn.init.xavier_normal(param, gain)
# #else:
# param.data.uniform_(-stdv, stdv)
'''
def init_hidden(self, bsz):
"""
init_hidden() stub
"""
for param in self.parameters():
if param is not None:
a_param = param
break
for i, _ in enumerate(self.hidden):
if(self.hidden[i] is None or self.hidden[i].data.size()[0] != bsz):
if i==0:
hidden_size = self.output_size
else:
hidden_size = self.hidden_size
tens = a_param.data.new(bsz, hidden_size).zero_()
self.hidden[i] = Variable(tens, requires_grad=False)
def reset_hidden(self, bsz, reset_mask=None):
"""
reset_hidden() stub
"""
if reset_mask is not None:
if (reset_mask != 0).any():
for i, v in enumerate(self.hidden):
if reset_mask.numel() == 1:
self.hidden[i] = v.data.zero_()
else:
reset_mask = reset_mask.view(self.hidden[i].size(0), 1).contiguous()
self.hidden[i] = v * (1 - reset_mask).type_as(v.data)
return
for i, _ in enumerate(self.hidden):
self.hidden[i] = None
self.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden() stub
"""
for i, _ in enumerate(self.hidden):
if self.hidden[i] is None:
raise RuntimeError("Must inialize hidden state before you can detach it")
for i, _ in enumerate(self.hidden):
self.hidden[i] = self.hidden[i].detach()
def forward(self, input):
"""
forward() stub
if not inited or bsz has changed this will create hidden states
"""
self.init_hidden(input.size()[0])
hidden_state = self.hidden[0] if self.n_hidden_states == 1 else self.hidden
self.hidden = self.cell(input, hidden_state, self.w_ih, self.w_hh, b_ih=self.b_ih, b_hh=self.b_hh)
if(self.n_hidden_states > 1):
self.hidden = list(self.hidden)
else:
self.hidden=[self.hidden]
if self.output_size != self.hidden_size:
self.hidden[0] = F.linear(self.hidden[0], self.w_ho)
return tuple(self.hidden)
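# Quick, self-contained check of flatten_list: three [2 x 4] tensors become one
# [3 x 2 x 4] tensor, matching the stacking used throughout this module.
if __name__ == "__main__":
    parts = [torch.randn(2, 4) for _ in range(3)]
    print(flatten_list(parts).shape)  # torch.Size([3, 2, 4])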
| sentiment-discovery-master | model/RNN_utils/RNN/RNNBackend.py |
import torch
# from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell
from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell
from .cells import mLSTMRNNCell, mLSTMCell
_VF = torch._C._VariableFunctions
_rnn_impls = {
'LSTM': _VF.lstm_cell,
'GRU': _VF.gru_cell,
'RNN_TANH': _VF.rnn_tanh_cell,
'RNN_RELU': _VF.rnn_relu_cell,
}
def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0):
"""
toRNNBackend stub
"""
if bidirectional:
return bidirectionalRNN(inputRNN, num_layers, dropout = dropout)
else:
return stackedRNN(inputRNN, num_layers, dropout = dropout)
def LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
LSTM stub
"""
inputRNN = RNNCell(4, input_size, hidden_size, _rnn_impls['LSTM'], 2, bias, output_size)
# inputRNN = RNNCell(4, input_size, hidden_size, LSTMCell, 2, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def GRU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
GRU stub
"""
inputRNN = RNNCell(3, input_size, hidden_size, _rnn_impls['GRU'], 1, bias, output_size)
# inputRNN = RNNCell(3, input_size, hidden_size, GRUCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def ReLU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
ReLU stub
"""
inputRNN = RNNCell(1, input_size, hidden_size, _rnn_impls['RNN_RELU'], 1, bias, output_size)
# inputRNN = RNNCell(1, input_size, hidden_size, RNNReLUCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def Tanh(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
Tanh stub
"""
    inputRNN = RNNCell(1, input_size, hidden_size, _rnn_impls['RNN_TANH'], 1, bias, output_size)
    # inputRNN = RNNCell(1, input_size, hidden_size, RNNTanhCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def mLSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
mLSTM stub
"""
print("Creating mlstm")
inputRNN = mLSTMRNNCell(input_size, hidden_size, bias=bias, output_size=output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
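# Hedged smoke test for the factory above (hypothetical sizes). Because this module
# uses relative imports, run it from the package root, e.g.
# `python -m model.RNN_utils.RNN.models`.
if __name__ == "__main__":
    rnn = mLSTM(input_size=8, hidden_size=16, num_layers=2)
    x = torch.randn(5, 2, 8)  # [seq_len][batch][features] -- not batch_first
    out, hidden = rnn(x)
    print(out.shape)  # expected torch.Size([5, 2, 16])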
| sentiment-discovery-master | model/RNN_utils/RNN/models.py |
from .models import LSTM, GRU, ReLU, Tanh, mLSTM
__all__ = ['LSTM', 'GRU', 'ReLU', 'Tanh', 'mLSTM']
| sentiment-discovery-master | model/RNN_utils/RNN/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .RNNBackend import RNNCell
# from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
_VF = torch._C._VariableFunctions
import math
class mLSTMRNNCell(RNNCell):
"""
mLSTMRNNCell stub
"""
def __init__(self, input_size, hidden_size, bias = False, output_size = None):
gate_multiplier = 4
super(mLSTMRNNCell, self).__init__(gate_multiplier, input_size, hidden_size, mLSTMCell, n_hidden_states = 2, bias = bias, output_size = output_size)
self.w_mih = nn.Parameter(torch.Tensor(self.output_size, self.input_size))
self.w_mhh = nn.Parameter(torch.Tensor(self.output_size, self.output_size))
self.reset_parameters()
def forward(self, input):
"""
mLSTMRNNCell.forward() stub
"""
#if not inited or bsz has changed this will create hidden states
self.init_hidden(input.size()[0])
hidden_state = self.hidden[0] if self.n_hidden_states == 1 else self.hidden
self.hidden = list(
self.cell(input, hidden_state, self.w_ih, self.w_hh, self.w_mih, self.w_mhh,
b_ih=self.b_ih, b_hh=self.b_hh)
)
if self.output_size != self.hidden_size:
self.hidden[0] = F.linear(self.hidden[0], self.w_ho)
return tuple(self.hidden)
def new_like(self, new_input_size=None):
if new_input_size is None:
new_input_size = self.input_size
return type(self)(
new_input_size,
self.hidden_size,
self.bias,
self.output_size)
def mLSTMCell(input, hidden, w_ih, w_hh, w_mih, w_mhh, b_ih=None, b_hh=None):
"""
mLSTMCell stub
"""
# TODO: look into fusedLSTM not getting proper results.
if input.is_cuda:
m = F.linear(input, w_mih) * F.linear(hidden[0], w_mhh)
state = _VF.lstm_cell
return state(input, (m, hidden[1]), w_ih, w_hh, b_ih, b_hh)
hx, cx = hidden
m = F.linear(input, w_mih) * F.linear(hidden[0], w_mhh)
igates = F.linear(input, w_ih, b_ih) + F.linear(m, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = igates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * F.tanh(cy)
return hy, cy
| sentiment-discovery-master | model/RNN_utils/RNN/cells.py |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from .loss_scaler import DynamicLossScaler, LossScaler
from .fp16util import model_grads_to_master_grads, master_params_to_model_params
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
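# For example (hypothetical nesting): fp16_to_fp32((h, [c, None])) returns a tuple of
# the same structure whose half tensors are cast to float; non-matching leaves such
# as None pass through unchanged.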
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
class FP16_Optimizer(object):
"""
:class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer,
and manage (dynamic) loss scaling and master weights in a manner transparent to the user.
For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance,
and changing the call to ``backward``.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# Name the FP16_Optimizer instance to replace the existing optimizer
# (recommended but not required):
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
# loss.backward() becomes:
optimizer.backward(loss)
...
Example with dynamic loss scaling::
...
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
# optional arg to control dynamic loss scaling behavior
# dynamic_loss_args={'scale_window' : 500})
# Usually, dynamic_loss_args is not necessary.
Args:
init_optimizer (torch.optim.optimizer): Existing optimizer created with the parameters to optimize. Internally, :class:`FP16_Optimizer` replaces the passed optimizer's fp16 parameters, if any, with fp32 master parameters copied from the original ones. :class:`FP16_Optimizer` also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy at the end of each :attr:`step`.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale gradients computed by the model. Any fp16 gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so ``static_loss_scale`` should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any ``static_loss_scale`` option.
dynamic_loss_args (dict, optional, default=None): Dict of kwargs that will be forwarded to the internal :class:`DynamicLossScaler` instance's constructor. Keys of this dict must match kwargs accepted by :class:`DynamicLossScaler`'s constructor. If ``dynamic_loss_args`` is unspecified, :class:`DynamicLossScaler`'s defaults will be used.
``init_optimizer`` is expected to have been constructed in the ordinary way.
It is recommended (although not required) that the newly constructed :class:`FP16_Optimizer` instance be
named to replace ``init_optimizer``, for two reasons:
First, it means that references to the same name
later in the file will not have to change.
Second, :class:`FP16_Optimizer` reserves the right (as an implementation detail) to
modify ``init_optimizer``. If you do choose a unique name for the new
:class:`FP16_Optimizer` instance, you should only work with this new instance,
because the preexisting optimizer might no longer behave as expected.
``init_optimizer`` may be any Pytorch optimizer.
It may contain a mixture of fp16 and fp32 parameters organized into any number of
``param_groups`` with different hyperparameters. The :class:`FP16_Optimizer` constructor will
ingest these ``param_groups`` and remember them.
Calls to ::
loss.backward()
must be replaced with ::
optimizer.backward(loss)
because :class:`FP16_Optimizer` requires ownership of the backward pass to implement
loss scaling and copies to master gradients.
.. note::
Loss scaling, either static or dynamic, is orthogonal to learning rate, because gradients
are downscaled before being applied. This means that adjusting the loss scale, or using
dynamic loss scaling, should not require retuning the learning rate or any other
hyperparameters.
**Advanced options**
**Closures**: :class:`FP16_Optimizer` can wrap a Pytorch optimizer that receives a closure.
See docstring for :attr:`step`.
**Gradient clipping**: Use :attr:`clip_master_grads`.
**Multiple losses**: If your model accumulates gradients from multiple losses,
this can be made more efficient by supplying ``update_master_grads=False``
to :attr:`backward`. See docstring for :attr:`backward`.
**Manually adjusting loss scale**: The current loss scale can be retrieved or set via ::
print(optimizer.loss_scale)
optimizer.loss_scale = new_loss_scale
For static loss scaling, manually adjusting the loss scale over time is a reasonable
thing to do. During later epochs, gradients may become smaller, and a
higher loss scale may be required, analogous to scheduling the learning rate. Dynamic loss
scaling is more subtle (see :class:`DynamicLossScaler`) and in this case, manually adjusting
the loss scale is not recommended.
    **Multi-GPU training**: If the wrapped ``init_optimizer`` was created from a model wrapped in
Pytorch DistributedDataParallel or Apex DistributedDataParallel, :class:`FP16_Optimizer`
should still work as intended.
"""
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None):
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.fp16_groups = []
self.fp32_from_fp16_groups = []
self.fp32_from_fp32_groups = []
for i, param_group in enumerate(init_optimizer.param_groups):
print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
for param in param_group['params']:
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
elif param.type() == 'torch.cuda.FloatTensor':
print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
fp32_from_fp16_params_this_group = [param.detach().clone().float()
for param in fp16_params_this_group]
for param in fp32_from_fp16_params_this_group:
param.requires_grad = True
param_group['params'] = fp32_from_fp16_params_this_group + fp32_params_this_group
self.fp16_groups.append(fp16_params_this_group)
self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
self.optimizer = init_optimizer#.__class__(init_optimizer.param_groups)
if dynamic_loss_scale:
self.dynamic_loss_scale = True
if dynamic_loss_args is not None:
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
else:
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
def __getstate__(self):
raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")
def __setstate__(self, state):
raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")
def zero_grad(self):
"""
Zero fp32 and fp16 parameter grads.
"""
# In principle, only the .grad attributes of the model params need to be zeroed,
# because gradients are copied into the FP32 master params. However, we zero
# all gradients owned by the optimizer, just to be safe:
self.optimizer.zero_grad()
# Zero fp16 gradients owned by the model:
for fp16_group in self.fp16_groups:
for param in fp16_group:
if param.grad is not None:
param.grad.detach_() # as in torch.optim.optimizer.zero_grad()
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
for group in self.fp32_from_fp32_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
# To consider: Integrate distributed with this wrapper by registering a hook on each variable
# that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
def _model_grads_to_master_grads(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
def _downscale_master(self):
if self.loss_scale != 1.0:
for group in self.optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.mul_(1./self.loss_scale)
def clip_master_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return torch.nn.utils.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):
raise TypeError("Using step with a closure is currently not "
"compatible with dynamic loss scaling.")
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}"
.format(scale, self.loss_scale))
return
if closure is not None:
retval = self._step_with_closure(closure)
else:
retval = self.optimizer.step()
self._master_params_to_model_params()
return retval
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
# We expect that the fp16 params are initially fresh on entering self.step(),
# so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
# is called within self.optimizer.step().
self.first_closure_call_this_step = False
else:
# If self.optimizer.step() internally calls wrapped_closure more than once,
# it may update the fp32 params after each call. However, self.optimizer
# doesn't know about the fp16 params at all. If the fp32 params get updated,
# we can't rely on self.optimizer to refresh the fp16 params. We need
# to handle that manually:
self._master_params_to_model_params()
# Our API expects the user to give us ownership of the backward() call by
# replacing all calls to loss.backward() with optimizer.backward(loss).
# This requirement holds whether or not the call to backward() is made within a closure.
# If the user is properly calling optimizer.backward(loss) within "closure,"
# calling closure() here will give the fp32 master params fresh gradients
# for the optimizer to play with, so all wrapped_closure needs to do is call
# closure() and return the loss.
temp_loss = closure()
return temp_loss
retval = self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
return retval
def backward(self, loss, update_master_grads=True):
"""
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
"""
# To consider: try multiple backward passes using retain_grad=True to find
# a loss scale that works. After you find a loss scale that works, do a final dummy
# backward pass with retain_graph=False to tear down the graph. Doing this would avoid
# discarding the iteration, but probably wouldn't improve overall efficiency.
self.loss_scaler.backward(loss.float())
if update_master_grads:
self.update_master_grads()
def update_master_grads(self):
"""
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._model_grads_to_master_grads()
self._downscale_master()
def inspect_master_grad_data(self):
"""
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
"""
        raise NotImplementedError("Currently not implemented, working on it...")
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
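# Hedged end-to-end sketch (hypothetical shapes). FP16_Optimizer requires a CUDA
# device, and this module's relative imports mean the snippet should be run from
# the package root rather than as a standalone script.
if __name__ == "__main__":
    if torch.cuda.is_available():
        model = FP16_Module(nn.Linear(16, 4)).cuda()
        optimizer = FP16_Optimizer(torch.optim.SGD(model.parameters(), lr=1e-2),
                                   dynamic_loss_scale=True)
        loss = model(torch.randn(8, 16, device='cuda')).sum()
        optimizer.zero_grad()
        optimizer.backward(loss)  # replaces loss.backward(); handles scaling + master grads
        optimizer.step()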
| sentiment-discovery-master | fp16/fp16.py |
from .fp16 import *
from .loss_scaler import *
| sentiment-discovery-master | fp16/__init__.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Model wrapper that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
'''
Designed to work with network_to_half.
BatchNorm layers need parameters in single precision.
Find all layers and convert them back to float. This can't
be done with built in .apply as that function will apply
fn to all modules, parameters, and buffers. Thus we wouldn't
be able to guard the float conversion based on the module type.
'''
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
def backwards_debug_hook(grad):
raise RuntimeError("master_params recieved a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
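# --- Editor's hedged smoke test of the helpers above (not part of the original
# file). It assumes the module's own imports (torch, Variable) as in the file
# header. Real mixed-precision use would call .half() on a CUDA model; a plain
# FP32 CPU model is used here only so the sketch runs anywhere, and the loss
# scale of 128 is illustrative.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    model_params, master_params = prep_param_lists(model)
    optimizer = torch.optim.SGD(master_params, lr=1e-3)
    scale = 128.0
    loss = model(torch.randn(8, 4)).sum() * scale  # scale up to protect small grads
    model.zero_grad()
    loss.backward()
    model_grads_to_master_grads(model_params, master_params)
    for p in master_params:
        p.grad.data.mul_(1.0 / scale)  # unscale the FP32 master gradients
    optimizer.step()
    master_params_to_model_params(model_params, master_params)  # copy back to the model
    print("unscaled loss:", to_python_float(loss) / scale)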
| sentiment-discovery-master | fp16/fp16util.py |
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x):
return False
# `overflow` is a boolean indicating whether the gradients overflowed
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x):
try:
# Stopgap until upstream fixes sum() on HalfTensors
cpu_sum = float(x.float().sum())
# cpu_sum = float(x.sum())
# print(cpu_sum)
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is a boolean indicating whether the gradients overflowed
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
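# Worked example (editor's note): with scale_factor=2 and scale_window=1000,
# an overflow halves the scale immediately (floored at 1), while 1000
# consecutive overflow-free iterations double it again.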
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
has_overflow = loss_scaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
| sentiment-discovery-master | fp16/loss_scaler.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gym, minisat # you need the latter to run __init__.py and register the environment.
from collections import defaultdict
from gqsat.utils import build_argparser
from gqsat.agents import MiniSATAgent
DEBUG_ROLLOUTS = 10 # if --debug flag is present, run this many of rollouts, not the whole problem folder
def main():
parser = build_argparser()
args = parser.parse_args()
# key is the name of the problem file, value is a list with two values [minisat_steps_no_restarts, minisat_steps_with_restarts]
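# e.g. a resulting METADATA line could read (hypothetical step counts):
#   uf50-01.cnf,1034,980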
results = defaultdict(list)
for with_restarts in [False, True]:
env = gym.make(
"sat-v0",
args=args,
problems_paths=args.eval_problems_paths,
test_mode=True,
with_restarts=with_restarts,
)
agent = MiniSATAgent()
print(f"Testing agent {agent}... with_restarts is set to {with_restarts}")
pr = 0
while env.test_to != 0 or pr == 0:
observation = env.reset()
done = False
while not done:
action = agent.act(observation)
observation, reward, done, info = env.step(action, dummy=True)
print(
f'Rollout {pr+1}, steps {env.step_ctr}, num_restarts {info["num_restarts"]}.'
)
results[env.curr_problem].append(env.step_ctr)
pr += 1
if args.debug and pr >= DEBUG_ROLLOUTS:
break
env.close()
return results, args
from os import path
if __name__ == "__main__":
results, args = main()
for pdir in args.eval_problems_paths.split(":"):
with open(os.path.join(pdir, "METADATA"), "w") as f:
for el in sorted(results.keys()):
cur_dir, pname = path.split(el)
if path.realpath(pdir) == path.realpath(cur_dir):
# no restarts/with restarts
f.write(f"{pname},{results[el][0]},{results[el][1]}\n")
| GraphQSat-main | add_metadata.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import pickle
import yaml
from gqsat.utils import build_eval_argparser, evaluate
from gqsat.models import SatModel
from gqsat.agents import GraphAgent
import os
import time
if __name__ == "__main__":
parser = build_eval_argparser()
eval_args = parser.parse_args()
with open(os.path.join(eval_args.model_dir, "status.yaml"), "r") as f:
train_status = yaml.load(f, Loader=yaml.Loader)
args = train_status["args"]
# use same args used for training and overwrite them with those asked for eval
for k, v in vars(eval_args).items():
setattr(args, k, v)
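# e.g. if training used --core-steps 8 (the training default) but eval passes
# --core-steps 16, the loaded model will run 16 message passing iterations
# (applied to net.steps below)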
args.device = (
torch.device("cpu")
if args.no_cuda or not torch.cuda.is_available()
else torch.device("cuda")
)
net = SatModel.load_from_yaml(os.path.join(args.model_dir, "model.yaml")).to(
args.device
)
# modify core steps for the eval as requested
if args.core_steps != -1:
# -1 means use the same number as for training
net.steps = args.core_steps
net.load_state_dict(
torch.load(os.path.join(args.model_dir, args.model_checkpoint)), strict=False
)
agent = GraphAgent(net, args)
st_time = time.time()
scores, eval_metadata, _ = evaluate(agent, args)
end_time = time.time()
print(
f"Evaluation is over. It took {end_time - st_time} seconds for the whole procedure"
)
# with open("../eval_results.pkl", "wb") as f:
# pickle.dump(scores, f)
for pset, pset_res in scores.items():
res_list = [el for el in pset_res.values()]
print(f"Results for {pset}")
print(
f"median_relative_score: {np.nanmedian(res_list)}, mean_relative_score: {np.mean(res_list)}"
)
| GraphQSat-main | evaluate.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from collections import deque
import pickle
import copy
import yaml
from gqsat.utils import build_argparser, evaluate, make_env
from gqsat.models import EncoderCoreDecoder, SatModel
from gqsat.agents import GraphAgent
from gqsat.learners import GraphLearner
from gqsat.buffer import ReplayGraphBuffer
from tensorboardX import SummaryWriter
def save_training_state(
model,
learner,
episodes_done,
transitions_seen,
best_eval_so_far,
args,
in_eval_mode=False,
):
# save the model
model_path = os.path.join(args.logdir, f"model_{learner.step_ctr}.chkp")
torch.save(model.state_dict(), model_path)
# save the experience replay
buffer_path = os.path.join(args.logdir, "buffer.pkl")
with open(buffer_path, "wb") as f:
pickle.dump(learner.buffer, f)
# save important parameters
train_status = {
"step_ctr": learner.step_ctr,
"latest_model_name": model_path,
"buffer_path": buffer_path,
"args": args,
"episodes_done": episodes_done,
"logdir": args.logdir,
"transitions_seen": transitions_seen,
"optimizer_state_dict": learner.optimizer.state_dict(),
"optimizer_class": type(learner.optimizer),
"best_eval_so_far": best_eval_so_far,
"scheduler_class": type(learner.lr_scheduler),
"scheduler_state_dict": learner.lr_scheduler.state_dict(),
"in_eval_mode": in_eval_mode,
}
status_path = os.path.join(args.logdir, "status.yaml")
with open(status_path, "w") as f:
yaml.dump(train_status, f, default_flow_style=False)
return status_path
def get_annealed_eps(n_trans, args):
if n_trans < args.init_exploration_steps:
return args.eps_init
if n_trans > args.eps_decay_steps:
return args.eps_final
else:
assert n_trans - args.init_exploration_steps >= 0
return (args.eps_init - args.eps_final) * (
1 - (n_trans - args.init_exploration_steps) / args.eps_decay_steps
) + args.eps_final
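# Editor's worked example: with the defaults from gqsat/utils.py (eps_init=1.0,
# eps_final=0.1, init_exploration_steps=100, eps_decay_steps=5000), eps stays
# at 1.0 for the first 100 transitions, then decays linearly, and is clamped
# at 0.1 once n_trans exceeds 5000.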
def arg2activation(activ_str):
if activ_str == "relu":
return torch.nn.ReLU
elif activ_str == "tanh":
return torch.nn.Tanh
elif activ_str == "leaky_relu":
return torch.nn.LeakyReLU
else:
raise ValueError("Unknown activation function")
if __name__ == "__main__":
parser = build_argparser()
args = parser.parse_args()
args.device = (
torch.device("cpu")
if args.no_cuda or not torch.cuda.is_available()
else torch.device("cuda")
)
if args.status_dict_path:
# training mode, resuming from the status dict
# load the train status dict
with open(args.status_dict_path, "r") as f:
train_status = yaml.load(f, Loader=yaml.Loader)
eval_resume_signal = train_status["in_eval_mode"]
# swap the args
args = train_status["args"]
# load the model
net = SatModel.load_from_yaml(os.path.join(args.logdir, "model.yaml")).to(
args.device
)
net.load_state_dict(torch.load(train_status["latest_model_name"]))
target_net = SatModel.load_from_yaml(
os.path.join(args.logdir, "model.yaml")
).to(args.device)
target_net.load_state_dict(net.state_dict())
# load the buffer
with open(train_status["buffer_path"], "rb") as f:
buffer = pickle.load(f)
learner = GraphLearner(net, target_net, buffer, args)
learner.step_ctr = train_status["step_ctr"]
learner.optimizer = train_status["optimizer_class"](
net.parameters(), lr=args.lr
)
learner.optimizer.load_state_dict(train_status["optimizer_state_dict"])
learner.lr_scheduler = train_status["scheduler_class"](
learner.optimizer, args.lr_scheduler_frequency, args.lr_scheduler_gamma
)
learner.lr_scheduler.load_state_dict(train_status["scheduler_state_dict"])
# load misc training status params
n_trans = train_status["transitions_seen"]
ep = train_status["episodes_done"]
env = make_env(args.train_problems_paths, args, test_mode=False)
agent = GraphAgent(net, args)
best_eval_so_far = train_status["best_eval_so_far"]
else:
# training mode, learning from scratch or continuing learning from some previously trained model
writer = SummaryWriter()
args.logdir = writer.logdir
model_save_path = os.path.join(args.logdir, "model.yaml")
best_eval_so_far = (
{args.eval_problems_paths: -1}
if not args.eval_separately_on_each
else {k: -1 for k in args.eval_problems_paths.split(":")}
)
env = make_env(args.train_problems_paths, args, test_mode=False)
if args.model_dir is not None:
# load an existing model and continue training
net = SatModel.load_from_yaml(
os.path.join(args.model_dir, "model.yaml")
).to(args.device)
net.load_state_dict(
torch.load(os.path.join(args.model_dir, args.model_checkpoint))
)
else:
# learning from scratch
net = EncoderCoreDecoder(
(env.vertex_in_size, env.edge_in_size, env.global_in_size),
core_out_dims=(
args.core_v_out_size,
args.core_e_out_size,
args.core_e_out_size,
),
out_dims=(2, None, None),
core_steps=args.core_steps,
dec_out_dims=(
args.decoder_v_out_size,
args.decoder_e_out_size,
args.decoder_e_out_size,
),
encoder_out_dims=(
args.encoder_v_out_size,
args.encoder_e_out_size,
args.encoder_e_out_size,
),
save_name=model_save_path,
e2v_agg=args.e2v_aggregator,
n_hidden=args.n_hidden,
hidden_size=args.hidden_size,
activation=arg2activation(args.activation),
independent_block_layers=args.independent_block_layers,
).to(args.device)
print(str(net))
target_net = copy.deepcopy(net)
buffer = ReplayGraphBuffer(args, args.buffer_size)
agent = GraphAgent(net, args)
n_trans = 0
ep = 0
learner = GraphLearner(net, target_net, buffer, args)
eval_resume_signal = False
print(args.__str__())
loss = None
while learner.step_ctr < args.batch_updates:
ret = 0
obs = env.reset(args.train_time_max_decisions_allowed)
done = env.isSolved
if args.history_len > 1:
raise NotImplementedError(
"History len greater than one is not implemented for graph nets."
)
hist_buffer = deque(maxlen=args.history_len)
for _ in range(args.history_len):
hist_buffer.append(obs)
ep_step = 0
save_flag = False
while not done:
annealed_eps = get_annealed_eps(n_trans, args)
action = agent.act(hist_buffer, eps=annealed_eps)
next_obs, r, done, _ = env.step(action)
buffer.add_transition(obs, action, r, done)
obs = next_obs
hist_buffer.append(obs)
ret += r
if (not n_trans % args.step_freq) and (
buffer.ctr > max(args.init_exploration_steps, args.bsize + 1)
or buffer.full
):
step_info = learner.step()
if annealed_eps is not None:
step_info["annealed_eps"] = annealed_eps
# we increment the step_ctr in learner.step(), that's why we subtract 1 when logging to TensorBoard
# we do not need to subtract 1 when checking the save frequency since step 0 has already passed
if not learner.step_ctr % args.save_freq:
# save the exact model you evaluated and make another save after the episode ends
# to have proper transitions in the replay buffer to pickle
status_path = save_training_state(
net,
learner,
ep - 1,
n_trans,
best_eval_so_far,
args,
in_eval_mode=eval_resume_signal,
)
save_flag = True
if (
args.env_name == "sat-v0" and not learner.step_ctr % args.eval_freq
) or eval_resume_signal:
scores, _, eval_resume_signal = evaluate(
agent, args, include_train_set=False
)
for sc_key, sc_val in scores.items():
# list can be empty if we hit the time limit for eval
if len(sc_val) > 0:
res_vals = [el for el in sc_val.values()]
median_score = np.nanmedian(res_vals)
if (
best_eval_so_far[sc_key] < median_score
or best_eval_so_far[sc_key] == -1
):
best_eval_so_far[sc_key] = median_score
writer.add_scalar(
f"data/median relative score: {sc_key}",
np.nanmedian(res_vals),
learner.step_ctr - 1,
)
writer.add_scalar(
f"data/mean relative score: {sc_key}",
np.nanmean(res_vals),
learner.step_ctr - 1,
)
writer.add_scalar(
f"data/max relative score: {sc_key}",
np.nanmax(res_vals),
learner.step_ctr - 1,
)
for k, v in best_eval_so_far.items():
writer.add_scalar(k, v, learner.step_ctr - 1)
for k, v in step_info.items():
writer.add_scalar(k, v, learner.step_ctr - 1)
writer.add_scalar("data/num_episodes", ep, learner.step_ctr - 1)
n_trans += 1
ep_step += 1
writer.add_scalar("data/ep_return", ret, learner.step_ctr - 1)
writer.add_scalar("data/ep_steps", env.step_ctr, learner.step_ctr - 1)
writer.add_scalar("data/ep_last_reward", r, learner.step_ctr - 1)
print(f"Episode {ep + 1}: Return {ret}.")
ep += 1
if save_flag:
status_path = save_training_state(
net,
learner,
ep - 1,
n_trans,
best_eval_so_far,
args,
in_eval_mode=eval_resume_signal,
)
save_flag = False
| GraphQSat-main | dqn.py |
#################################################################################################################################
# All the source files in `minisat` folder were initially copied and later modified from https://github.com/feiwang3311/minisat #
# (which was taken from the MiniSat source at https://github.com/niklasso/minisat). The MiniSAT license is below. #
#################################################################################################################################
# MiniSat -- Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson
# Copyright (c) 2007-2010 Niklas Sorensson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .minisat.gym.MiniSATEnv import gym_sat_Env
from gym.envs.registration import register
# Graph-Q-SAT UPD: register the sat environment
register(id="sat-v0", entry_point="minisat.minisat.gym.MiniSATEnv:gym_sat_Env")
| GraphQSat-main | minisat/__init__.py |
#################################################################################################################################
# All the source files in `minisat` folder were initially copied and later modified from https://github.com/feiwang3311/minisat #
# (which was taken from the MiniSat source at https://github.com/niklasso/minisat). The MiniSAT license is below. #
#################################################################################################################################
# MiniSat -- Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson
# Copyright (c) 2007-2010 Niklas Sorensson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | GraphQSat-main | minisat/minisat/__init__.py |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition(".")[0]
mname = ".".join((pkg, "_GymSolver")).lstrip(".")
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module("_GymSolver")
_GymSolver = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module(
"_GymSolver", [dirname(__file__)]
)
except ImportError:
import _GymSolver
return _GymSolver
try:
_mod = imp.load_module("_GymSolver", fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_GymSolver = swig_import_helper()
del swig_import_helper
else:
import _GymSolver
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == "SwigPyObject":
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if not static:
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError(
"'%s' object has no attribute '%s'" % (class_type.__name__, name)
)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (
self.__class__.__module__,
self.__class__.__name__,
strthis,
)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class GymSolver(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, GymSolver, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, GymSolver, name)
__repr__ = _swig_repr
def __init__(self, arg2, arg3, arg4):
this = _GymSolver.new_GymSolver(arg2, arg3, arg4)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def step(self, arg2):
return _GymSolver.GymSolver_step(self, arg2)
def getReward(self):
return _GymSolver.GymSolver_getReward(self)
def getDone(self):
return _GymSolver.GymSolver_getDone(self)
def getMetadata(self):
return _GymSolver.GymSolver_getMetadata(self)
def getAssignments(self):
return _GymSolver.GymSolver_getAssignments(self)
def getActivities(self):
return _GymSolver.GymSolver_getActivities(self)
def getClauses(self):
return _GymSolver.GymSolver_getClauses(self)
__swig_destroy__ = _GymSolver.delete_GymSolver
__del__ = lambda self: None
GymSolver_swigregister = _GymSolver.GymSolver_swigregister
GymSolver_swigregister(GymSolver)
# This file is compatible with both classic and new-style classes.
| GraphQSat-main | minisat/minisat/gym/GymSolver.py |
#################################################################################################################################
# All the source files in `minisat` folder were initially copied and later modified from https://github.com/feiwang3311/minisat #
# (which was taken from the MiniSat source at https://github.com/niklasso/minisat). The MiniSAT license is below. #
#################################################################################################################################
# MiniSat -- Copyright (c) 2003-2006, Niklas Een, Niklas Sorensson
# Copyright (c) 2007-2010 Niklas Sorensson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Graph-Q-SAT-UPD. This file is heavily changed and supports variable-sized SAT problems, multiple datasets
# and generates graph-state representations for Graph-Q-SAT.
import numpy as np
import gym
import random
from os import listdir
from os.path import join, realpath, split
from .GymSolver import GymSolver
import sys
MINISAT_DECISION_CONSTANT = 32767
VAR_ID_IDX = (
0 # put 1 at the position of this index to indicate that the node is a variable
)
class gym_sat_Env(gym.Env):
def __init__(
self,
problems_paths,
args,
test_mode=False,
max_cap_fill_buffer=True,
penalty_size=None,
with_restarts=None,
compare_with_restarts=None,
max_data_limit_per_set=None,
):
self.problems_paths = [realpath(el) for el in problems_paths.split(":")]
self.args = args
self.test_mode = test_mode
self.max_data_limit_per_set = max_data_limit_per_set
pre_test_files = [
[join(dir, f) for f in listdir(dir) if f.endswith(".cnf")]
for dir in self.problems_paths
]
if self.max_data_limit_per_set is not None:
pre_test_files = [
np.random.choice(el, size=max_data_limit_per_set, replace=False)
for el in pre_test_files
]
self.test_files = [sl for el in pre_test_files for sl in el]
self.metadata = {}
self.max_decisions_cap = float("inf")
self.max_cap_fill_buffer = max_cap_fill_buffer
self.penalty_size = penalty_size if penalty_size is not None else 0.0001
self.with_restarts = True if with_restarts is None else with_restarts
self.compare_with_restarts = (
False if compare_with_restarts is None else compare_with_restarts
)
try:
for dir in self.problems_paths:
self.metadata[dir] = {}
with open(join(dir, "METADATA")) as f:
for l in f:
k, rscore, msscore = l.split(",")
self.metadata[dir][k] = [int(rscore), int(msscore)]
except Exception as e:
print(e)
print("No metadata available, that is fine for metadata generator.")
self.metadata = None
self.test_file_num = len(self.test_files)
self.test_to = 0
self.step_ctr = 0
self.curr_problem = None
self.global_in_size = 1
self.vertex_in_size = 2
self.edge_in_size = 2
self.max_clause_len = 0
def parse_state_as_graph(self):
# if S is already Done, should return a dummy state to store in the buffer.
if self.S.getDone():
# to not mess with the c++ code, let's build a dummy graph which will not be used in the q updates anyways
# since we multiply (1-dones)
empty_state = self.get_dummy_state()
self.decision_to_var_mapping = {
el: el
for sl in range(empty_state[0].shape[0])
for el in (2 * sl, 2 * sl + 1)
}
return empty_state, True
# S is not yet Done, parse and return real state
(
total_var,
_,
current_depth,
n_init_clauses,
num_restarts,
_,
) = self.S.getMetadata()
var_assignments = self.S.getAssignments()
num_var = sum([1 for el in var_assignments if el == 2])
# only valid decisions
valid_decisions = [
el
for i in range(len(var_assignments))
for el in (2 * i, 2 * i + 1)
if var_assignments[i] == 2
]
valid_vars = [
idx for idx in range(len(var_assignments)) if var_assignments[idx] == 2
]
# we need remapping since we keep only unassigned vars in the observations,
# however, the environment does not know about this remapping; it expects the original variable indices
vars_remapping = {el: i for i, el in enumerate(valid_vars)}
self.decision_to_var_mapping = {
i: val_decision for i, val_decision in enumerate(valid_decisions)
}
# we should return the vertex/edge numpy objects from the c++ code to make this faster
clauses = self.S.getClauses()
if len(clauses) == 0:
# this is to avoid feeding empty data structures to our model
# when the MiniSAT environment returns an empty graph
# it might return an empty graph since we do not construct it when
# step > max_cap and max_cap can be zero (all decisions are made by MiniSAT's VSIDS).
empty_state = self.get_dummy_state()
self.decision_to_var_mapping = {
el: el
for sl in range(empty_state[0].shape[0])
for el in (2 * sl, 2 * sl + 1)
}
return empty_state, False
clause_counter = 0
clauses_lens = [len(cl) for cl in clauses]
self.max_clause_len = max(clauses_lens)
edge_data = np.zeros((sum(clauses_lens) * 2, 2), dtype=np.float32)
connectivity = np.zeros((2, edge_data.shape[0]), dtype=np.int64)
ec = 0
for cl in clauses:
for l in cl:
# if positive, create a [0,1] edge from the var to the current clause, else [1,0]
# data = [1, 0] if l==True else [0, 1]
# this is not a typo, we want two edges here
edge_data[ec : ec + 2, int(l > 0)] = 1
remapped_l = vars_remapping[abs(l) - 1]
# from var to clause
connectivity[0, ec] = remapped_l
connectivity[1, ec] = num_var + clause_counter
# from clause to var
connectivity[0, ec + 1] = num_var + clause_counter
connectivity[1, ec + 1] = remapped_l
ec += 2
clause_counter += 1
vertex_data = np.zeros(
(num_var + clause_counter, self.vertex_in_size), dtype=np.float32
) # both vars and clauses are vertex in the graph
vertex_data[:num_var, VAR_ID_IDX] = 1
vertex_data[num_var:, VAR_ID_IDX + 1] = 1
return (
(
vertex_data,
edge_data,
connectivity,
np.zeros((1, self.global_in_size), dtype=np.float32),
),
False,
)
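# Editor's worked example: for a single clause (x1 OR NOT x2) with both
# variables unassigned, num_var is 2 and the clause vertex gets index 2;
# four directed edges are created: var 0 <-> clause 2 with edge feature
# [0, 1] (positive literal) and var 1 <-> clause 2 with feature [1, 0]
# (negative literal).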
def random_pick_satProb(self):
if self.test_mode: # in the test mode, just iterate all test files in order
filename = self.test_files[self.test_to]
self.test_to += 1
if self.test_to >= self.test_file_num:
self.test_to = 0
return filename
else: # not in test mode, return a random file in "uf20-91" folder.
return self.test_files[random.randint(0, self.test_file_num - 1)]
def reset(self, max_decisions_cap=None):
self.step_ctr = 0
if max_decisions_cap is None:
max_decisions_cap = sys.maxsize
self.max_decisions_cap = max_decisions_cap
self.curr_problem = self.random_pick_satProb()
self.S = GymSolver(self.curr_problem, self.with_restarts, max_decisions_cap)
self.max_clause_len = 0
self.curr_state, self.isSolved = self.parse_state_as_graph()
return self.curr_state
def step(self, decision, dummy=False):
# now when we drop variables, we store the mapping
# convert dropped var decision to the original decision id
if decision >= 0:
decision = self.decision_to_var_mapping[decision]
self.step_ctr += 1
if dummy:
self.S.step(MINISAT_DECISION_CONSTANT)
(
num_var,
_,
current_depth,
n_init_clauses,
num_restarts,
_,
) = self.S.getMetadata()
return (
None,
None,
self.S.getDone(),
{
"curr_problem": self.curr_problem,
"num_restarts": num_restarts,
"max_clause_len": self.max_clause_len,
},
)
if self.step_ctr > self.max_decisions_cap:
while not self.S.getDone():
self.S.step(MINISAT_DECISION_CONSTANT)
if self.max_cap_fill_buffer:
# return every next state when param is true
break
self.step_ctr += 1
else:
# if we are here, we are not filling the buffer and we need to reduce the counter by one to
# correct for the increment for the last state
self.step_ctr -= 1
else:
# TODO for debugging purposes, we need to add all the checks
# I removed this action_set checks for performance optimisation
# var_values = self.curr_state[0][:, 2]
# var_values = self.S.getAssignments()
# action_set = [
# a
# for v_idx, v in enumerate(var_values)
# for a in (v_idx * 2, v_idx * 2 + 1)
# if v == 2
# ]
if decision < 0: # this is to say that let minisat pick the decision
decision = MINISAT_DECISION_CONSTANT
elif (
decision % 2 == 0
): # this is to say that pick decision and assign positive value
decision = int(decision / 2 + 1)
else: # this is to say that pick decision and assign negative value
decision = 0 - int(decision / 2 + 1)
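# e.g. decision index 4 -> MiniSAT literal +3 (assign variable 3 True),
# decision index 5 -> MiniSAT literal -3 (assign variable 3 False)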
# if (decision == MINISAT_DECISION_CONSTANT) or orig_decision in action_set:
self.S.step(decision)
# else:
# raise ValueError("Illegal action")
self.curr_state, self.isSolved = self.parse_state_as_graph()
(
num_var,
_,
current_depth,
n_init_clauses,
num_restarts,
_,
) = self.S.getMetadata()
# if we fill the buffer, the rewards are the same as GQSAT was making decisions
if self.step_ctr > self.max_decisions_cap and not self.max_cap_fill_buffer:
# if we do not fill the buffer, but play till the end, we still need to penalize
# since GQSAT hasn't solved the problem
step_reward = -self.penalty_size
else:
step_reward = 0 if self.isSolved else -self.penalty_size
return (
self.curr_state,
step_reward,
self.isSolved,
{
"curr_problem": self.curr_problem,
"num_restarts": num_restarts,
"max_clause_len": self.max_clause_len,
},
)
def normalized_score(self, steps, problem):
pdir, pname = split(problem)
no_restart_steps, restart_steps = self.metadata[pdir][pname]
if self.compare_with_restarts:
return restart_steps / steps
else:
return no_restart_steps / steps
def get_dummy_state(self):
DUMMY_V = np.zeros((2, self.vertex_in_size), dtype=np.float32)
DUMMY_V[:, VAR_ID_IDX] = 1
DUMMY_STATE = (
DUMMY_V,
np.zeros((2, self.edge_in_size), dtype=np.float32),
np.eye(2, dtype=np.int64),
np.zeros((1, self.global_in_size), dtype=np.float32),
)
return (
DUMMY_STATE[0],
DUMMY_STATE[1],
DUMMY_STATE[2],
np.zeros((1, self.global_in_size), dtype=np.float32),
)
| GraphQSat-main | minisat/minisat/gym/MiniSATEnv.py |
### The code in this file was originally copied from the Pytorch Geometric library and modified later:
### https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/nn/meta.html#MetaLayer
### Pytorch geometric license is below
# Copyright (c) 2019 Matthias Fey <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, LayerNorm
from torch_scatter import scatter_mean, scatter_add
from torch_geometric.nn.meta import MetaLayer
from torch import nn
import inspect
import yaml
import sys
class ModifiedMetaLayer(MetaLayer):
def forward(
self, x, edge_index, edge_attr=None, u=None, v_indices=None, e_indices=None
):
row, col = edge_index
if self.edge_model is not None:
edge_attr = self.edge_model(x[row], x[col], edge_attr, u, e_indices)
if self.node_model is not None:
x = self.node_model(x, edge_index, edge_attr, u, v_indices)
if self.global_model is not None:
u = self.global_model(x, edge_attr, u, v_indices, e_indices)
return x, edge_attr, u
class SatModel(torch.nn.Module):
def __init__(self, save_name=None):
super().__init__()
if save_name is not None:
self.save_to_yaml(save_name)
@classmethod
def save_to_yaml(cls, model_name):
# -2 is here because I want to know how many layers below lies the final child and get its init params.
# I do not need nn.Module and 'object'
# this WILL NOT work with multiple inheritance of the leaf children
frame, filename, line_number, function_name, lines, index = inspect.stack()[
len(cls.mro()) - 2
]
args, _, _, values = inspect.getargvalues(frame)
save_dict = {
"class_name": values["self"].__class__.__name__,
"call_args": {
k: values[k] for k in args if k != "self" and k != "save_name"
},
}
with open(model_name, "w") as f:
yaml.dump(save_dict, f, default_flow_style=False)
@staticmethod
def load_from_yaml(fname):
with open(fname, "r") as f:
res = yaml.load(f, Loader=yaml.Loader)
return getattr(sys.modules[__name__], res["class_name"])(**res["call_args"])
def get_mlp(
in_size,
out_size,
n_hidden,
hidden_size,
activation=nn.LeakyReLU,
activate_last=True,
layer_norm=True,
):
arch = []
l_in = in_size
for l_idx in range(n_hidden):
arch.append(Lin(l_in, hidden_size))
arch.append(activation())
l_in = hidden_size
arch.append(Lin(l_in, out_size))
if activate_last:
arch.append(activation())
if layer_norm:
arch.append(LayerNorm(out_size))
return Seq(*arch)
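# Editor's worked example: get_mlp(8, 16, n_hidden=1, hidden_size=64) builds
# Linear(8, 64) -> activation -> Linear(64, 16) -> activation -> LayerNorm(16)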
class GraphNet(SatModel):
def __init__(
self,
in_dims,
out_dims,
independent=False,
save_name=None,
e2v_agg="sum",
n_hidden=1,
hidden_size=64,
activation=ReLU,
layer_norm=True,
):
super().__init__(save_name)
self.e2v_agg = e2v_agg
if e2v_agg not in ["sum", "mean"]:
raise ValueError("Unknown aggregation function.")
v_in = in_dims[0]
e_in = in_dims[1]
u_in = in_dims[2]
v_out = out_dims[0]
e_out = out_dims[1]
u_out = out_dims[2]
if independent:
self.edge_mlp = get_mlp(
e_in,
e_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
self.node_mlp = get_mlp(
v_in,
v_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
self.global_mlp = get_mlp(
u_in,
u_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
else:
self.edge_mlp = get_mlp(
e_in + 2 * v_in + u_in,
e_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
self.node_mlp = get_mlp(
v_in + e_out + u_in,
v_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
self.global_mlp = get_mlp(
u_in + v_out + e_out,
u_out,
n_hidden,
hidden_size,
activation=activation,
layer_norm=layer_norm,
)
self.independent = independent
def edge_model(src, dest, edge_attr, u=None, e_indices=None):
# source, target: [E, F_x], where E is the number of edges.
# edge_attr: [E, F_e]
if self.independent:
return self.edge_mlp(edge_attr)
out = torch.cat([src, dest, edge_attr, u[e_indices]], 1)
return self.edge_mlp(out)
def node_model(x, edge_index, edge_attr, u=None, v_indices=None):
# x: [N, F_x], where N is the number of nodes.
# edge_index: [2, E] with max entry N - 1.
# edge_attr: [E, F_e]
if self.independent:
return self.node_mlp(x)
row, col = edge_index
if self.e2v_agg == "sum":
out = scatter_add(edge_attr, row, dim=0, dim_size=x.size(0))
elif self.e2v_agg == "mean":
out = scatter_mean(edge_attr, row, dim=0, dim_size=x.size(0))
out = torch.cat([x, out, u[v_indices]], dim=1)
return self.node_mlp(out)
def global_model(x, edge_attr, u, v_indices, e_indices):
if self.independent:
return self.global_mlp(u)
out = torch.cat(
[
u,
scatter_mean(x, v_indices, dim=0),
scatter_mean(edge_attr, e_indices, dim=0),
],
dim=1,
)
return self.global_mlp(out)
self.op = ModifiedMetaLayer(edge_model, node_model, global_model)
def forward(
self, x, edge_index, edge_attr=None, u=None, v_indices=None, e_indices=None
):
return self.op(x, edge_index, edge_attr, u, v_indices, e_indices)
class EncoderCoreDecoder(SatModel):
def __init__(
self,
in_dims,
core_out_dims,
out_dims,
core_steps=1,
encoder_out_dims=None,
dec_out_dims=None,
save_name=None,
e2v_agg="sum",
n_hidden=1,
hidden_size=64,
activation=ReLU,
independent_block_layers=1,
):
super().__init__(save_name)
# all dims are tuples with (v,e) feature sizes
self.steps = core_steps
# if dec_out_dims is None, there will not be a decoder
self.in_dims = in_dims
self.core_out_dims = core_out_dims
self.dec_out_dims = dec_out_dims
self.layer_norm = True
self.encoder = None
if encoder_out_dims is not None:
self.encoder = GraphNet(
in_dims,
encoder_out_dims,
independent=True,
n_hidden=independent_block_layers,
hidden_size=hidden_size,
activation=activation,
layer_norm=self.layer_norm,
)
core_in_dims = in_dims if self.encoder is None else encoder_out_dims
self.core = GraphNet(
(
core_in_dims[0] + core_out_dims[0],
core_in_dims[1] + core_out_dims[1],
core_in_dims[2] + core_out_dims[2],
),
core_out_dims,
e2v_agg=e2v_agg,
n_hidden=n_hidden,
hidden_size=hidden_size,
activation=activation,
layer_norm=self.layer_norm,
)
if dec_out_dims is not None:
self.decoder = GraphNet(
core_out_dims,
dec_out_dims,
independent=True,
n_hidden=independent_block_layers,
hidden_size=hidden_size,
activation=activation,
layer_norm=self.layer_norm,
)
pre_out_dims = core_out_dims if self.decoder is None else dec_out_dims
self.vertex_out_transform = (
Lin(pre_out_dims[0], out_dims[0]) if out_dims[0] is not None else None
)
self.edge_out_transform = (
Lin(pre_out_dims[1], out_dims[1]) if out_dims[1] is not None else None
)
self.global_out_transform = (
Lin(pre_out_dims[2], out_dims[2]) if out_dims[2] is not None else None
)
def get_init_state(self, n_v, n_e, n_u, device):
return (
torch.zeros((n_v, self.core_out_dims[0]), device=device),
torch.zeros((n_e, self.core_out_dims[1]), device=device),
torch.zeros((n_u, self.core_out_dims[2]), device=device),
)
def forward(self, x, edge_index, edge_attr, u, v_indices=None, e_indices=None):
# if v_indices and e_indices are both None, then we have only one graph without a batch
if v_indices is None and e_indices is None:
v_indices = torch.zeros(x.shape[0], dtype=torch.long, device=x.device)
e_indices = torch.zeros(
edge_attr.shape[0], dtype=torch.long, device=edge_attr.device
)
if self.encoder is not None:
x, edge_attr, u = self.encoder(
x, edge_index, edge_attr, u, v_indices, e_indices
)
latent0 = (x, edge_attr, u)
latent = self.get_init_state(
x.shape[0], edge_attr.shape[0], u.shape[0], x.device
)
for st in range(self.steps):
latent = self.core(
torch.cat([latent0[0], latent[0]], dim=1),
edge_index,
torch.cat([latent0[1], latent[1]], dim=1),
torch.cat([latent0[2], latent[2]], dim=1),
v_indices,
e_indices,
)
if self.decoder is not None:
latent = self.decoder(
latent[0], edge_index, latent[1], latent[2], v_indices, e_indices
)
v_out = (
latent[0]
if self.vertex_out_transform is None
else self.vertex_out_transform(latent[0])
)
e_out = (
latent[1]
if self.edge_out_transform is None
else self.edge_out_transform(latent[1])
)
u_out = (
latent[2]
if self.global_out_transform is None
else self.global_out_transform(latent[2])
)
return v_out, e_out, u_out
| GraphQSat-main | gqsat/models.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | GraphQSat-main | gqsat/__init__.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from minisat.minisat.gym.MiniSATEnv import VAR_ID_IDX
class Agent(object):
def act(self, state):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class MiniSATAgent(Agent):
"""Use MiniSAT agent to solve the problem"""
def act(self, observation):
return -1 # this will make GymSolver use VSIDS to make a decision
def __str__(self):
return "<MiniSAT Agent>"
class RandomAgent(Agent):
"""Uniformly sample the action space"""
def __init__(self, action_space):
super().__init__()
self.action_space = action_space
def act(self, observation):
return self.action_space.sample()
def __str__(self):
return "<Random Agent>"
class GraphAgent:
def __init__(self, net, args):
self.net = net
self.device = args.device
self.debug = args.debug
self.qs_buffer = []
def forward(self, hist_buffer):
self.net.eval()
with torch.no_grad():
vdata, edata, conn, udata = hist_buffer[0]
vdata = torch.tensor(vdata, device=self.device)
edata = torch.tensor(edata, device=self.device)
udata = torch.tensor(udata, device=self.device)
conn = torch.tensor(conn, device=self.device)
vout, eout, _ = self.net(x=vdata, edge_index=conn, edge_attr=edata, u=udata)
res = vout[vdata[:, VAR_ID_IDX] == 1]
if self.debug:
self.qs_buffer.append(res.flatten().cpu().numpy())
return res
def act(self, hist_buffer, eps):
if np.random.random() < eps:
vars_to_decide = np.where(hist_buffer[-1][0][:, VAR_ID_IDX] == 1)[0]
acts = [a for v in vars_to_decide for a in (v * 2, v * 2 + 1)]
return int(np.random.choice(acts))
else:
qs = self.forward(hist_buffer)
return self.choose_actions(qs)
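# Editor's note: actions are literal indices, 2*v to set variable v True and
# 2*v + 1 to set it False, matching the decoding in MiniSATEnv.step().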
def choose_actions(self, qs):
return qs.flatten().argmax().item()
| GraphQSat-main | gqsat/agents.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from torch_scatter import scatter_max
import torch
from torch.optim.lr_scheduler import StepLR
from minisat.minisat.gym.MiniSATEnv import VAR_ID_IDX
class GraphLearner:
def __init__(self, net, target, buffer, args):
self.net = net
self.target = target
self.target.eval()
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=args.lr)
self.lr_scheduler = StepLR(
self.optimizer, args.lr_scheduler_frequency, args.lr_scheduler_gamma
)
if args.loss == "mse":
self.loss = nn.MSELoss()
elif args.loss == "huber":
self.loss = nn.SmoothL1Loss()
else:
raise ValueError("Unknown Loss function.")
self.bsize = args.bsize
self.gamma = args.gamma
self.buffer = buffer
self.target_update_freq = args.target_update_freq
self.step_ctr = 0
self.grad_clip = args.grad_clip
self.grad_clip_norm_type = args.grad_clip_norm_type
self.device = args.device
def get_qs(self, states):
vout, eout, _ = self.net(
x=states[0],
edge_index=states[2],
edge_attr=states[1],
v_indices=states[4],
e_indices=states[5],
u=states[6],
)
return vout[states[0][:, VAR_ID_IDX] == 1], states[3]
def get_target_qs(self, states):
vout, eout, _ = self.target(
x=states[0],
edge_index=states[2],
edge_attr=states[1],
v_indices=states[4],
e_indices=states[5],
u=states[6],
)
return vout[states[0][:, VAR_ID_IDX] == 1].detach(), states[3]
def step(self):
s, a, r, s_next, nonterminals = self.buffer.sample(self.bsize)
# calculate the targets first to reduce peak GPU memory usage
with torch.no_grad():
target_qs, target_vertex_sizes = self.get_target_qs(s_next)
idx_for_scatter = [
[i] * el.item() * 2 for i, el in enumerate(target_vertex_sizes)
]
idx_for_scatter = torch.tensor(
[el for subl in idx_for_scatter for el in subl],
dtype=torch.long,
device=self.device,
).flatten()
target_qs = scatter_max(target_qs.flatten(), idx_for_scatter, dim=0)[0]
targets = r + nonterminals * self.gamma * target_qs
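# i.e. the standard Q-learning target y = r + gamma * max_a' Q_target(s', a'),
# with the max taken per graph in the batch via scatter_max over node indices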
self.net.train()
qs, var_vertex_sizes = self.get_qs(s)
# qs.shape[1] values per node (same num of actions per node)
gather_idx = (var_vertex_sizes * qs.shape[1]).cumsum(0).roll(1)
gather_idx[0] = 0
qs = qs.flatten()[gather_idx + a]
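# e.g. with per-graph variable counts (3, 2) and 2 actions per node,
# gather_idx is (0, 6): graph i's chosen action a_i indexes the flattened
# Q vector at offset gather_idx[i] + a_i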
loss = self.loss(qs, targets)
self.optimizer.zero_grad()
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(
self.net.parameters(), self.grad_clip, norm_type=self.grad_clip_norm_type
)
self.optimizer.step()
if not self.step_ctr % self.target_update_freq:
self.target.load_state_dict(self.net.state_dict())
self.step_ctr += 1
# I do not know a better solution for getting the lr from the scheduler.
# This will fail for different lrs for different layers.
lr_for_the_update = self.lr_scheduler.get_lr()[0]
self.lr_scheduler.step()
return {
"loss": loss.item(),
"grad_norm": grad_norm,
"lr": lr_for_the_update,
"average_q": qs.mean(),
}
| GraphQSat-main | gqsat/learners.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import numpy as np
from minisat.minisat.gym.MiniSATEnv import VAR_ID_IDX
import time
import pickle
from collections import deque
import os
import sys
def add_common_options(parser):
parser.add_argument(
"--with_restarts",
action="store_true",
help="Do restarts in Minisat if set",
dest="with_restarts",
)
parser.add_argument(
"--no_restarts",
action="store_false",
help="Do not do restarts in Minisat if set",
dest="with_restarts",
)
parser.set_defaults(with_restarts=False)
parser.add_argument(
"--compare_with_restarts",
action="store_true",
help="Compare to MiniSAT with restarts",
dest="compare_with_restarts",
)
parser.add_argument(
"--compare_no_restarts",
action="store_false",
help="Compare to MiniSAT without restarts",
dest="compare_with_restarts",
)
parser.set_defaults(compare_with_restarts=False)
parser.add_argument(
"--test_max_data_limit_per_set",
type=int,
help="Max number of problems to load from the dataset for the env. EVAL/TEST mode.",
default=None,
)
parser.add_argument(
"--test_time_max_decisions_allowed",
type=int,
help="Number of steps the agent will act from the beginning of the episode when evaluating. "
"Otherwise it will return -1 asking minisat to make a decision. "
"Float because I want infinity by default (no minisat at all)",
)
parser.add_argument("--env-name", type=str, default="sat-v0", help="Environment.")
parser.add_argument(
"--debug",
action="store_true",
help="Modify the flow of the script, i.e. run for less iterations",
)
parser.add_argument(
"--model-dir",
help="Path to the folder with checkpoints and model.yaml file",
type=str,
)
parser.add_argument(
"--model-checkpoint",
help="Filename of the checkpoint, relative to the --model-dir param.",
type=str,
)
parser.add_argument("--logdir", type=str, help="Dir for writing the logs")
parser.add_argument(
"--eps-final", type=float, default=0.1, help="Final epsilon value."
)
parser.add_argument(
"--eval-problems-paths",
help="Path to the problem dataset for evaluation",
type=str,
)
parser.add_argument(
"--train_max_data_limit_per_set",
type=int,
help="Max number of problems to load from the dataset for the env. TRAIN mode.",
default=None,
)
parser.add_argument("--no-cuda", action="store_true", help="Use the cpu")
parser.add_argument(
"--dump_timings_path",
type=str,
help="If not empty, defines the directory to save the wallclock time performance",
)
def build_eval_argparser():
# for eval we mostly reuse the args saved from training; the options below override the saved values, so be careful
parser = argparse.ArgumentParser()
parser.add_argument(
"--core-steps",
type=int,
help="Number of message passing iterations. "
"\-1 for the same number as used for training",
default=-1,
)
parser.add_argument(
"--eval-time-limit",
type=int,
help="Time limit for evaluation. If it takes more, return what it has and quit eval. In seconds.",
)
add_common_options(parser)
return parser
def build_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
parser.add_argument(
"--bsize", type=int, default=128, help="Batch size for the learning step"
)
parser.add_argument(
"--eps-init", type=float, default=1.0, help="Exploration epsilon"
)
parser.add_argument(
"--expert-exploration-prob",
type=float,
default=0.0,
help="When do an exploratory action, choose minisat action with this prob. Otherwise choose random.",
)
parser.add_argument("--gamma", type=float, default=0.99, help="Discounting")
parser.add_argument(
"--eps-decay-steps",
type=int,
default=5000,
help="How many transitions to decay for.",
)
parser.add_argument(
"--target-update-freq",
type=int,
default=1000,
help="How often to copy the parameters to traget.",
)
parser.add_argument(
"--batch-updates",
type=int,
default=1000000000,
help="num of batch updates to train for",
)
parser.add_argument(
"--buffer-size", type=int, default=10000, help="Memory Replay size."
)
parser.add_argument(
"--history-len", type=int, default=1, help="Frames to stack for the input"
)
parser.add_argument(
"--init-exploration-steps",
type=int,
default=100,
help="Start learning after this number of transitions.",
)
parser.add_argument(
"--step-freq", type=int, default=4, help="Step every k-th frame"
)
parser.add_argument("--loss", default="mse", help="Loss to use: mse|huber")
parser.add_argument(
"--save-freq",
default=100000,
type=int,
help="How often to save the model. Measured in minibatch updates.",
)
parser.add_argument("--train-problems-paths", type=str)
parser.add_argument(
"--eval-freq",
default=10000,
type=int,
help="How often to evaluate. Measured in minibatch updates.",
)
parser.add_argument(
"--eval-time-limit",
default=3600,
type=int,
help="Time limit for evaluation. If it takes more, return what it has and quit eval. In seconds.",
)
parser.add_argument(
"--status-dict-path", help="Path to the saved status dict", type=str
)
parser.add_argument(
"--core-steps", type=int, default=8, help="Number of message passing iterations"
)
parser.add_argument(
"--priority_alpha", type=float, default=0.5, help="Alpha in the PER"
)
parser.add_argument(
"--priority_beta",
type=float,
default=0.5,
help="Initial value of the IS weight in PER. Annealed to 1 during training.",
)
parser.add_argument(
"--opt", default="sgd", help="Optimizer to use: sgd|adam|rmsprop"
)
parser.add_argument(
"--e2v-aggregator",
default="sum",
help="Aggregation to use for e->v. Can be sum|mean",
)
parser.add_argument(
"--n_hidden", type=int, default=1, help="Number of hidden layers for all MLPs."
)
parser.add_argument(
"--hidden_size",
type=int,
default=64,
help="Number of units in MLP hidden layers.",
)
parser.add_argument(
"--decoder_v_out_size",
type=int,
default=32,
help="Vertex size of the decoder output.",
)
parser.add_argument(
"--decoder_e_out_size",
type=int,
default=1,
help="Edge size of the decoder output.",
)
parser.add_argument(
"--decoder_g_out_size",
type=int,
default=1,
help="Global attr size of the decoder output.",
)
parser.add_argument(
"--encoder_v_out_size",
type=int,
default=32,
help="Vertex size of the decoder output.",
)
parser.add_argument(
"--encoder_e_out_size",
type=int,
default=32,
help="Edge size of the decoder output.",
)
parser.add_argument(
"--encoder_g_out_size",
type=int,
default=32,
help="Global attr size of the decoder output.",
)
parser.add_argument(
"--core_v_out_size",
type=int,
default=64,
help="Vertex size of the decoder output.",
)
parser.add_argument(
"--core_e_out_size",
type=int,
default=64,
help="Edge size of the decoder output.",
)
parser.add_argument(
"--core_g_out_size",
type=int,
default=32,
help="Global attr size of the decoder output.",
)
parser.add_argument(
"--independent_block_layers",
type=int,
default=0,
help="Number of hidden layers in the encoder/decoder",
)
# example from https://stackoverflow.com/a/31347222
parser.add_argument(
"--eval_separately_on_each",
dest="eval_separately_on_each",
help="If you provide multiple eval datasets e.g. path1:path2, it will "
"evaluate separately on each and tensorboard/metric them seaprately.",
action="store_true",
)
parser.add_argument(
"--no_eval_separately_on_each",
dest="eval_separately_on_each",
help="If you provide multiple eval datasets e.g. path1:path2, it will "
"evaluate JOINTLY on them",
action="store_false",
)
parser.set_defaults(eval_separately_on_each=True)
parser.add_argument(
"--train_time_max_decisions_allowed",
type=int,
default=sys.maxsize,
help="Number of steps the agent will act from the beginning of the episode when training. "
"Otherwise it will return -1 asking minisat to make a decision. "
"Float because I want infinity by default (no minisat at all)",
)
parser.add_argument(
"--activation",
type=str,
default="relu",
choices=["relu", "leaky_relu", "tanh"],
help="Activation function",
)
parser.add_argument(
"--lr_scheduler_gamma",
type=float,
default=1.0,
help="Scheduler multiplies lr by this number each LR_SCHEDULER_FREQUENCY number of steps",
)
parser.add_argument(
"--lr_scheduler_frequency",
type=int,
default=1000,
help="Every this number of steps, we multiply the lr by LR_SCHEDULER_GAMMA",
)
parser.add_argument(
"--grad_clip", type=float, default=1.0, help="Clip gradient by norm."
)
parser.add_argument(
"--grad_clip_norm_type",
type=float,
default=2,
help='Which norm to use when clipping. Use float("inf") to use maxnorm.',
)
parser.add_argument(
"--max_cap_fill_buffer",
dest="max_cap_fill_buffer",
help="If true, when cap is surpassed, use -1 and return each state",
action="store_true",
)
parser.add_argument(
"--no_max_cap_fill_buffer",
dest="max_cap_fill_buffer",
help="If this is on, when cap is surpassed, play till the end and return last state only",
action="store_false",
)
parser.set_defaults(max_cap_fill_buffer=False)
parser.add_argument(
"--penalty_size",
type=float,
default=0.0001,
help="amount of penalty to apply each step",
)
add_common_options(parser)
return parser
def batch_graphs(graphs, device):
# we treat a batch of graphs as one mega graph with several components disconnected from each other
# we can simply concat the data and adjust the connectivity indices
vertex_sizes = torch.tensor([el[0].shape[0] for el in graphs], dtype=torch.long)
edge_sizes = torch.tensor([el[1].shape[0] for el in graphs], dtype=torch.long)
vcumsize = np.cumsum(vertex_sizes)
variable_nodes_sizes = torch.tensor(
[el[0][el[0][:, VAR_ID_IDX] == 1].shape[0] for el in graphs],
dtype=torch.long,
device=device,
)
vbatched = torch.cat([el[0] for el in graphs])
ebatched = torch.cat([el[1] for el in graphs])
gbatched = torch.cat([el[3] for el in graphs]) # 3 is for global
conn = torch.cat([el[2] for el in graphs], dim=1)
conn_adjuster = vcumsize.roll(1)
conn_adjuster[0] = 0
conn_adjuster = torch.tensor(
np.concatenate(
[edge_sizes[vidx].item() * [el] for vidx, el in enumerate(conn_adjuster)]
),
dtype=torch.long,
device=device,
)
conn = conn + conn_adjuster.expand(2, -1)
v_graph_belonging = torch.tensor(
np.concatenate([el.item() * [gidx] for gidx, el in enumerate(vertex_sizes)]),
dtype=torch.long,
device=device,
)
e_graph_belonging = torch.tensor(
np.concatenate([el.item() * [gidx] for gidx, el in enumerate(edge_sizes)]),
dtype=torch.long,
device=device,
)
return (
vbatched,
ebatched,
conn,
variable_nodes_sizes,
v_graph_belonging,
e_graph_belonging,
gbatched,
)
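# Illustrative usage sketch for batch_graphs (the feature widths below are
# arbitrary assumptions; VAR_ID_IDX is the module-level column index of the
# variable-node flag):
#
#   g1 = [torch.rand(2, 8), torch.rand(1, 3),
#         torch.tensor([[0], [1]]), torch.rand(1, 4)]
#   g2 = [torch.rand(3, 8), torch.rand(4, 3),
#         torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]), torch.rand(1, 4)]
#   v, e, conn, _, v_gidx, e_gidx, g = batch_graphs([g1, g2], device="cpu")
#   # conn columns that came from g2 are shifted by 2 (g1's vertex count),
#   # so they index rows 2..4 of the concatenated vertex tensor v.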
import gym
import minisat  # noqa: F401 (imported for its side effect of registering the SAT gym environments)
def make_env(problems_paths, args, test_mode=False):
max_data_limit_per_set = None
if test_mode and hasattr(args, "test_max_data_limit_per_set"):
max_data_limit_per_set = args.test_max_data_limit_per_set
if not test_mode and hasattr(args, "train_max_data_limit_per_set"):
max_data_limit_per_set = args.train_max_data_limit_per_set
return gym.make(
args.env_name,
problems_paths=problems_paths,
args=args,
test_mode=test_mode,
max_cap_fill_buffer=False if test_mode else args.max_cap_fill_buffer,
penalty_size=args.penalty_size if hasattr(args, "penalty_size") else None,
with_restarts=args.with_restarts if hasattr(args, "with_restarts") else None,
compare_with_restarts=args.compare_with_restarts
if hasattr(args, "compare_with_restarts")
else None,
max_data_limit_per_set=max_data_limit_per_set,
)
def evaluate(agent, args, include_train_set=False):
agent.net.eval()
problem_sets = (
[args.eval_problems_paths]
if not args.eval_separately_on_each
else [k for k in args.eval_problems_paths.split(":")]
)
if include_train_set:
problem_sets.extend(
[args.train_problems_paths]
if not args.eval_separately_on_each
else [k for k in args.train_problems_paths.split(":")]
)
res = {}
st_time = time.time()
print("Starting evaluation. Fasten your seat belts!")
total_iters_ours = 0
total_iters_minisat = 0
for pset in problem_sets:
eval_env = make_env(pset, args, test_mode=True)
DEBUG_ROLLOUTS = None
pr = 0
walltime = {}
scores = {}
with torch.no_grad():
while eval_env.test_to != 0 or pr == 0:
p_st_time = time.time()
obs = eval_env.reset(
max_decisions_cap=args.test_time_max_decisions_allowed
)
done = eval_env.isSolved
while not done:
# if time.time() - st_time > args.eval_time_limit:
# print(
# "Eval time limit surpassed. Returning what I have, and quitting the evaluation."
# )
# return res, eval_env.metadata, False
action = agent.act([obs], eps=0)
obs, _, done, _ = eval_env.step(action)
walltime[eval_env.curr_problem] = time.time() - p_st_time
print(
f"It took {walltime[eval_env.curr_problem]} seconds to solve a problem."
)
sctr = 1 if eval_env.step_ctr == 0 else eval_env.step_ctr
ns = eval_env.normalized_score(sctr, eval_env.curr_problem)
print(f"Evaluation episode {pr+1} is over. Your score is {ns}.")
total_iters_ours += sctr
pdir, pname = os.path.split(eval_env.curr_problem)
total_iters_minisat += eval_env.metadata[pdir][pname][1]
scores[eval_env.curr_problem] = ns
pr += 1
if DEBUG_ROLLOUTS is not None and pr >= DEBUG_ROLLOUTS:
break
print(
f"Evaluation is done. Median relative score: {np.nanmedian([el for el in scores.values()]):.2f}, "
f"mean relative score: {np.mean([el for el in scores.values()]):.2f}, "
f"iters frac: {total_iters_minisat/total_iters_ours:.2f}"
)
res[pset] = scores
if args.dump_timings_path:
target_fname = (
os.path.join(
args.dump_timings_path,
args.eval_problems_paths.replace("/", "_")
+ f"_cap_{args.test_time_max_decisions_allowed}",
)
+ ".pkl"
)
with open(target_fname, "wb") as f:
pickle.dump(walltime, f)
agent.net.train()
return (
res,
{
"metadata": eval_env.metadata,
"iters_frac": total_iters_minisat / total_iters_ours,
"mean_score": np.mean([el for el in scores.values()]),
"median_score": np.median([el for el in scores.values()]),
},
False,
)
| GraphQSat-main | gqsat/utils.py |
# Copyright 2019-2020 Nvidia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from gqsat.utils import batch_graphs
class ReplayGraphBuffer:
def __init__(self, args, size):
self.ctr = 0
self.full = False
self.size = size
self.device = args.device
self.dones = torch.ones(size)
self.rewards = torch.zeros(size)
self.actions = torch.zeros(size, dtype=torch.long)
# dtype=object allows storing references to objects of arbitrary size
self.observations = np.zeros((size, 4), dtype=object)
def add_transition(self, obs, a, r_next, done_next):
self.dones[self.ctr] = int(done_next)
self.rewards[self.ctr] = r_next
self.actions[self.ctr] = a
# should be vertex_data, edge_data, connectivity, global
for el_idx, el in enumerate(obs):
self.observations[self.ctr][el_idx] = el
if (self.ctr + 1) % self.size == 0:
self.ctr = 0
self.full = True
else:
self.ctr += 1
def sample(self, batch_size):
# exclude the newest transition so that observations[idx + 1] is always valid
curr_size = self.ctr - 1 if not self.full else self.size - 1
idx = np.random.choice(range(0, curr_size), batch_size)
return (
self.batch(self.observations[idx]),
self.actions[idx].to(self.device),
self.rewards[idx].to(self.device),
self.batch(self.observations[idx + 1]),
1.0 - self.dones[idx].to(self.device),
)
def batch(self, obs):
return batch_graphs(
[[torch.tensor(i, device=self.device) for i in el] for el in obs],
self.device,
)
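# Illustrative usage sketch (obs/action/reward names are placeholders; args
# is assumed to expose .device, as in the training scripts):
#
#   buffer = ReplayGraphBuffer(args, size=10000)
#   buffer.add_transition(obs, action, reward_next, done_next)
#   obs_b, a_b, r_b, next_obs_b, nonterminal_b = buffer.sample(batch_size=64)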
| GraphQSat-main | gqsat/buffer.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT YOLOv4 Tiny example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/yolo_v4_tiny/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to prepare train/val dataset for LPRNet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Prepare train/val dataset for LPRNet tutorial')
parser.add_argument(
"--input_dir",
type=str,
required=True,
help="Input directory to OpenALPR's benchmark end2end us license plates."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT train/eval dataset."
)
return parser.parse_args(args)
def prepare_data(input_dir, img_list, output_dir):
"""Crop the license plates from the orginal images."""
target_img_path = os.path.join(output_dir, "image")
target_label_path = os.path.join(output_dir, "label")
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
for img_name in img_list:
img_path = os.path.join(input_dir, img_name)
label_path = os.path.join(input_dir,
img_name.split(".")[0] + ".txt")
img = cv2.imread(img_path)
with open(label_path, "r") as f:
label_lines = f.readlines()
assert len(label_lines) == 1
label_items = label_lines[0].split()
assert img_name == label_items[0]
xmin = int(label_items[1])
ymin = int(label_items[2])
width = int(label_items[3])
xmax = xmin + width
height = int(label_items[4])
ymax = ymin + height
lp = label_items[5]
cropped_lp = img[ymin:ymax, xmin:xmax, :]
# save img and label
cv2.imwrite(os.path.join(target_img_path, img_name), cropped_lp)
with open(os.path.join(target_label_path,
img_name.split(".")[0] + ".txt"), "w") as f:
f.write(lp)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_dir):
if file_name.split(".")[-1] == "jpg":
img_files.append(file_name)
total_cnt = len(img_files)
train_cnt = int(total_cnt/2)
val_cnt = total_cnt - train_cnt
train_img_list = img_files[0:train_cnt]
val_img_list = img_files[train_cnt:]  # the remaining samples go to val
print("Total {} samples in benchmark dataset".format(total_cnt))
print("{} for train and {} for val".format(train_cnt, val_cnt))
train_dir = os.path.join(args.output_dir, "train")
prepare_data(args.input_dir, train_img_list, train_dir)
val_dir = os.path.join(args.output_dir, "val")
prepare_data(args.input_dir, val_img_list, val_dir)
if __name__ == "__main__":
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/lprnet/preprocess_openalpr_benchmark.py |
anthropometic_3D_landmarks = [[0.463302314, 0.499617226, 2.824620485],
[0.433904979, 0.505937393, 2.644347876],
[0.39794359, 0.54824712, 2.468309015],
[0.347156364, 0.608686736, 2.301015556],
[0.261349984, 0.708693571, 2.164755151],
[0.149679065, 0.846413877, 2.038914531],
[0.020857666, 1.000756979, 1.96136412],
[-0.124583332, 1.132211104, 1.890638679],
[-0.332052324, 1.199630469, 1.870016173],
[-0.521015424, 1.142547488, 1.891351938],
[-0.659920681, 1.03973259, 1.961001828],
[-0.797577906, 0.86913183, 2.057442099],
[-0.912351593, 0.80159378, 2.188996568],
[-0.994247332, 0.707895722, 2.321974874],
[-1.045988813, 0.646902599, 2.488486946],
[-1.07838694, 0.581114346, 2.655235291],
[-1.096934579, 0.572226902, 2.832175642],
[0.306385975, 0.988582142, 3.263724804],
[0.238308419, 1.087680236, 3.319453031],
[0.126437213, 1.167731345, 3.357794225],
[-0.003806859, 1.229740798, 3.335844517],
[-0.126166103, 1.249807343, 3.300820023],
[-0.483642399, 1.261558414, 3.320731789],
[-0.594755229, 1.244249567, 3.356189996],
[-0.709202692, 1.193373024, 3.370144337],
[-0.830934606, 1.118067637, 3.342299908],
[-0.911886856, 1.022390895, 3.286355436],
[-0.31427322, 1.28056182, 3.116396815],
[-0.312322683, 1.355140246, 3.0163863],
[-0.310799612, 1.452512272, 2.899074256],
[-0.315011633, 1.537534878, 2.777368128],
[-0.125134574, 1.256734014, 2.648497283],
[-0.216964348, 1.329175174, 2.637426972],
[-0.310138743, 1.389713913, 2.611324817],
[-0.414820289, 1.334226191, 2.642694384],
[-0.513519868, 1.265409455, 2.656487644],
[0.196186462, 1.035192601, 3.090169013],
[0.126957612, 1.119997166, 3.156619817],
[-0.027273278, 1.136058375, 3.157634437],
[-0.100839235, 1.102722079, 3.088872135],
[-0.021972392, 1.132983871, 3.03742063],
[0.127623449, 1.10177733, 3.034567326],
[-0.520080116, 1.100469962, 3.095452815],
[-0.586792942, 1.133374192, 3.17071414],
[-0.745613977, 1.125613876, 3.170327187],
[-0.819571108, 1.0455795, 3.105413705],
[-0.744035766, 1.112881519, 3.048785478],
[-0.589515403, 1.131952509, 3.048771381],
[0.02306129, 1.158300541, 2.368660092],
[-0.080868714, 1.272260003, 2.42186287],
[-0.181587959, 1.345463172, 2.448015809],
[-0.312187512, 1.385880813, 2.454812676],
[-0.452711696, 1.3551175, 2.454890877],
[-0.558453415, 1.285798028, 2.426469952],
[-0.664228875, 1.164380819, 2.386185408],
[-0.571288593, 1.24077671, 2.312964618],
[-0.466966776, 1.311935268, 2.253052473],
[-0.318221454, 1.336186148, 2.228322476],
[-0.170658994, 1.297508962, 2.247573286],
[-0.071347391, 1.225932129, 2.294786155],
[-0.053068627, 1.196410157, 2.366681814],
[-0.16933859, 1.303018831, 2.379662782],
[-0.31283252, 1.344644643, 2.37743327],
[-0.453441203, 1.322521709, 2.388329715],
[-0.585467342, 1.213929333, 2.378763407],
[-0.473488985, 1.302666686, 2.342419624],
[-0.311414156, 1.341170423, 2.319458067],
[-0.16669072, 1.297568522, 2.336282581]] | tao_tutorials-main | notebooks/tao_launcher_starter_kit/gazenet/face_model_nv68.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
"""GazeNet visualization util scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import numpy as np
import json
import face_model_nv68
MIN_LANDMARK_FOR_PNP = 4
NUM_JSON_LANDMARKS = 104
def get_landmarks_info():
"""Get landmarks information.
Return:
landmarks_3D_selected (array): 3D landmarks for a face model
landmarks_2D_set_selected (array): 2D landmarks index from the labeling
le_center (array): left eye center
re_center (array): right eye center
"""
anthropometic_3D_landmarks = face_model_nv68.anthropometic_3D_landmarks
anthropometic_3D_landmarks = np.asarray(anthropometic_3D_landmarks, dtype=float)
anthropometic_3D_landmarks[:, [1, 2]] = anthropometic_3D_landmarks[:, [2, 1]]
anthropometic_3D_landmarks *= -1 # Invert the X, Y, Z axes.
le_outer = anthropometic_3D_landmarks[45]
le_inner = anthropometic_3D_landmarks[42]
re_outer = anthropometic_3D_landmarks[36]
re_inner = anthropometic_3D_landmarks[39]
le_center = (le_inner + le_outer) / 2.0
le_center = np.reshape(le_center, (1, 3))
re_center = (re_inner + re_outer) / 2.0
re_center = np.reshape(re_center, (1, 3))
face_model_scaling = 65.0 / np.linalg.norm(le_center - re_center)
anthropometic_3D_landmarks *= face_model_scaling
le_center *= face_model_scaling
re_center *= face_model_scaling
# Selected landmark indices used to solve the PnP problem
landmarks_3D_selected = []
landmarks_2D_set_selected = [26, 22, 21, 17, 45, 42, 39, 36, 35, 31, 54, 48, 57, 8]
for ind in landmarks_2D_set_selected:
landmarks_3D_selected.append(anthropometic_3D_landmarks[ind])
landmarks_3D_selected = np.asarray(landmarks_3D_selected, dtype=float)
return landmarks_3D_selected, landmarks_2D_set_selected, le_center, re_center
def load_cam_intrinsics(config_path):
"""Load camera intrinsic parameters.
Args:
config_path: path to the config folder
Return:
cam (array): camera matrix
dist (array): distortion parameters
"""
filename = os.path.join(config_path, 'camera_parameters.txt')
assert os.path.isfile(filename)
with open(filename, 'r') as dataf:
lines = dataf.readlines()
cam = []
dist = []
idx = 0
while idx < 3:
cam.append(list(map(float, lines[idx].split(','))))
idx += 1
# to skip the blank line
idx += 1
while idx < len(lines):
dist.append(float(lines[idx]))
idx += 1
assert len(cam) == 3
assert len(dist) >= 5
return np.asarray(cam, dtype=np.float32), np.asarray(dist, dtype=np.float32)
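# For reference, camera_parameters.txt is expected to look like the sketch
# below (numbers are placeholders): three comma-separated rows of the 3x3
# camera matrix, one blank line, then one distortion coefficient per line
# (at least five):
#
#   1000.0,0.0,640.0
#   0.0,1000.0,360.0
#   0.0,0.0,1.0
#
#   0.1
#   -0.05
#   0.001
#   0.001
#   0.01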
def get_landmarks_dict(json_file_folder):
"""Get landmarks dictionary.
Args:
json_file_folder: input json file folder
Return:
landmarks_dict (dict): dictionary of the landmarks from data factory labels in json
"""
assert os.path.isdir(json_file_folder)
landmarks_dict = dict()
json_list = os.listdir(json_file_folder)
for json_file in json_list:
json_file_name = os.path.join(json_file_folder, json_file)
try:
with open(json_file_name, 'r') as f_json:
json_reader = json.load(f_json)
except Exception:
print('Json file improperly formatted')
continue
for frame_json in json_reader:
if 'annotations' not in frame_json:
continue
frame_name = frame_json['filename']
lm = extract_landmarks_from_json(frame_json['annotations'])
landmarks_dict[frame_name] = lm
return landmarks_dict
def get_landmarks_correspondence(landmarks_3D, landmarks_2D, landmarks_2D_set):
"""Get corresponding 2D and 3D landmarks.
Prepare landmarks before computing the PnP
Args:
landmarks_3D (array): 3D landmarks on a face model
landmarks_2D (array): 2D landmarks from data factory labels
landmarks_2D_set (array): 2D landmarks index that corresponds to the 3D landmarks
Return:
landmarks_2D_final (array): filtered 2D landmarks
landmarks_3D_final (array): filtered 3D landmarks
"""
landmarks_2D_final = []
landmarks_3D_final = []
for i in range(len(landmarks_2D_set)):
landmarks_2D_final.append(landmarks_2D[landmarks_2D_set[i]])
landmarks_3D_final.append(landmarks_3D[i])
landmarks_2D_final = np.asarray(landmarks_2D_final, dtype=float)
landmarks_3D_final = np.asarray(landmarks_3D_final, dtype=float)
return landmarks_2D_final, landmarks_3D_final
def compute_EPnP(points_3D, points_2D, camera_mat, distortion_coeffs):
"""Compute rotation and translation of head
Args:
points_3D (array): 3D landmarks points
points_2D (array): 2D landmarks points
camera_mat (array): camera matrix
distortion_coeffs (array): camera distortion
Return:
retval (int): return status value from OpenCV solvePnP
rvec (array): rotation of the head
tvec (int): translation of the head
"""
points_2D = np.expand_dims(points_2D, axis=1)
points_3D = np.expand_dims(points_3D, axis=1)
# Refer to this for the solvePnP function:
# https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga549c2075fac14829ff4a58bc931c033d
retval, rvec, tvec = cv2.solvePnP(points_3D, points_2D, camera_mat,\
distortion_coeffs, None, None, False, 1)
return retval, rvec, tvec
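# Note: the literal flags value 1 passed above corresponds to
# cv2.SOLVEPNP_EPNP, hence the function name. Sketch of the returned pose
# (up to reprojection error), which is exactly how the two projection
# helpers below consume it:
#   s * [u, v, 1]^T = camera_mat @ (cv2.Rodrigues(rvec)[0] @ X + tvec)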
def projectObject2Camera(object_coords, rot_mat, tvec):
"""Project object coordinates (WCS) to camera coordinates (CCS)
Args:
object_coords (array): object coordinates
rot_mat (array): rotation matrix
tvec (array): translation vector
Return:
cam_coords (array): camera coordinate
"""
RPw = rot_mat.dot(object_coords.transpose())
cam_coords = RPw + tvec
return cam_coords
def projectCamera2Image(cam_coords, cam_mat):
"""Project cam coordinates (CCS) to image coordinates (ICS)
Args:
cam_coords (array): camera coordinates
cam_mat (array): camera matrix
Return:
image_coords (array): image coordinate
"""
image_coords = np.matmul(cam_mat, cam_coords)
assert image_coords[2] > 0
image_coords /= image_coords[2]
return image_coords
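# Worked example with assumed values:
#   cam_mat = [[1000, 0, 640], [0, 1000, 360], [0, 0, 1]]
#   cam_coords = [100, 50, 1000] (mm)
# gives cam_mat @ cam_coords = [740000, 410000, 1000], and dividing by the
# third component yields the pixel (740, 410).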
def extract_fiducial_points(chunk):
"""Extract fiducial landmarks points from a chunk in the label file
Args:
chunk (array): a chunk in the json labeling file
Return:
x (array): 2D landmarks x coordinate
y (array): 2D landmarks y coordinate
occlusions (array): occlusions arrays
num_landmarks (int): number of landmarks points
"""
x = [-1] * NUM_JSON_LANDMARKS
y = [-1] * NUM_JSON_LANDMARKS
occlusions = [-1] * NUM_JSON_LANDMARKS
num_landmarks = None
for point in (point for point in chunk if ('class' not in point and 'version' not in point)):
try:
number = int(''.join(c for c in str(point) if c.isdigit()))
if num_landmarks is None or number > num_landmarks:
num_landmarks = number
if 'x' in str(point).lower() and number <= NUM_JSON_LANDMARKS:
x[number - 1] = str(float(chunk[point]))
if 'y' in str(point).lower() and number <= NUM_JSON_LANDMARKS:
y[number - 1] = str(float(chunk[point]))
if ('occ' in str(point).lower() and number <= NUM_JSON_LANDMARKS and chunk[point]):
occlusions[number - 1] = 1
for index in range(num_landmarks):
if occlusions[index] == -1:
occlusions[index] = 0
except Exception as e:
print('Exception occurred during parsing')
print(str(e))
print(str(point))
return x, y, occlusions, num_landmarks
def extract_landmarks_from_json(json_frame_dict):
"""Extract landmarks form a label file
Args:
json_frame_dict (dict): dictionary of a json label file
Return:
landmarks_2D (array): if successful, return 2D facial landmarks
otherwise, return None
"""
for chunk in json_frame_dict:
if 'class' not in chunk:
continue
chunk_class = str(chunk['class']).lower()
if chunk_class == 'fiducialpoints':
x, y, occlusions, num_landmarks = extract_fiducial_points(chunk)
landmarks_2D = np.asarray([x, y], dtype=float).T
return landmarks_2D
return None
def load_frame_gray(frame_path):
"""Load a frame and convert to grayscale
Args:
frame_path: path to the image
Return:
frame (array): if successful, return a loaded frame in gray scale
otherwise, return None
"""
if os.path.isfile(frame_path):
frame = cv2.imread(frame_path, 0)
assert frame.shape[0] > 0 and frame.shape[1] > 0
return frame
else:
print(frame_path, 'does not exist!')
return None
def visualize_frame(frame_path, landmarks_2D, cam_coord, calib):
"""visualize gaze vector in a frame
Args:
frame_path (array): a chunk in the json labeling file
landmarks_2D (array): 2D landmarks
cam_coord (array): camera coordinate
calib (list): camera calibration parameters
"""
# Gather the face-model landmarks and their 2D correspondences
landmarks_3D_selected, landmarks_2D_set_selected, le_center, re_center = get_landmarks_info()
landmarks_2D_final, landmarks_3D_final = get_landmarks_correspondence(landmarks_3D_selected, landmarks_2D,
landmarks_2D_set_selected)
# Compute PnP between the generic 3D face model landmarks (WCS) and 2D landmarks (ICS)
# Rotation and translation vectors for 3D-to-2D transformation
camera_mat = calib['cam']
distortion_coeffs = calib['dist']
_, rvec, tvec = compute_EPnP(landmarks_3D_final, landmarks_2D_final, camera_mat, distortion_coeffs)
# Compute head pose angles (euler)
rot_mat = cv2.Rodrigues(rvec)[0]
leye_cam_mm = projectObject2Camera(le_center, rot_mat, tvec).reshape(-1)
leye_gaze_vec = cam_coord - leye_cam_mm
leye_gaze_vec /= np.sqrt(leye_gaze_vec[0] ** 2 + leye_gaze_vec[1] ** 2 + leye_gaze_vec[2] ** 2)
reye_cam_mm = projectObject2Camera(re_center, rot_mat, tvec).reshape(-1)
reye_gaze_vec = cam_coord - reye_cam_mm
reye_gaze_vec /= np.sqrt(reye_gaze_vec[0] ** 2 + reye_gaze_vec[1] ** 2 + reye_gaze_vec[2] ** 2)
le_pc_image_px = (landmarks_2D[42] + landmarks_2D[45]) / 2.0
le_pc_image_px_hom = np.ones(shape=(3, 1), dtype=float)
le_pc_image_px_hom[0] = le_pc_image_px[0]
le_pc_image_px_hom[1] = le_pc_image_px[1]
re_pc_image_px = (landmarks_2D[36] + landmarks_2D[39]) / 2.0
re_pc_image_px_hom = np.ones(shape=(3, 1), dtype=float)
re_pc_image_px_hom[0] = re_pc_image_px[0]
re_pc_image_px_hom[1] = re_pc_image_px[1]
# Draw gaze
# gaze_vector_length: length of the line drawn out of the eyes in the visualization
gaze_vector_length = 100
gaze_le_ap_cam_mm = leye_cam_mm + (leye_gaze_vec * gaze_vector_length)
gaze_le_ap_pc_px = projectCamera2Image(gaze_le_ap_cam_mm, camera_mat)
le_pc_image_px = [int(le_pc_image_px[0]), int(le_pc_image_px[1])]
gaze_le_ap_pc_px = [int(gaze_le_ap_pc_px[0]), int(gaze_le_ap_pc_px[1])]
gaze_re_ap_cam_mm = reye_cam_mm + (reye_gaze_vec * gaze_vector_length)
gaze_re_ap_pc_px = projectCamera2Image(gaze_re_ap_cam_mm, camera_mat)
re_pc_image_px = [int(re_pc_image_px[0]), int(re_pc_image_px[1])]
gaze_re_ap_pc_px = [int(gaze_re_ap_pc_px[0]), int(gaze_re_ap_pc_px[1])]
display_frame = load_frame_gray(frame_path)
display_frame = cv2.cvtColor(display_frame, cv2.COLOR_GRAY2RGB)
return display_frame, le_pc_image_px, gaze_le_ap_pc_px, re_pc_image_px, gaze_re_ap_pc_px
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/gazenet/utils_gazeviz.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
"""GazeNet public dataset conversion scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import errno
import os
import json
import argparse
import scipy.io as scio
def mkdir_p(new_path):
"""Makedir, making also non-existing parent dirs.
Args:
new_path (str): path to the directory to be created
"""
try:
print("Creating path {}".format(new_path))
os.makedirs(new_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(new_path):
pass
else:
raise
def parse_args(args=None):
"""parse the arguments.
Args:
args (list): input argument
"""
parser = argparse.ArgumentParser(description='Transfer MPIIFaceGaze dataset for GazeNet')
parser.add_argument(
'--data_path',
type=str,
required=True,
help="MPIIFaceGaze dataset path"
)
parser.add_argument(
'--json_label_root_path',
type=str,
required=True,
help="root path to the json label folders"
)
return parser.parse_args(args)
def decode_mat(path):
"""Decode mat file.
Args:
path (str): path to the mat file
"""
data = scio.loadmat(path)
return data
def generate_config_folder(calibration_path, config_path):
"""Generate config folder as required data format.
Args:
calibration_path (str): path to the calibration file in MPIIFullFace dataset
config_path (str): config folder path
"""
camera_file_path = os.path.join(calibration_path, 'Camera.mat')
extrinsic_file_path = os.path.join(calibration_path, 'monitorPose.mat')
screen_file_path = os.path.join(calibration_path, 'screenSize.mat')
# Get camera matrix and distortion information
camera_info = decode_mat(camera_file_path)
cameraMatrix = camera_info['cameraMatrix']
distCoeffs = camera_info['distCoeffs']
# Convert camera matrix to expected format
camera_file = os.path.join(config_path, 'camera_parameters.txt')
with open(camera_file, 'w') as f_camera:
content = ''
for k in range(0, 3):
for j in range(0, 2):
content += str(cameraMatrix[k][j]) + ','
content += str(cameraMatrix[k][2]) + '\n'
content += '\n'
for k in range(0, 5):
content += str(distCoeffs[0][k]) + '\n'
f_camera.write(content)
# Get extrinsic information
extrinsic_info = decode_mat(extrinsic_file_path)
rvects = extrinsic_info['rvects']
tvecs = extrinsic_info['tvecs']
R, _ = cv2.Rodrigues(rvects)
# Convert rotation matrix to expected format
R_file = os.path.join(config_path, 'R.txt')
with open(R_file, 'w') as f_R:
content = ''
for k in range(0, 3):
for j in range(0, 2):
content += str(R[k][j]) + ','
content += str(R[k][2]) + '\n'
f_R.write(content)
# Convert translation vector to expected format
T_file = os.path.join(config_path, 'T.txt')
with open(T_file, 'w') as f_T:
content = ''
for k in range(0, 3):
content += str(tvecs[k][0]) + '\n'
f_T.write(content)
# get screen information
screen_info = decode_mat(screen_file_path)
screen_width = screen_info['width_pixel'][0][0]
screen_height = screen_info['height_pixel'][0][0]
screen_width_phy = screen_info['width_mm'][0][0]
screen_height_phy = screen_info['height_mm'][0][0]
# Convert screen physical size values to expected format
TV_size_file = os.path.join(config_path, 'TV_size')
with open(TV_size_file, 'w') as f_TV_size:
content = str(screen_width_phy) + '\n' + str(screen_height_phy) + '\n'
f_TV_size.write(content)
# Convert screen resolution values to expected format
resolution_file = os.path.join(config_path, 'resolution.txt')
with open(resolution_file, 'w') as f_resolution:
content = str(screen_width) + '\n' + str(screen_height) + '\n'
f_resolution.write(content)
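# generate_config_folder leaves five files in config_path (camera_parameters.txt,
# R.txt, T.txt, TV_size and resolution.txt), the layout the conversion below relies on.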
def convert_data(data_path, json_label_root_path):
"""Convert data from public dataset format to required data format.
Args:
data_path (str): data root path
json_label_root_path (str): json label root path
"""
sample_data_path = os.path.join(data_path, 'sample-dataset')
mkdir_p(sample_data_path)
inference_set_path = os.path.join(sample_data_path, 'inference-set')
mkdir_p(inference_set_path)
sample_json_label_path = os.path.join(json_label_root_path, 'data_factory', 'day03')
set_list = os.listdir(sample_json_label_path)
st = set_list[0]
set_name = st + '-day03'
set_path = os.path.join(sample_data_path, set_name)
mkdir_p(set_path)
# Generate config folder from calibration folder
calibration_path = os.path.join(data_path, st, 'Calibration')
config_path = os.path.join(set_path, 'Config')
mkdir_p(config_path)
generate_config_folder(calibration_path, config_path)
inference_config_path = os.path.join(inference_set_path, 'Config')
mkdir_p(inference_config_path)
generate_config_folder(calibration_path, inference_config_path)
# Extract x, y screen pixel ground truth to a dictionary
gt_file = os.path.join(data_path, st, st + '.txt')
public_data_dict = {}
screen_data_dict = {}
with open(gt_file, 'r') as f_gt:
for line in f_gt:
frame_dict = {}
line_split = line.rstrip().split(' ')
frame_name = os.path.join(st, line_split[0])
frame_dict['screen_y'] = line_split[1]
frame_dict['screen_x'] = line_split[2]
screen_data_key = line_split[1] + '_' + line_split[2]
if screen_data_key not in screen_data_dict.keys():
screen_data_dict[screen_data_key] = 0
else:
screen_data_dict[screen_data_key] += 1
frame_dict['id'] = screen_data_dict[screen_data_key]
public_data_dict[frame_name] = frame_dict
# Re-position the json file to each sample data folder
json_file_name = st + '_day03.json'
json_file_full_path = os.path.join(sample_json_label_path, st, json_file_name)
if not os.path.isfile(json_file_full_path):
print("File {} does not exist!".format(json_file_full_path))
# Convert image data
image_data_path = os.path.join(set_path, 'Data')
mkdir_p(image_data_path)
target_json_folder = os.path.join(sample_data_path, set_name, 'json_datafactory_v2')
mkdir_p(target_json_folder)
inference_image_data_path = os.path.join(inference_set_path, 'Data')
mkdir_p(inference_image_data_path)
inference_target_json_folder = os.path.join(inference_set_path, 'json_datafactory_v2')
mkdir_p(inference_target_json_folder)
# Create 5 copies of the data (for demonstration purposes)
for k in range(0, 5):
with open(json_file_full_path, 'r') as json_file:
json_reader = json.load(json_file)
user_name = st + '-' + str(k)
user_data_path = os.path.join(image_data_path, user_name)
mkdir_p(user_data_path)
entry = []
for frame_json in json_reader:
if 'annotations' not in frame_json:
continue
frame_path = frame_json['filename']
frame_data_path = os.path.join(data_path, frame_path)
if not os.path.isfile(frame_data_path):
raise ValueError('Image file does not exist in path {}.'.format(frame_data_path))
img = cv2.imread(frame_data_path)
height, width, _ = img.shape
assert height > 0 and width > 0
if frame_path not in public_data_dict.keys():
print("Data {} does not exist!".format(frame_path))
continue
file_name = 'frame_' + public_data_dict[frame_path]['screen_y'] + \
'_' + public_data_dict[frame_path]['screen_x'] + \
'_' + str(public_data_dict[frame_path]['id']) + '.png'
update_frame_path = os.path.join(user_data_path, file_name)
cv2.imwrite(update_frame_path, img)
frame_json['filename'] = update_frame_path
entry.append(frame_json)
update_json_file = st + '-day03' + '_' + user_name + '.json'
target_json_full_path = os.path.join(target_json_folder, update_json_file)
with open(target_json_full_path, 'w') as updated_json:
json.dump(entry, updated_json, indent=4)
# Use the provided data copy as inference examples
with open(json_file_full_path, 'r') as json_file:
json_reader = json.load(json_file)
entry = []
for frame_json in json_reader:
if 'annotations' not in frame_json:
continue
frame_path = frame_json['filename']
if frame_path not in public_data_dict.keys():
print("Data {} does not exist!".format(frame_path))
continue
frame_data_path = os.path.join(data_path, frame_path)
if not os.path.isfile(frame_data_path):
print("Image file {} does not exist!".format(frame_data_path))
continue
img = cv2.imread(frame_data_path)
height, width, _ = img.shape
assert height > 0 and width > 0
file_name = 'frame_' + public_data_dict[frame_path]['screen_y'] + \
'_' + public_data_dict[frame_path]['screen_x'] + \
'_' + str(public_data_dict[frame_path]['id']) + '.png'
update_frame_path = os.path.join(inference_image_data_path, file_name)
cv2.imwrite(update_frame_path, img)
frame_json['filename'] = file_name
entry.append(frame_json)
target_json_full_path = os.path.join(inference_target_json_folder, json_file_name)
with open(target_json_full_path, 'w') as updated_json:
json.dump(entry, updated_json, indent=4)
def main(args=None):
"""Main function to parse MPIIFaceGaze public dataset.
Args:
args (list): input argument
"""
args = parse_args(args)
data_path = args.data_path
json_label_root_path = args.json_label_root_path
convert_data(data_path, json_label_root_path)
if __name__ == "__main__":
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/gazenet/mpiifacegaze_convert.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT DSSD example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/dssd/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT SSD example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/ssd/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to generate splitted dataset for SSD/DSSD/Retinanet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Generate splitted dataset for SSD/DSSD/Retinanet tutorial')
parser.add_argument(
"--input_image_dir",
type=str,
required=True,
help="Input directory to KITTI training dataset images."
)
parser.add_argument(
"--input_label_dir",
type=str,
required=True,
help="Input directory to KITTI training dataset labels."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TAO split dataset."
)
parser.add_argument(
"--val_split",
type=int,
required=False,
default=10,
help="Percentage of training dataset for generating val dataset"
)
return parser.parse_args(args)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
img_files = []
for file_name in os.listdir(args.input_image_dir):
if file_name.split(".")[-1] == "png":
img_files.append(file_name)
total_cnt = len(img_files)
val_ratio = float(args.val_split) / 100.0
val_cnt = int(total_cnt * val_ratio)
train_cnt = total_cnt - val_cnt
val_img_list = img_files[0: val_cnt]
train_img_list = img_files[val_cnt:]
print(f"Total {total_cnt} samples in KITTI training dataset")
print(f"{train_cnt} for train and {val_cnt} for val")
# Create split
os.makedirs(os.path.join(args.output_dir, "training"), exist_ok=True)
os.makedirs(os.path.join(args.output_dir, "val"), exist_ok=True)
train_target_img_path = os.path.join(args.output_dir, "training", "image")
train_target_label_path = os.path.join(args.output_dir, "training", "label")
os.makedirs(train_target_img_path, exist_ok=True)
os.makedirs(train_target_label_path, exist_ok=True)
val_target_img_path = os.path.join(args.output_dir, "val", "image")
val_target_label_path = os.path.join(args.output_dir, "val", "label")
os.makedirs(val_target_img_path, exist_ok=True)
os.makedirs(val_target_label_path, exist_ok=True)
for img_name in train_img_list:
label_name = img_name.split(".")[0] + ".txt"
shutil.copyfile(os.path.join(args.input_image_dir, img_name),
os.path.join(train_target_img_path, img_name))
shutil.copyfile(os.path.join(args.input_label_dir, label_name),
os.path.join(train_target_label_path, label_name))
print("Finished copying training set")
for img_name in val_img_list:
label_name = img_name.split(".")[0] + ".txt"
shutil.copyfile(os.path.join(args.input_image_dir, img_name),
os.path.join(val_target_img_path, img_name))
shutil.copyfile(os.path.join(args.input_label_dir, label_name),
os.path.join(val_target_label_path, label_name))
print("Finished copying validation set")
if __name__ == "__main__":
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/ssd/generate_split.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
"""TLT FpeNet example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/fpenet/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""Helper script to sample calibration data for INT8 post-training quantization."""
import argparse
import json
import os
import random
import cv2
import numpy as np
# Color definition for stdout logs.
CRED = '\033[91m'
CGREEN = '\033[92m'
CYELLOW = '\033[93m'
CEND = '\033[0m'
def build_command_line_parser(parser=None):
"""
Sample subset of given dataset for INT8 calibration.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='sample_calibration_images',
description='Sample Calibration Images.')
parser.add_argument(
'-a',
'--annotation_file',
required=True,
help='Path to the annotation file.')
parser.add_argument(
'-oi',
'--old_image_root_dir',
required=True,
help='Path to old root image directory.')
parser.add_argument(
'-ni',
'--new_image_root_dir',
required=True,
help='Path to new root image directory.')
parser.add_argument(
'-o',
'--output_image_root_dir',
required=True,
help='Output file name.')
parser.add_argument(
'-nk',
'--num_keypoints',
required=False,
type=int,
default=80,
help='Number of keypoints.')
parser.add_argument(
'-n',
'--num_images',
required=False,
type=int,
default=100,
help='Number of images to sample for calibration.')
parser.add_argument(
'-mi',
'--model_input_dim',
required=False,
type=int,
default=80,
help='Input size of model input.')
parser.add_argument(
"-r",
"--randomize",
action='store_true',
help="Include this flag to randomize the sampling of data.")
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def parse_json_contents(jsonFile, old_image_root, new_image_root, num_keypoints=80):
'''
Function to read ground truth json file.
Args:
jsonFile (str): Path of json file.
old_image_root (str): Old/original image root path.
new_image_root (str): New image root path.
num_keypoints (int): Number of keypoints to extract from data json.
Returns:
dataset (list): list of samples; each sample is a dict with 'img_path' and 'landmarks'.
'''
with open(jsonFile, 'r') as f_json:
json_data = json.load(f_json)
dataset = list()
for img in json_data:
sample = dict()
sample['img_path'] = ''
sample['landmarks'] = np.zeros((num_keypoints, 2))
try:
fname = str(img['filename'])
fname = fname.replace(old_image_root, new_image_root)
if not os.path.exists(fname):
print(CRED + 'Image does not exist: {}'.format(fname) + CEND)
continue
# Start collecting points information from the json file.
x = [0] * num_keypoints
y = [0] * num_keypoints
for chunk in img['annotations']:
if 'fiducialpoints' not in chunk['class'].lower():
continue
points_data = (point for point in chunk if ('class' not in point and
'version' not in point))
for point in points_data:
number = int(
''.join(c for c in str(point) if c.isdigit()))
if 'x' in str(point).lower() and number <= num_keypoints:
x[number - 1] = str(int(float(chunk[point])))
if 'y' in str(point).lower() and number <= num_keypoints:
y[number - 1] = str(int(float(chunk[point])))
sample = dict()
sample['img_path'] = fname
sample['landmarks'] = np.asarray([x, y]).T
dataset.append(sample)
except Exception as e:
print(CRED + str(e) + CEND)
return dataset
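# The label json read above is a list of per-image records shaped roughly
# like this sketch (field names taken from the parser; values are
# placeholders):
#
#   [{"filename": "/data/images/0001.png",
#     "annotations": [{"class": "FiducialPoints",
#                      "P1x": 210.0, "P1y": 305.0,
#                      "P2x": 214.0, "P2y": 311.0}]}]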
def get_bbox(x1, y1, x2, y2):
'''
Function to get normalized bounding box.
This function makes the bounding box square by
expanding the smaller of the width and height.
Args:
x1 (int): x_min value of bbox.
y1 (int): y_min value of bbox.
x2 (int): x_max value of bbox.
y2 (int): y_max value of bbox.
Returns:
Normalized bounding box coordinates in form [x1, y1, x2, y2].
'''
x_start = int(np.floor(x1))
x_end = int(np.ceil(x2))
y_start = int(np.floor(y1))
y_end = int(np.ceil(y2))
width = np.ceil(x_end - x_start)
height = np.ceil(y_end - y_start)
if width < height:
diff = height - width
x_start -= (np.ceil(diff/2.0))
x_end += (np.floor(diff/2.0))
elif width > height:
diff = width - height
y_start -= (np.ceil(diff/2.0))
y_end += (np.floor(diff/2.0))
width = x_end - x_start
height = y_end - y_start
assert width == height
rect_init_square = [int(x_start), int(y_start), int(width), int(height)]
return rect_init_square
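# Worked example: get_bbox(10, 20, 50, 40) starts from a 40x20 rectangle;
# since width > height, the height is padded by 10 px on each side, giving
# the square [10, 10, 40, 40] as (x_start, y_start, width, height).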
def enlarge_bbox(bbox, ratio=1.0):
'''
Enlarges the bounding box by a scaling factor.
Args:
bbox (list): Bounding box coordinates of the form [x1, y1, x2, y2].
ratio (float): Bounding box enlargement scale/ratio.
Returns:
Scaled bounding box coordinates.
'''
x_start, y_start, width, height = bbox
x_end = x_start + width
y_end = y_start + height
assert width == height, 'width %s is not equal to height %s'\
% (width, height)
change = ratio - 1.0
shift = int((change/2.0)*width)
x_start_new = int(np.floor(x_start - shift))
x_end_new = int(np.ceil(x_end + shift))
y_start_new = int(np.floor(y_start - shift))
y_end_new = int(np.ceil(y_end + shift))
# Assertion for increase length.
width = int(x_end_new - x_start_new)
height = int(y_end_new - y_start_new)
assert height == width
rect_init_square = [x_start_new, y_start_new, width, height]
return rect_init_square
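# Worked example: enlarge_bbox([10, 10, 40, 40], ratio=1.25) shifts every
# side outwards by int((0.25 / 2) * 40) = 5 px and returns [5, 5, 50, 50].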
def detect_bbox(kpts, img_size, dist_ratio=1.0, num_kpts=80):
'''
Utility to get the bounding box using only kpt information.
This method gets the kpts and the original image size.
Then, it then gets a square encompassing all key-points and
later enlarges that by dist_ratio.
Args:
kpts: the kpts in either format of 1-dim of size #kpts * 2
or 2-dim of shape [#kpts, 2].
img_size: a 2-value tuple indicating the size of the original image
with format (width_size, height_size)
dist_ratio: the ratio by which the original key-points to be enlarged.
num_kpts (int): Number of keypoints.
Returns:
bbox with values (x_start, y_start, width, height).
'''
x_min = np.nanmin(kpts[:, 0])
x_max = np.nanmax(kpts[:, 0])
y_min = np.nanmin(kpts[:, 1])
y_max = np.nanmax(kpts[:, 1])
bbox = get_bbox(x_min, y_min, x_max, y_max)
# Enlarge the bbox by a ratio.
rect = enlarge_bbox(bbox, dist_ratio)
# Ensure the enlarged bounding box stays within image bounds.
if((rect[0] < 0) or
(rect[1] < 0) or
(rect[2] + rect[0] > img_size[0]) or
(rect[3] + rect[1] > img_size[1])):
return None
return rect
def main(cl_args=None):
"""Sample subset of a given dataset for INT8 calibration based on user arguments.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args)
num_images = args.num_images
annotation_file = args.annotation_file
old_image_root_dir = args.old_image_root_dir
new_image_root_dir = args.new_image_root_dir
output_image_root_dir = args.output_image_root_dir
model_input_dim = args.model_input_dim
num_keypoints = args.num_keypoints
# Create output folder
if not os.path.exists(output_image_root_dir):
os.makedirs(output_image_root_dir)
elif len(os.listdir(output_image_root_dir)):
raise Exception("Output directory contains files! Please specify a valid output directory.")
# Initialize the dataset and read image ids
jsondata = parse_json_contents(annotation_file, old_image_root_dir,
new_image_root_dir, num_keypoints)
# Randomize the dataset
if args.randomize:
random.shuffle(jsondata)
N = len(jsondata)
count = 0
for i in range(N):
landmarks = jsondata[i]['landmarks'].astype('float')
image_path = jsondata[i]['img_path']
image = cv2.imread(image_path)
if image is None:
print(CRED + 'Bad image:{}'.format(image_path) + CEND)
continue
image_shape = image.shape
bbox = detect_bbox(kpts=landmarks,
img_size=(image_shape[1], image_shape[0]))
if bbox is None:
continue
# crop face bbox and resize
img = image[bbox[1]:bbox[1] + bbox[3], bbox[0]:bbox[0] + bbox[2], :]
img = cv2.resize(img, (model_input_dim, model_input_dim), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(os.path.join(output_image_root_dir, image_path.replace('/', '_')), img)
count = count + 1
# Check if enough images have been selected
if count >= num_images:
break
print(CYELLOW + 'Number of images selected: {}'.format(count) + CEND)
if __name__ == '__main__':
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/fpenet/sample_calibration_images.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
"""FPENet data conversion utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import numpy as np
import json
def get_keypoints_from_file(keypoints_file):
'''
This function reads the keypoints file from afw format.
Input:
keypoints_file (str): Path to the keypoints file.
Output:
keypoints (np.array): Keypoints in numpy format [[x, y], [x, y]].
'''
keypoints = []
with open(keypoints_file) as fid:
for line in fid:
if "version" in line or "points" in line or "{" in line or "}" in line:
continue
else:
loc_x, loc_y = line.strip().split(sep=" ")
keypoints.append([float(loc_x), float(loc_y)])
keypoints = np.array(keypoints, dtype=np.float)
assert keypoints.shape[1] == 2, "Keypoints should be 2D."
return keypoints
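# The .pts files follow the common 68-point AFW/300-W layout; the reader
# above skips the header and brace lines and keeps only coordinate pairs:
#
#   version: 1
#   n_points: 68
#   {
#   201.35 305.29
#   ...
#   }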
def convert_dataset(afw_data_path, output_json_path, afw_image_save_path, key_points=80):
'''
Function to convert afw dataset to Sloth format json.
Input:
afw_data_path (str): Path to afw data folder.
output_json_path (str): Path to output json file.
afw_image_save_path (str): Image paths to use in json.
Returns:
None
'''
# get dataset file lists
all_files = os.listdir(afw_data_path)
images = [x for x in all_files if x.endswith('.jpg')]
output_folder = os.path.dirname(output_json_path)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# read and convert to sloth format
sloth_data = []
for image in images:
image_path = os.path.join(afw_data_path, image)
image_read = cv2.imread(image_path)
if image_read is None:
print('Bad image:{}'.format(image_path))
continue
# convert image to png
image_png = image.replace('.jpg', '.png')
cv2.imwrite(os.path.join(afw_data_path, image_png), image_read)
image_data = {}
image_data['filename'] = os.path.join(afw_image_save_path, image_png)
image_data['class'] = 'image'
annotations = {}
annotations['tool-version'] = '1.0'
annotations['version'] = 'v1'
annotations['class'] = 'FiducialPoints'
keypoint_file = image.split(".")[-2] + ".pts"
image_keypoints = get_keypoints_from_file(os.path.join(afw_data_path, keypoint_file))
if key_points == 80:
for num, keypoint in enumerate(image_keypoints):
annotations["P{}x".format(num+1)] = keypoint[0]
annotations["P{}y".format(num+1)] = keypoint[1]
# fill in dummy keypoints for keypoints 69 to 80
for num in range(69, 81, 1):
annotations["P{}x".format(num)] = image_keypoints[0][0]
annotations["P{}y".format(num)] = image_keypoints[0][1]
annotations["P{}occluded".format(num)] = True
elif key_points == 10:
key_id = 1
for num, keypoint in enumerate(image_keypoints):
# change to 10-points dataset:
if (num+1) in [1, 9, 17, 20, 25, 39, 45, 34, 49, 55]:
annotations["P{}x".format(key_id)] = keypoint[0]
annotations["P{}y".format(key_id)] = keypoint[1]
key_id += 1
else:
raise ValueError("This script only generates 10 & 80 keypoints dataset.")
image_data['annotations'] = [annotations]
sloth_data.append(image_data)
# save json
with open(output_json_path, "w") as config_file:
json.dump(sloth_data, config_file, indent=4)
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for FPENet tutorial')
parser.add_argument(
"--afw_data_path",
type=str,
required=True,
help="Input directory to AFW dataset imnages and ground truth keypoints."
)
parser.add_argument(
"--output_json_path",
type=str,
required=True,
help="Output json file path to save to."
)
parser.add_argument(
"--afw_image_save_path",
type=str,
required=True,
help="Image path to use in jsons."
)
parser.add_argument(
"--num_key_points",
type=int,
default=80,
help="Number of key points."
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_args()
convert_dataset(args.afw_data_path, args.output_json_path, args.afw_image_save_path,
args.num_key_points)
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/fpenet/data_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
import argparse
import numpy as np
import h5py
import cv2
import os
import csv
def build_command_line_parser(parser=None):
"""Build command line parser for dataset_convert.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
prog='process_cohface',
description='Convert COHFACE into heartratenet api compatible dataset',
)
parser.add_argument('-i', '--input_path',
type=str,
required=True,
help='Input path for COHFACE, this is the root of the dataset')
parser.add_argument('-o', '--output_path',
type=str,
required=True,
help='Output path for COHFACE, this is the root of the dataset')
parser.add_argument('-start_subject_id', '--start_subject_id',
type=int,
required=True,
help='Start subject id for COHFACE')
parser.add_argument('-end_subject_id', '--end_subject_id',
type=int,
required=True,
help='End subject id for COHFACE')
parser.add_argument('-b', '--breathing_rate',
action='store_true',
default=False,
help='If true, processes the dataset for breathing rate, else exports heart rate')
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments.
Args:
args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args_parsed = parser.parse_args()
return args_parsed
def get_timestamp_from_video(video_filename):
"""get video timestamp.
Args:
video_filename (str): video filename
Returns:
timestamps(list of float): a list of timestamps for each frame in video
"""
cap = cv2.VideoCapture(video_filename)
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC) / 1000]  # convert MSEC to SEC
while cap.isOpened():
frame_exists, _ = cap.read()
if frame_exists:
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
cap.release()
return timestamps
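# Usage sketch (the path is a placeholder). The returned list pairs index i
# with the capture position, in seconds, observed just before frame i is
# decoded, so it holds one more entry than there are frames:
#
#   stamps = get_timestamp_from_video('/path/to/data.avi')
#   # stamps[0] == 0.0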
def process_subject(path, output, breathing=False):
"""Convert COHFACE data format for one subject session.
Args:
path (str): input dataset path
output (str): output dataset path after format conversion
breathing (bool): if True, export the respiration signal instead of the pulse signal
Returns:
None
"""
video_file = os.path.join(path, 'data.avi')
vidcap = cv2.VideoCapture(video_file)
fps = vidcap.get(cv2.CAP_PROP_FPS)
timestamps = [vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000] # convert MSEC to SEC
print(f'Processing {video_file}, fps {fps}')
subject_file = h5py.File(os.path.join(path, 'data.hdf5'), 'r')
# Processing video
count = 0
while vidcap.isOpened():
success, image = vidcap.read()
if success:
cv2.imwrite(os.path.join(output, 'images', format(count, '04d') + '.bmp'), image)
count += 1
timestamps.append(vidcap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
else:
break
vidcap.release()
# Processing image timestamps
image_file = os.path.join(output, 'image_timestamps.csv')
with open(image_file, 'w') as file:
header = ['ID', 'Time']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for frame, time in zip(range(count), timestamps):
writer.writerow({'ID': frame,
'Time': time})
pulse_time = subject_file['time']
if breathing:
pulse = subject_file['respiration']
else:
pulse = subject_file['pulse']
# Processing pulse
pulse_file = os.path.join(output, 'ground_truth.csv')
with open(pulse_file, 'w') as file:
header = ['Time', 'PulseWaveform']
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for time, pulse_val in zip(pulse_time, pulse):
writer.writerow({'Time': time,
'PulseWaveform': pulse_val})
def main(cl_args=None):
"""process cohface.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args_parsed = parse_command_line(cl_args)
input_path = args_parsed.input_path
output_path = args_parsed.output_path
start_subject_id = args_parsed.start_subject_id
end_subject_id = args_parsed.end_subject_id
breathing_flag = args_parsed.breathing_rate
session_number = 4
for sub in range(start_subject_id, end_subject_id):
for fol in range(session_number):
input_dir = os.path.join(input_path, str(sub), str(fol))
output_dir = os.path.join(output_path, str(sub), str(fol))
os.makedirs(os.path.join(output_dir, 'images'))
process_subject(input_dir, output_dir, breathing=breathing_flag)
if __name__ == '__main__':
main()
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/heartratenet/process_cohface.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT Multitask Classification example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/multitask_classification/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""Helper script to sample calibration data for INT8 post-training quantization."""
import argparse
import os
import random
import subprocess
import joblib
from pycocotools.coco import COCO
def build_command_line_parser(parser=None):
"""
Sample subset of a given dataset for INT8 calibration.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='sample_calibration_images',
description='Sample Calibration Images.')
parser.add_argument(
'-a',
'--annotation_file',
required=True,
help='Path to the annotation file.')
parser.add_argument(
'-i',
'--image_root_dir',
required=True,
help='Path to root image directory.')
parser.add_argument(
'-o',
'--output_image_root_dir',
required=True,
help='Output file name.')
parser.add_argument(
'-n',
'--num_images',
required=False,
type=int,
default=500,
help='Number of images to sample for calibration.')
parser.add_argument(
'-pth',
'--min_persons_per_image',
required=False,
type=int,
default=1,
help='Threshold for number of persons per selected image.')
parser.add_argument(
'-kth',
'--min_kpts_per_person',
required=False,
type=int,
default=5,
help='Threshold for number of keypoints per selected person.')
parser.add_argument(
"-r",
"--randomize",
action='store_true',
help="Include this flag to randomize the sampling of data.")
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def main(cl_args=None):
"""Sample subset of a given dataset for INT8 calibration based on user arguments.
Args:
        cl_args (list): List of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args)
num_images = args.num_images
annotation_file = args.annotation_file
image_root_dir = args.image_root_dir
output_image_root_dir = args.output_image_root_dir
min_persons_per_image = args.min_persons_per_image
min_kpts_per_person = args.min_kpts_per_person
# Create output folder
if not os.path.exists(output_image_root_dir):
os.makedirs(output_image_root_dir)
elif len(os.listdir(output_image_root_dir)):
raise Exception("Output directory contains files! Please specify a valid output directory.")
# Initialize the dataset and read image ids
dataset = COCO(annotation_file)
image_ids = list(dataset.imgs.keys())
# Randomize the dataset
if args.randomize:
random.shuffle(image_ids)
selected_images = []
for _, image_id in enumerate(image_ids):
filename = dataset.imgs[image_id]['file_name']
# Get annotations
annotation_ids = dataset.getAnnIds(imgIds=image_id)
image_annotation = dataset.loadAnns(annotation_ids)
num_persons = len(image_annotation)
# Check if below given threshold
if num_persons < min_persons_per_image:
continue
qualified_person_count = 0
for pidx in range(num_persons):
num_keypoints = image_annotation[pidx]["num_keypoints"]
if num_keypoints < min_kpts_per_person:
continue
qualified_person_count += 1
if qualified_person_count < min_persons_per_image:
continue
selected_images.append(filename)
        # Stop once num_images images have been selected (>= avoids an off-by-one
        # that would otherwise sample one extra image)
        if len(selected_images) >= num_images:
            break
if len(selected_images) < num_images:
print("WARNING: Only {} / {} images sampled.".format(len(selected_images), num_images))
copy_commands = []
# Get commands to copy the required images to destination folder
    for filename in selected_images:
source_image_path = os.path.join(image_root_dir, filename)
dest_image_path = os.path.join(output_image_root_dir, filename)
if not os.path.exists(os.path.dirname(dest_image_path)):
os.makedirs(os.path.dirname(dest_image_path))
command = 'cp {} {}'.format(source_image_path, dest_image_path)
copy_commands.append(command)
# Launch parallel jobs to copy the images
joblib.Parallel(n_jobs=joblib.cpu_count(), verbose=10)(
joblib.delayed(subprocess.call)(command, shell=True) for command in copy_commands)
if __name__ == '__main__':
main()
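# Example invocation (paths are placeholders; flags match the parser defined above):
#   python sample_calibration_images.py \
#       -a annotations/person_keypoints_train2017.json \
#       -i images/train2017 \
#       -o calibration_images \
#       -n 500 -pth 1 -kth 5 -r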
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/bpnet/sample_calibration_images.py |
"""
Converts Retail Product Checkout (https://www.kaggle.com/datasets/diyer22/retail-product-checkout-dataset) dataset to classification dataset. Ready for MLRecogNet training.
"""
import os
import zipfile
import glob
import cv2
from pycocotools.coco import COCO
from tqdm import tqdm
import numpy as np
import shutil
def create_reference_set(dataset_dir, ref_dir, ref_num=100):
    """Randomly sample up to ref_num images per class into a reference set."""
os.makedirs(ref_dir, exist_ok=True)
classes = os.listdir(dataset_dir)
print(f"Creating reference set from {dataset_dir}...")
for class_name in tqdm(classes):
samples = os.listdir(os.path.join(dataset_dir, class_name))
if not os.path.exists(os.path.join(ref_dir, class_name)):
os.makedirs(os.path.join(ref_dir, class_name))
if len(samples) >= ref_num:
ref_samples = np.random.choice(samples, ref_num, replace=False)
else:
print(f"Warning: {class_name} has only {len(samples)} samples. Copying all samples to reference set.")
ref_samples = samples
for sample in ref_samples:
try:
shutil.copy(os.path.join(dataset_dir, class_name, sample), os.path.join(ref_dir, class_name, sample))
            except OSError as err:
                print(f"Warning: failed to copy {sample}: {err}")
print("Done!")
def crop_images(file_path, bbox, class_id, output_dir):
    """Crop one annotated object from an image and save it under its class folder."""
file_name = os.path.basename(file_path)
class_folder = os.path.join(output_dir, class_id)
if not os.path.exists(class_folder):
os.mkdir(class_folder)
image_count = len(glob.glob( os.path.join(class_folder, file_name+"*.jpg")))
new_file_name = os.path.join(class_folder, file_name + f"_{image_count+1}.jpg")
if os.path.exists(new_file_name):
# skip if file already exists
return
# start processing image
    # COCO bbox format is [x, y, width, height]
    x1, y1, box_w, box_h = bbox
    # skip if bbox is too small
    if box_w < 120 or box_h < 150:
        return
    image = cv2.imread(file_path)
    if image is None:
        print(f"{file_path} is not a valid image file")
        return
    h, w, _ = image.shape
    # expand the box by 7% of its size on each side (~14% total margin)
    cropped_image = image[max(int(y1 - 0.07 * box_h), 0):min(int(y1 + 1.07 * box_h), h),
                          max(int(x1 - 0.07 * box_w), 0):min(int(x1 + 1.07 * box_w), w)]
    # resize to 256x256 for faster processing and training; note that cv2.resize
    # takes the interpolation mode as a keyword argument (the third positional
    # argument is dst), and new_file_name already contains the class folder
    resized_cropped_image = cv2.resize(cropped_image, (256, 256), interpolation=cv2.INTER_AREA)
    cv2.imwrite(new_file_name, resized_cropped_image)
# load dataset
data_root_dir = os.path.join(os.environ['HOST_DATA_DIR'],"metric_learning_recognition")
path_to_zip_file = os.path.join(data_root_dir,"retail-product-checkout-dataset.zip")
directory_to_extract_to = os.path.join(data_root_dir, "retail-product-checkout-dataset")
processed_classification_dir = os.path.join(data_root_dir,"retail-product-checkout-dataset_classification_demo")
## unzip dataset
if not os.path.exists(processed_classification_dir):
os.makedirs(processed_classification_dir)
print("Unzipping dataset...")
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
directory_to_extract_to = os.path.join(directory_to_extract_to, "retail_product_checkout")
for dataset in ["train", "val", "test"]:
dataset_dir = os.path.join(directory_to_extract_to, dataset+"2019")
annotation_file = os.path.join(directory_to_extract_to, "instances_"+dataset+"2019.json")
output_dir = os.path.join(processed_classification_dir, dataset)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
## load coco dataset
print(f"Loading COCO {dataset} dataset...")
coco_label = COCO(annotation_file)
# crop images to classification data
for img_object in tqdm(coco_label.dataset["images"]):
image_path = os.path.join(dataset_dir, img_object["file_name"])
# remove top view images
if "camera2" in image_path:
continue
image_id = img_object["id"]
annotation_ids = coco_label.getAnnIds(imgIds=image_id)
for annot in coco_label.loadAnns(annotation_ids):
bbox = annot["bbox"]
class_id = annot["category_id"]
category = coco_label.loadCats(class_id)[0]
class_name = category["supercategory"] + "_" + category["name"]
crop_images(image_path, bbox, class_name, output_dir)
# extract a reference set from training set
## fixed random seed for reproducibility
np.random.seed(0)
create_reference_set(
    os.path.join(processed_classification_dir, "train"),
    os.path.join(processed_classification_dir, "reference"),
    ref_num=100)
# split out unknown classes
# select 20% classes as unknown classes
class_list = os.listdir(os.path.join(processed_classification_dir, "train"))
total_class_num = len(class_list)
unknown_classes = np.random.choice(class_list, int(total_class_num*0.2), replace=False)
known_classes = [c for c in class_list if c not in unknown_classes]
known_classes_dir = os.path.join(processed_classification_dir, "known_classes")
unknown_classes_dir = os.path.join(processed_classification_dir, "unknown_classes")
for dataset in ["train", "val", "test", "reference"]:
known_classes_dataset_dir = os.path.join(known_classes_dir, dataset)
unknown_classes_dataset_dir = os.path.join(unknown_classes_dir, dataset)
if not os.path.exists(known_classes_dataset_dir):
os.makedirs(known_classes_dataset_dir)
if not os.path.exists(unknown_classes_dataset_dir):
os.makedirs(unknown_classes_dataset_dir)
for class_name in tqdm(known_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(known_classes_dataset_dir, class_name))
for class_name in tqdm(unknown_classes):
class_dir = os.path.join(processed_classification_dir, dataset, class_name)
os.rename(class_dir, os.path.join(unknown_classes_dataset_dir, class_name))
# remove old folders
for dataset in ["train", "val", "test", "reference"]:
shutil.rmtree(os.path.join(processed_classification_dir, dataset))
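# Expected input layout before running this script (a sketch; HOST_DATA_DIR must be
# set and the Kaggle zip downloaded manually):
#   $HOST_DATA_DIR/metric_learning_recognition/retail-product-checkout-dataset.zip
# The script produces known_classes/ and unknown_classes/ trees, each with
# train/val/test/reference splits of 256x256 crops.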
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/metric_learning_recognition/process_retail_product_checkout_dataset.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT DetectNet v2 example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/detectnet_v2/__init__.py |
import os
from os.path import join as join_path
import re
import glob
import shutil
from random import shuffle
from tqdm import tqdm
DATA_DIR=os.environ.get('LOCAL_DATA_DIR')
source_dir_orig = join_path(DATA_DIR, "VOCdevkit/VOC2012")
target_dir_orig = join_path(DATA_DIR, "formatted")
suffix = '_trainval.txt'
classes_dir = join_path(source_dir_orig, "ImageSets", "Main")
images_dir = join_path(source_dir_orig, "JPEGImages")
classes_files = glob.glob(classes_dir+"/*"+suffix)
for file in classes_files:
# get the filename and make output class folder
classname = os.path.basename(file)
if classname.endswith(suffix):
classname = classname[:-len(suffix)]
target_dir_path = join_path(target_dir_orig, classname)
if not os.path.exists(target_dir_path):
os.makedirs(target_dir_path)
else:
continue
with open(file) as f:
content = f.readlines()
for line in content:
            tokens = re.split(r'\s+', line)
if tokens[1] == '1':
# copy this image into target dir_path
target_file_path = join_path(target_dir_path, tokens[0] + '.jpg')
src_file_path = join_path(images_dir, tokens[0] + '.jpg')
shutil.copyfile(src_file_path, target_file_path)
SOURCE_DIR=os.path.join(DATA_DIR, 'formatted')
TARGET_DIR=os.path.join(DATA_DIR,'split')
# list dir
dir_list = next(os.walk(SOURCE_DIR))[1]
# for each dir, create a new dir in split
for dir_i in tqdm(dir_list):
newdir_train = os.path.join(TARGET_DIR, 'train', dir_i)
newdir_val = os.path.join(TARGET_DIR, 'val', dir_i)
newdir_test = os.path.join(TARGET_DIR, 'test', dir_i)
if not os.path.exists(newdir_train):
os.makedirs(newdir_train)
if not os.path.exists(newdir_val):
os.makedirs(newdir_val)
if not os.path.exists(newdir_test):
os.makedirs(newdir_test)
img_list = glob.glob(os.path.join(SOURCE_DIR, dir_i, '*.jpg'))
# shuffle data
shuffle(img_list)
for j in range(int(len(img_list) * 0.7)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'train', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.7), int(len(img_list)*0.8)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'val', os.path.join(dir_i, os.path.basename(img_list[j]))))
for j in range(int(len(img_list) * 0.8), len(img_list)):
shutil.copyfile(img_list[j], os.path.join(TARGET_DIR, 'test', os.path.join(dir_i, os.path.basename(img_list[j]))))
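# Resulting layout (a sketch; the 70/10/20 split follows the ratios coded above):
#   $LOCAL_DATA_DIR/split/{train,val,test}/<class>/*.jpg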
print('Done splitting dataset.')
 | tao_tutorials-main | notebooks/tao_launcher_starter_kit/classification_tf1/tao_voc/prepare_voc.py |
 | tao_tutorials-main | notebooks/tao_launcher_starter_kit/gesturenet/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
"""Helper script to sample calibration data for INT8 post-training quantization."""
import argparse
import json
import os
import random
import cv2
# Color definition for stdout logs.
CYELLOW = '\033[93m'
CEND = '\033[0m'
def build_command_line_parser(parser=None):
"""
    Build a command line parser for sampling calibration images.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='sample_calibration_images',
description='Sample Calibration Images.')
parser.add_argument(
'-a',
'--annotation_file',
required=True,
help='Path to the annotation file.')
parser.add_argument(
'-i',
'--image_root_dir',
required=True,
help='Path to root image directory.')
parser.add_argument(
'-o',
'--output_image_root_dir',
required=True,
        help='Path to the output image directory.')
parser.add_argument(
'-n',
'--num_images',
required=False,
type=int,
default=100,
help='Number of images to sample for calibration.')
parser.add_argument(
'-mi',
'--model_input_dim',
required=False,
type=int,
default=160,
help='Input size of model input.')
parser.add_argument(
"-r",
"--randomize",
action='store_true',
help="Include this flag to randomize the sampling of data.")
return parser
def parse_command_line_args(cl_args=None):
"""Parser command line arguments to the trainer.
Args:
cl_args (list): List of strings used as command line arguments.
Returns:
args_parsed: Parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def main(cl_args=None):
"""Sample subset of a given dataset for INT8 calibration based on user arguments.
Args:
        cl_args (list): List of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args)
num_images = args.num_images
annotation_file = args.annotation_file
image_root_dir = args.image_root_dir
output_image_root_dir = args.output_image_root_dir
model_input_dim = args.model_input_dim
# Create output folder
if not os.path.exists(output_image_root_dir):
os.makedirs(output_image_root_dir)
elif len(os.listdir(output_image_root_dir)):
raise Exception("Output directory contains files! Please specify a valid output directory.")
# Initialize the dataset and read image ids
with open(annotation_file) as json_file:
data = json.load(json_file)
images_train = data['train_set']['images']
images_val = data['validation_set']['images']
# Randomize the dataset
if args.randomize:
random.shuffle(images_train)
N = len(images_train)
count = 0
    for i in range(N):
        img_name = images_train[i]['bbox_path']
        image_path = os.path.join(image_root_dir, img_name)
        img = cv2.imread(image_path)
        if img is None:
            print(CYELLOW + 'Skipping unreadable image: {}'.format(image_path) + CEND)
            continue
        img = cv2.resize(img, (model_input_dim, model_input_dim), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(os.path.join(output_image_root_dir, str(count + 1) + '.png'), img)
        count = count + 1
# Check if enough images have been selected
if count >= num_images:
break
print(CYELLOW + 'Number of images selected: {}'.format(count) + CEND)
if __name__ == '__main__':
main()
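# Example invocation (paths are placeholders; flags match the parser defined above):
#   python sample_calibration_images.py \
#       -a data.json -i /data/original -o calibration_images -n 100 -mi 160 -r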
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/gesturenet/sample_calibration_images.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to transform HGR dataset to Label Studio format for Gesturenet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict
import json
import numpy as np
import os
import shutil
from xml.dom import minidom
def mk_dir(path):
"""Create a directory if it doesn't exist.
Args:
path (string): Directory path
"""
if not os.path.exists(path):
try:
os.makedirs(path)
        except OSError:
            # The directory may have been created concurrently; ignore.
            pass
def get_bbox(label_path):
"""Use hgr xml with keypoints to compute tight hand bbox.
Args:
label_path (string): Path to hgr feature point xml with keypoints.
Return:
bbox_label_dict (dict): Dictionary with handbbox in label format required for GestureNet.
"""
bbox_label_dict = {}
bbox_label_dict["type"] = "rectanglelabels"
label = minidom.parse(label_path)
img_metadata = label.getElementsByTagName('IMAGE')[0]
bbox_label_dict["original_width"] = int(
img_metadata.attributes['WIDTH'].value)
bbox_label_dict["original_height"] = int(
img_metadata.attributes['HEIGHT'].value)
bbox_dict = {}
feature_points = label.getElementsByTagName('FeaturePoint')
x_val = [int(fp.attributes['x'].value) for fp in feature_points]
y_val = [int(fp.attributes['y'].value) for fp in feature_points]
x1, x2 = min(x_val), max(x_val)
y1, y2 = min(y_val), max(y_val)
bbox_width = x2 - x1
bbox_height = y2 - y1
bbox_dict["x"] = (float(x1)/bbox_label_dict["original_width"])*100.0
bbox_dict["y"] = (float(y1)/bbox_label_dict["original_height"])*100.0
bbox_dict["width"] = (
float(bbox_width)/bbox_label_dict["original_width"])*100.0
bbox_dict["height"] = (
float(bbox_height)/bbox_label_dict["original_height"])*100.0
bbox_label_dict["value"] = bbox_dict
return bbox_label_dict
def get_gesture_name(img_prefix):
"""Use image filename to extract user id, session id and gesture class.
Args:
img_prefix (string): Name of image without file extension.
Return:
u_id (string): Unique identifier for user.
sess_id (string): Unique name for each recording session.
gesture_class_dict (dict): Dictionary with gesture class in label format required for GestureNet.
"""
gesture_code_label_map = {
"0_A": "random",
"1_A": "random",
"1_P": "thumbs_up",
"2_A": "two",
"2_P": "random",
"3_A": "random",
"3_P": "random",
"5_A": "stop",
"9_A": "ok",
"A_P": "fist",
"B_P": "stop",
"C_P": "random",
"D_P": "random",
"G_A": "random",
"I_P": "random",
"L_P": "random",
"O_P": "ok",
"S_A": "fist",
"S_P": "ok",
"V_A": "two",
"Y_A": "random",
"Y_P": "random"
}
gesture_class_dict = {}
img_prefix_parts = img_prefix.split("_")
sess_id = img_prefix_parts[2]
u_id = img_prefix_parts[3]
gesture_code = "_".join(img_prefix_parts[:2])
if gesture_code in gesture_code_label_map:
gesture_class = gesture_code_label_map[gesture_code]
gesture_dict = {}
gesture_dict["choices"] = []
gesture_dict["choices"].append(gesture_class)
gesture_class_dict["type"] = "choices"
gesture_class_dict["value"] = gesture_dict
return u_id, sess_id, gesture_class_dict
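# Example (hypothetical filename stem matching the pattern parsed above):
#   "2_A_s01_u05" -> u_id "u05", sess_id "s01", gesture class "two"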
def prepare_set_config(user_dict):
"""Create a dummy dataset config with metadata.
Args:
user_dict (dict): Dictionary mapping each user id to a list of session ids.
Return:
set_config (dict): Dictionary with dataset config.
"""
set_config = {}
set_config["set"] = "data"
set_config["users"] = {}
for uid in user_dict:
set_config["users"][uid] = {}
return set_config
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(
description='Transform dataset for GestureNet tutorial')
parser.add_argument(
"--input_image_dir",
type=str,
required=True,
help="Input directory to HGR dataset images."
)
parser.add_argument(
"--input_label_file",
type=str,
required=True,
help="Input path to HGR dataset feature point labels."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT dataset."
)
return parser.parse_args(args)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
target_set_path = os.path.join(args.output_dir, "original", "data")
target_label_path = os.path.join(target_set_path, "annotation")
if not os.path.exists(target_set_path):
mk_dir(target_set_path)
mk_dir(target_label_path)
else:
print("This script will not run as output image path already exists.")
return
total_cnt = 0
user_dict = defaultdict(list)
for img_name in os.listdir(args.input_image_dir):
img_prefix = img_name.split(".")[0]
img_path = os.path.join(args.input_image_dir, img_name)
label_path = os.path.join(args.input_label_file, img_prefix+".xml")
if not os.path.exists(label_path):
print("Error reading feature point xml, Please check data")
return
result_list = []
u_id, sess_id, gesture_class_dict = get_gesture_name(img_prefix)
if len(gesture_class_dict) == 0:
continue
total_cnt += 1
user_dict[u_id].append(sess_id)
result_list.append(gesture_class_dict)
bbox_label_dict = get_bbox(label_path)
result_list.append(bbox_label_dict)
img_dest_folder = os.path.join(target_set_path, u_id, sess_id)
mk_dir(img_dest_folder)
img_dest_path = os.path.join(img_dest_folder, img_name)
label_dest_path = os.path.join(target_label_path, img_prefix+".json")
label_json = {}
label_json["task_path"] = img_dest_path
completion_dict = {}
completion_dict["result"] = result_list
label_json["completions"] = []
label_json["completions"].append(completion_dict)
# write label to disk
with open(label_dest_path, "w") as label_file:
json.dump(label_json, label_file, indent=2)
# copy image to required path
shutil.copyfile(img_path, img_dest_path)
print("Total {} samples in dataset".format(total_cnt))
set_config = prepare_set_config(user_dict)
# write set config to disk
with open(os.path.join(target_set_path, "data.json"), "w") as config_file:
json.dump(set_config, config_file, indent=4)
if __name__ == "__main__":
main()
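# Example invocation (paths are placeholders for the HGR download):
#   python convert_hgr_to_tlt_data.py \
#       --input_image_dir /data/hgr/images \
#       --input_label_file /data/hgr/feature_points \
#       --output_dir /data/gesturenet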
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/gesturenet/convert_hgr_to_tlt_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
            ls = line.strip()
            fields = ls.split()
            if fields[0] in classes:
                print("Dropping ", fields[0])
                continue
            lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
if __name__ == "__main__":
drop_class(sys.argv[1], sys.argv[2].split(','))
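# Example invocation (label directory and class names are placeholders):
#   python drop_class.py /data/kitti/training/label_2 DontCare,Misc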
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/pointpillars/specs/drop_class.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.object3d_kitti import (
get_objects_from_label
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.calibration_kitti import (
Calibration
)
def parse_args():
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
for lab in os.listdir(label_dir):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
generate_lidar_labels(args.label_dir, args.calib_dir, args.output_dir)
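# Example invocation (KITTI-style directories; paths are placeholders):
#   python gen_lidar_labels.py -l training/label_2 -c training/calib -o training/label_lidar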
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/pointpillars/specs/gen_lidar_labels.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import sys
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
if __name__ == "__main__":
split(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
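# Example invocation (positional args: list file, lidar dir, label dir,
# output lidar dir, output label dir; paths are placeholders):
#   python kitti_split.py val.txt velodyne label_2 velodyne_val label_val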
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/pointpillars/specs/kitti_split.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import argparse
import numpy as np
from skimage import io
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.calibration_kitti import (
Calibration
)
def parse_args():
parser = argparse.ArgumentParser("Limit LIDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
    """Return a boolean mask of points whose projection lies inside the image."""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
for pts in os.listdir(points_dir):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4]+".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4]+".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
if __name__ == "__main__":
args = parse_args()
generate_lidar_points(
args.points_dir, args.calib_dir,
args.output_dir, args.image_dir
)
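# Example invocation (KITTI-style directories; paths are placeholders):
#   python gen_lidar_points.py -p velodyne -c calib -o velodyne_fov -i image_2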
| tao_tutorials-main | notebooks/tao_launcher_starter_kit/pointpillars/specs/gen_lidar_points.py |