repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py)
---|---|---|---|---|---|---|
alibi-detect
|
alibi-detect-master/doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Hide RemovedInSphinx40Warning. Can remove once upgraded to sphinx>=4.0
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "alibi-detect"
copyright = "2019, Seldon Technologies Ltd"
author = "Seldon Technologies Ltd"
# The short X.Y version
# import alibi_detect
exec(open("../../alibi_detect/version.py").read())
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinxcontrib.apidoc", # automatically generate API docs, see https://github.com/rtfd/readthedocs.org/issues/1139
"sphinxcontrib.bibtex",
"nbsphinx",
"myst_parser",
"sphinx_design",
]
# -- nbsphinx settings -------------------------------------------------------
nbsphinx_execute = "auto"
# Create symlinks for example notebooks
import glob
nb_files = [os.path.basename(f) for f in glob.glob(os.path.join('examples','*.ipynb'))
if not os.path.basename(f).startswith('temp_')]
for nb_file in nb_files:
target = os.path.join('../../examples', nb_file)
if os.path.exists(target):
os.remove(target)
os.symlink(os.path.join('../doc/source/examples', nb_file), target)
# -- Bibliography ------------------------------------------------------------
bibtex_bibfiles = ['refs.bib']
bibtex_default_style = 'unsrtalpha'
# apidoc settings
apidoc_module_dir = "../../alibi_detect"
apidoc_output_dir = "api"
apidoc_excluded_paths = ["**/*test*"]
apidoc_module_first = True
apidoc_separate_modules = True
apidoc_extra_args = ["-d 6"]
# mock imports
# numpy, pandas and matplotlib are not included as these are installed on
# ReadTheDocs PYTHON_VERSION_39 docker image (https://hub.docker.com/r/readthedocs/build/dockerfile/)
autodoc_mock_imports = [
"sklearn",
"skimage",
"requests",
"cv2",
"bs4",
"keras",
"seaborn",
"PIL",
"tensorflow",
"spacy",
"tensorflow_probability",
"scipy",
"prophet",
"torch",
"transformers",
"tqdm",
"dill",
"joblib",
"numba",
"pydantic",
"toml",
"catalogue",
"pykeops"
]
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_logo = '_static/Alibi_Detect_Logo_white.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"logo_only": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# override default theme width
html_css_files = ['theme_overrides.css', 'custom_docs.css'] # override wide tables in RTD theme
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "alibi-detectdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
# - Replace unicode characters with utf8.
# (U+2588 and U+258E are used in tqdm progress bars)
# - Use enumitem for lists to prevent "too deeply nested" latex error
'preamble': r'''
\DeclareUnicodeCharacter{2588}{=}
\DeclareUnicodeCharacter{258E}{|}
\DeclareUnicodeCharacter{274C}{$\times$}
\DeclareUnicodeCharacter{2705}{$\checkmark$}
\usepackage{enumitem}
\setlistdepth{99}
''',
#
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "alibi-detect.tex", "alibi-detect Documentation", "Seldon Technologies Ltd", "manual")]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "alibi-detect", "alibi-detect Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, "alibi-detect", "alibi-detect Documentation", author, "alibi-detect", "One line description of project.", "Miscellaneous")
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'sklearn': ('https://scikit-learn.org/stable/', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- nbsphinx prolog ---------------------------------------------------------
# from https://github.com/vidartf/nbsphinx-link/blob/master/docs/source/conf.py for custom tags
import subprocess
try:
git_rev = subprocess.check_output(["git", "describe", "--exact-match", "HEAD"], universal_newlines=True)
except subprocess.CalledProcessError:
try:
git_rev = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True)
except subprocess.CalledProcessError:
git_rev = ""
if git_rev:
git_rev = git_rev.splitlines()[0] + "/"
nbsphinx_prolog = (
r"""
{% set docname = env.doc2path(env.docname, base=False) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docname }}`__.
__ https://github.com/SeldonIO/alibi-detect/blob/
"""
+ git_rev
+ "doc/source/"
+ r"{{ docname }}"
)
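# Example of what the prolog above resolves to (the notebook name below is a
# made-up placeholder): for a notebook at docname "examples/my_example.ipynb",
# the rendered nbinfo banner links to something like
#   https://github.com/SeldonIO/alibi-detect/blob/<tag-or-sha>/doc/source/examples/my_example.ipynb
# where <tag-or-sha> is the exact tag of HEAD if one exists, otherwise the full
# commit hash; git_rev already carries the trailing slash (or is empty if git
# metadata is unavailable).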
# -- Override order of preference for image formats --------------------------
# Need to set gif above png so that it is chosen over png if present
from sphinx.builders.html import StandaloneHTMLBuilder
StandaloneHTMLBuilder.supported_image_types = [
'image/svg+xml',
'image/gif',
'image/png',
'image/jpeg'
]
# -- myst-parser configuration -----------------------------------------------
# See https://myst-parser.readthedocs.io/en/stable/syntax/optional.html for
# details of available extensions.
myst_enable_extensions = [
"dollarmath",
"amsmath",
"colon_fence",
"smartquotes",
"tasklist",
"html_image",
]
# Create heading anchors for h1 to h3 (useful for local toc's)
myst_heading_anchors = 3
# Below code fixes a problem with sphinx>=3.2.0 processing functions with
# torch.jit.script decorator. Probably occurring because torch is being mocked
# (see https://github.com/sphinx-doc/sphinx/issues/6709).
def call_mock(self, *args, **kw):
from types import FunctionType, MethodType
if args and type(args[0]) in [type, FunctionType, MethodType]:
# Appears to be a decorator, pass through unchanged
return args[0]
return self
from sphinx.ext.autodoc.mock import _MockObject
_MockObject.__call__ = call_mock
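# Rough effect of the patch above (an added note, not executed as part of the build):
# when autodoc's mock stands in for torch, a decorator such as torch.jit.script
# applied to a documented function now returns that function unchanged
# (call_mock sees a class/function/method as the first argument and passes it
# through), so the decorated function can still be introspected and documented.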
| 11,119 | 31.138728 | 139 |
py
|
InfoGAN
|
InfoGAN-master/launchers/run_mnist_exp.py
|
from __future__ import print_function
from __future__ import absolute_import
from infogan.misc.distributions import Uniform, Categorical, Gaussian, MeanBernoulli
import tensorflow as tf
import os
from infogan.misc.datasets import MnistDataset
from infogan.models.regularized_gan import RegularizedGAN
from infogan.algos.infogan_trainer import InfoGANTrainer
from infogan.misc.utils import mkdir_p
import dateutil
import dateutil.tz
import datetime
if __name__ == "__main__":
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
root_log_dir = "logs/mnist"
root_checkpoint_dir = "ckt/mnist"
batch_size = 128
updates_per_epoch = 100
max_epoch = 50
exp_name = "mnist_%s" % timestamp
log_dir = os.path.join(root_log_dir, exp_name)
checkpoint_dir = os.path.join(root_checkpoint_dir, exp_name)
mkdir_p(log_dir)
mkdir_p(checkpoint_dir)
dataset = MnistDataset()
latent_spec = [
(Uniform(62), False),
(Categorical(10), True),
(Uniform(1, fix_std=True), True),
(Uniform(1, fix_std=True), True),
]
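    # Added note on how this spec is read: each entry is (Distribution, regularize_flag).
    # The generator input is 62 (incompressible Uniform noise, not regularized)
    # + 10 (one-hot Categorical code) + 1 + 1 (continuous codes) = 74 dimensions,
    # and only the entries flagged True enter the mutual-information term.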
model = RegularizedGAN(
output_dist=MeanBernoulli(dataset.image_dim),
latent_spec=latent_spec,
batch_size=batch_size,
image_shape=dataset.image_shape,
network_type="mnist",
)
algo = InfoGANTrainer(
model=model,
dataset=dataset,
batch_size=batch_size,
exp_name=exp_name,
log_dir=log_dir,
checkpoint_dir=checkpoint_dir,
max_epoch=max_epoch,
updates_per_epoch=updates_per_epoch,
info_reg_coeff=1.0,
generator_learning_rate=1e-3,
discriminator_learning_rate=2e-4,
)
algo.train()
| 1,758 | 25.651515 | 84 |
py
|
InfoGAN
|
InfoGAN-master/launchers/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
InfoGAN
|
InfoGAN-master/tests/test_distributions.py
|
from __future__ import print_function
from __future__ import absolute_import
from nose2.tools import such
from misc.distributions import Categorical, Gaussian, Product, Bernoulli
import numpy as np
import tensorflow as tf
sess = tf.Session()
def random_softmax(ndim):
x = np.random.uniform(size=(ndim,))
x = x - np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
return np.cast['float32'](x)
with such.A("Product Distribution") as it:
dist1 = Product([Categorical(5), Categorical(3)])
dist2 = Product([Gaussian(5), dist1])
@it.should
def test_dist_info_keys():
it.assertEqual(set(dist1.dist_info_keys), {"id_0_prob", "id_1_prob"})
it.assertEqual(set(dist2.dist_info_keys), {"id_0_mean", "id_0_stddev",
"id_1_id_0_prob", "id_1_id_1_prob"})
@it.should
def test_kl_sym():
old_id_0_prob = np.array([random_softmax(5)])
old_id_1_prob = np.array([random_softmax(3)])
new_id_0_prob = np.array([random_softmax(5)])
new_id_1_prob = np.array([random_softmax(3)])
old_dist_info_vars = dict(
id_0_prob=tf.constant(old_id_0_prob),
id_1_prob=tf.constant(old_id_1_prob)
)
new_dist_info_vars = dict(
id_0_prob=tf.constant(new_id_0_prob),
id_1_prob=tf.constant(new_id_1_prob)
)
np.testing.assert_allclose(
dist1.kl(old_dist_info_vars, new_dist_info_vars).eval(session=sess),
Categorical(5).kl(dict(prob=old_id_0_prob), dict(prob=new_id_0_prob)).eval(session=sess) +
Categorical(3).kl(dict(prob=old_id_1_prob), dict(prob=new_id_1_prob)).eval(session=sess)
)
it.createTests(globals())
with such.A("Categorical") as it:
@it.should
def test_categorical():
cat = Categorical(3)
new_prob = np.array(
[random_softmax(3), random_softmax(3)],
)
old_prob = np.array(
[random_softmax(3), random_softmax(3)],
)
x = np.array([
[0, 1, 0],
[0, 0, 1],
], dtype=np.float32)
new_prob_sym = tf.constant(new_prob)
old_prob_sym = tf.constant(old_prob)
x_sym = tf.constant(x)
new_info_sym = dict(prob=new_prob_sym)
old_info_sym = dict(prob=old_prob_sym)
np.testing.assert_allclose(
cat.kl(new_info_sym, new_info_sym).eval(session=sess),
np.array([0., 0.])
)
np.testing.assert_allclose(
cat.kl(old_info_sym, new_info_sym).eval(session=sess),
np.sum(old_prob * (np.log(old_prob + 1e-8) - np.log(new_prob + 1e-8)), axis=-1)
)
np.testing.assert_allclose(
cat.logli(x_sym, old_info_sym).eval(session=sess),
[np.log(old_prob[0][1] + 1e-8), np.log(old_prob[1][2] + 1e-8)],
rtol=1e-5
)
it.createTests(globals())
with such.A("Bernoulli") as it:
@it.should
def test_bernoulli():
bernoulli = Bernoulli(3)
new_p = np.array([[0.5, 0.5, 0.5], [.9, .9, .9]], dtype=np.float32)
old_p = np.array([[.9, .9, .9], [.1, .1, .1]], dtype=np.float32)
x = np.array([[1, 0, 1], [1, 1, 1]], dtype=np.float32)
x_sym = tf.constant(x)
new_p_sym = tf.constant(new_p)
old_p_sym = tf.constant(old_p)
new_info = dict(p=new_p)
old_info = dict(p=old_p)
new_info_sym = dict(p=new_p_sym)
old_info_sym = dict(p=old_p_sym)
# np.testing.assert_allclose(
# np.sum(bernoulli.entropy(dist_info=new_info)),
# np.sum(- new_p * np.log(new_p + 1e-8) - (1 - new_p) * np.log(1 - new_p + 1e-8)),
# )
# np.testing.assert_allclose(
# np.sum(bernoulli.kl(old_info_sym, new_info_sym).eval()),
# np.sum(old_p * (np.log(old_p + 1e-8) - np.log(new_p + 1e-8)) + (1 - old_p) * (np.log(1 - old_p + 1e-8) -
# np.log(1 - new_p + 1e-8))),
# )
# np.testing.assert_allclose(
# np.sum(bernoulli.kl(old_info, new_info)),
# np.sum(old_p * (np.log(old_p + 1e-8) - np.log(new_p + 1e-8)) + (1 - old_p) * (np.log(1 - old_p + 1e-8) -
# np.log(1 - new_p + 1e-8))),
# )
# np.testing.assert_allclose(
# bernoulli.likelihood_ratio_sym(x_sym, old_info_sym, new_info_sym).eval(),
# np.prod((x * new_p + (1 - x) * (1 - new_p)) / (x * old_p + (1 - x) * (1 - old_p) + 1e-8), axis=-1)
# )
np.testing.assert_allclose(
bernoulli.logli(x_sym, old_info_sym).eval(session=sess),
np.sum(x * np.log(old_p + 1e-8) + (1 - x) * np.log(1 - old_p + 1e-8), axis=-1)
)
# np.testing.assert_allclose(
# bernoulli.log_likelihood(x, old_info),
# np.sum(x * np.log(old_p + 1e-8) + (1 - x) * np.log(1 - old_p + 1e-8), axis=-1)
# )
it.createTests(globals())
| 5,105 | 34.957746 | 119 |
py
|
InfoGAN
|
InfoGAN-master/tests/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
InfoGAN
|
InfoGAN-master/infogan/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
InfoGAN
|
InfoGAN-master/infogan/models/regularized_gan.py
|
from infogan.misc.distributions import Product, Distribution, Gaussian, Categorical, Bernoulli
import prettytensor as pt
import tensorflow as tf
import infogan.misc.custom_ops
from infogan.misc.custom_ops import leaky_rectify
class RegularizedGAN(object):
def __init__(self, output_dist, latent_spec, batch_size, image_shape, network_type):
"""
:type output_dist: Distribution
:type latent_spec: list[(Distribution, bool)]
:type batch_size: int
:type network_type: string
"""
self.output_dist = output_dist
self.latent_spec = latent_spec
self.latent_dist = Product([x for x, _ in latent_spec])
self.reg_latent_dist = Product([x for x, reg in latent_spec if reg])
self.nonreg_latent_dist = Product([x for x, reg in latent_spec if not reg])
self.batch_size = batch_size
self.network_type = network_type
self.image_shape = image_shape
assert all(isinstance(x, (Gaussian, Categorical, Bernoulli)) for x in self.reg_latent_dist.dists)
self.reg_cont_latent_dist = Product([x for x in self.reg_latent_dist.dists if isinstance(x, Gaussian)])
self.reg_disc_latent_dist = Product([x for x in self.reg_latent_dist.dists if isinstance(x, (Categorical, Bernoulli))])
image_size = image_shape[0]
if network_type == "mnist":
with tf.variable_scope("d_net"):
shared_template = \
(pt.template("input").
reshape([-1] + list(image_shape)).
custom_conv2d(64, k_h=4, k_w=4).
apply(leaky_rectify).
custom_conv2d(128, k_h=4, k_w=4).
conv_batch_norm().
apply(leaky_rectify).
custom_fully_connected(1024).
fc_batch_norm().
apply(leaky_rectify))
self.discriminator_template = shared_template.custom_fully_connected(1)
self.encoder_template = \
(shared_template.
custom_fully_connected(128).
fc_batch_norm().
apply(leaky_rectify).
custom_fully_connected(self.reg_latent_dist.dist_flat_dim))
with tf.variable_scope("g_net"):
self.generator_template = \
(pt.template("input").
custom_fully_connected(1024).
fc_batch_norm().
apply(tf.nn.relu).
custom_fully_connected(image_size / 4 * image_size / 4 * 128).
fc_batch_norm().
apply(tf.nn.relu).
reshape([-1, image_size / 4, image_size / 4, 128]).
custom_deconv2d([0, image_size / 2, image_size / 2, 64], k_h=4, k_w=4).
conv_batch_norm().
apply(tf.nn.relu).
custom_deconv2d([0] + list(image_shape), k_h=4, k_w=4).
flatten())
else:
raise NotImplementedError
def discriminate(self, x_var):
d_out = self.discriminator_template.construct(input=x_var)
d = tf.nn.sigmoid(d_out[:, 0])
reg_dist_flat = self.encoder_template.construct(input=x_var)
reg_dist_info = self.reg_latent_dist.activate_dist(reg_dist_flat)
return d, self.reg_latent_dist.sample(reg_dist_info), reg_dist_info, reg_dist_flat
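    # Added note: the real/fake head and the Q/encoder head share the convolutional
    # trunk built as `shared_template`; discriminate() therefore returns the sigmoid
    # real/fake score, a sample of the regularized codes, the activated distribution
    # parameters for those codes, and the raw (pre-activation) flat encoder output.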
def generate(self, z_var):
x_dist_flat = self.generator_template.construct(input=z_var)
x_dist_info = self.output_dist.activate_dist(x_dist_flat)
return self.output_dist.sample(x_dist_info), x_dist_info
def disc_reg_z(self, reg_z_var):
ret = []
for dist_i, z_i in zip(self.reg_latent_dist.dists, self.reg_latent_dist.split_var(reg_z_var)):
if isinstance(dist_i, (Categorical, Bernoulli)):
ret.append(z_i)
return self.reg_disc_latent_dist.join_vars(ret)
def cont_reg_z(self, reg_z_var):
ret = []
for dist_i, z_i in zip(self.reg_latent_dist.dists, self.reg_latent_dist.split_var(reg_z_var)):
if isinstance(dist_i, Gaussian):
ret.append(z_i)
return self.reg_cont_latent_dist.join_vars(ret)
def disc_reg_dist_info(self, reg_dist_info):
ret = []
for dist_i, dist_info_i in zip(self.reg_latent_dist.dists, self.reg_latent_dist.split_dist_info(reg_dist_info)):
if isinstance(dist_i, (Categorical, Bernoulli)):
ret.append(dist_info_i)
return self.reg_disc_latent_dist.join_dist_infos(ret)
def cont_reg_dist_info(self, reg_dist_info):
ret = []
for dist_i, dist_info_i in zip(self.reg_latent_dist.dists, self.reg_latent_dist.split_dist_info(reg_dist_info)):
if isinstance(dist_i, Gaussian):
ret.append(dist_info_i)
return self.reg_cont_latent_dist.join_dist_infos(ret)
def reg_z(self, z_var):
ret = []
for (_, reg_i), z_i in zip(self.latent_spec, self.latent_dist.split_var(z_var)):
if reg_i:
ret.append(z_i)
return self.reg_latent_dist.join_vars(ret)
def nonreg_z(self, z_var):
ret = []
for (_, reg_i), z_i in zip(self.latent_spec, self.latent_dist.split_var(z_var)):
if not reg_i:
ret.append(z_i)
return self.nonreg_latent_dist.join_vars(ret)
def reg_dist_info(self, dist_info):
ret = []
for (_, reg_i), dist_info_i in zip(self.latent_spec, self.latent_dist.split_dist_info(dist_info)):
if reg_i:
ret.append(dist_info_i)
return self.reg_latent_dist.join_dist_infos(ret)
def nonreg_dist_info(self, dist_info):
ret = []
for (_, reg_i), dist_info_i in zip(self.latent_spec, self.latent_dist.split_dist_info(dist_info)):
if not reg_i:
ret.append(dist_info_i)
return self.nonreg_latent_dist.join_dist_infos(ret)
def combine_reg_nonreg_z(self, reg_z_var, nonreg_z_var):
reg_z_vars = self.reg_latent_dist.split_var(reg_z_var)
reg_idx = 0
nonreg_z_vars = self.nonreg_latent_dist.split_var(nonreg_z_var)
nonreg_idx = 0
ret = []
for idx, (dist_i, reg_i) in enumerate(self.latent_spec):
if reg_i:
ret.append(reg_z_vars[reg_idx])
reg_idx += 1
else:
ret.append(nonreg_z_vars[nonreg_idx])
nonreg_idx += 1
return self.latent_dist.join_vars(ret)
def combine_reg_nonreg_dist_info(self, reg_dist_info, nonreg_dist_info):
reg_dist_infos = self.reg_latent_dist.split_dist_info(reg_dist_info)
reg_idx = 0
nonreg_dist_infos = self.nonreg_latent_dist.split_dist_info(nonreg_dist_info)
nonreg_idx = 0
ret = []
for idx, (dist_i, reg_i) in enumerate(self.latent_spec):
if reg_i:
ret.append(reg_dist_infos[reg_idx])
reg_idx += 1
else:
ret.append(nonreg_dist_infos[nonreg_idx])
nonreg_idx += 1
return self.latent_dist.join_dist_infos(ret)
| 7,290 | 42.921687 | 127 |
py
|
InfoGAN
|
InfoGAN-master/infogan/models/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
InfoGAN
|
InfoGAN-master/infogan/algos/infogan_trainer.py
|
from infogan.models.regularized_gan import RegularizedGAN
import prettytensor as pt
import tensorflow as tf
import numpy as np
from progressbar import ETA, Bar, Percentage, ProgressBar
from infogan.misc.distributions import Bernoulli, Gaussian, Categorical
import sys
TINY = 1e-8
class InfoGANTrainer(object):
def __init__(self,
model,
batch_size,
dataset=None,
exp_name="experiment",
log_dir="logs",
checkpoint_dir="ckt",
max_epoch=100,
updates_per_epoch=100,
snapshot_interval=10000,
info_reg_coeff=1.0,
discriminator_learning_rate=2e-4,
generator_learning_rate=2e-4,
):
"""
:type model: RegularizedGAN
"""
self.model = model
self.dataset = dataset
self.batch_size = batch_size
self.max_epoch = max_epoch
self.exp_name = exp_name
self.log_dir = log_dir
self.checkpoint_dir = checkpoint_dir
self.snapshot_interval = snapshot_interval
self.updates_per_epoch = updates_per_epoch
self.generator_learning_rate = generator_learning_rate
self.discriminator_learning_rate = discriminator_learning_rate
self.info_reg_coeff = info_reg_coeff
self.discriminator_trainer = None
self.generator_trainer = None
self.input_tensor = None
self.log_vars = []
def init_opt(self):
self.input_tensor = input_tensor = tf.placeholder(tf.float32, [self.batch_size, self.dataset.image_dim])
with pt.defaults_scope(phase=pt.Phase.train):
z_var = self.model.latent_dist.sample_prior(self.batch_size)
fake_x, _ = self.model.generate(z_var)
real_d, _, _, _ = self.model.discriminate(input_tensor)
fake_d, _, fake_reg_z_dist_info, _ = self.model.discriminate(fake_x)
reg_z = self.model.reg_z(z_var)
discriminator_loss = - tf.reduce_mean(tf.log(real_d + TINY) + tf.log(1. - fake_d + TINY))
generator_loss = - tf.reduce_mean(tf.log(fake_d + TINY))
self.log_vars.append(("discriminator_loss", discriminator_loss))
self.log_vars.append(("generator_loss", generator_loss))
mi_est = tf.constant(0.)
cross_ent = tf.constant(0.)
# compute for discrete and continuous codes separately
# discrete:
if len(self.model.reg_disc_latent_dist.dists) > 0:
disc_reg_z = self.model.disc_reg_z(reg_z)
disc_reg_dist_info = self.model.disc_reg_dist_info(fake_reg_z_dist_info)
disc_log_q_c_given_x = self.model.reg_disc_latent_dist.logli(disc_reg_z, disc_reg_dist_info)
disc_log_q_c = self.model.reg_disc_latent_dist.logli_prior(disc_reg_z)
disc_cross_ent = tf.reduce_mean(-disc_log_q_c_given_x)
disc_ent = tf.reduce_mean(-disc_log_q_c)
disc_mi_est = disc_ent - disc_cross_ent
mi_est += disc_mi_est
cross_ent += disc_cross_ent
self.log_vars.append(("MI_disc", disc_mi_est))
self.log_vars.append(("CrossEnt_disc", disc_cross_ent))
discriminator_loss -= self.info_reg_coeff * disc_mi_est
generator_loss -= self.info_reg_coeff * disc_mi_est
if len(self.model.reg_cont_latent_dist.dists) > 0:
cont_reg_z = self.model.cont_reg_z(reg_z)
cont_reg_dist_info = self.model.cont_reg_dist_info(fake_reg_z_dist_info)
cont_log_q_c_given_x = self.model.reg_cont_latent_dist.logli(cont_reg_z, cont_reg_dist_info)
cont_log_q_c = self.model.reg_cont_latent_dist.logli_prior(cont_reg_z)
cont_cross_ent = tf.reduce_mean(-cont_log_q_c_given_x)
cont_ent = tf.reduce_mean(-cont_log_q_c)
cont_mi_est = cont_ent - cont_cross_ent
mi_est += cont_mi_est
cross_ent += cont_cross_ent
self.log_vars.append(("MI_cont", cont_mi_est))
self.log_vars.append(("CrossEnt_cont", cont_cross_ent))
discriminator_loss -= self.info_reg_coeff * cont_mi_est
generator_loss -= self.info_reg_coeff * cont_mi_est
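            # Added note: mi_est is the variational lower bound on mutual information
            # from the InfoGAN paper,
            #   I(c; G(z, c)) >= H(c) - E_{x ~ G(z, c)}[ -log Q(c|x) ],
            # where the entropy terms use the fixed prior and the cross-entropy terms
            # use the encoder Q, so mi_est = H(c) - CrossEnt for each code group.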
for idx, dist_info in enumerate(self.model.reg_latent_dist.split_dist_info(fake_reg_z_dist_info)):
if "stddev" in dist_info:
self.log_vars.append(("max_std_%d" % idx, tf.reduce_max(dist_info["stddev"])))
self.log_vars.append(("min_std_%d" % idx, tf.reduce_min(dist_info["stddev"])))
self.log_vars.append(("MI", mi_est))
self.log_vars.append(("CrossEnt", cross_ent))
all_vars = tf.trainable_variables()
d_vars = [var for var in all_vars if var.name.startswith('d_')]
g_vars = [var for var in all_vars if var.name.startswith('g_')]
self.log_vars.append(("max_real_d", tf.reduce_max(real_d)))
self.log_vars.append(("min_real_d", tf.reduce_min(real_d)))
self.log_vars.append(("max_fake_d", tf.reduce_max(fake_d)))
self.log_vars.append(("min_fake_d", tf.reduce_min(fake_d)))
discriminator_optimizer = tf.train.AdamOptimizer(self.discriminator_learning_rate, beta1=0.5)
self.discriminator_trainer = pt.apply_optimizer(discriminator_optimizer, losses=[discriminator_loss],
var_list=d_vars)
generator_optimizer = tf.train.AdamOptimizer(self.generator_learning_rate, beta1=0.5)
self.generator_trainer = pt.apply_optimizer(generator_optimizer, losses=[generator_loss], var_list=g_vars)
for k, v in self.log_vars:
tf.scalar_summary(k, v)
with pt.defaults_scope(phase=pt.Phase.test):
with tf.variable_scope("model", reuse=True) as scope:
self.visualize_all_factors()
def visualize_all_factors(self):
with tf.Session():
fixed_noncat = np.concatenate([
np.tile(
self.model.nonreg_latent_dist.sample_prior(10).eval(),
[10, 1]
),
self.model.nonreg_latent_dist.sample_prior(self.batch_size - 100).eval(),
], axis=0)
fixed_cat = np.concatenate([
np.tile(
self.model.reg_latent_dist.sample_prior(10).eval(),
[10, 1]
),
self.model.reg_latent_dist.sample_prior(self.batch_size - 100).eval(),
], axis=0)
offset = 0
for dist_idx, dist in enumerate(self.model.reg_latent_dist.dists):
if isinstance(dist, Gaussian):
assert dist.dim == 1, "Only dim=1 is currently supported"
c_vals = []
for idx in xrange(10):
c_vals.extend([-1.0 + idx * 2.0 / 9] * 10)
c_vals.extend([0.] * (self.batch_size - 100))
vary_cat = np.asarray(c_vals, dtype=np.float32).reshape((-1, 1))
cur_cat = np.copy(fixed_cat)
cur_cat[:, offset:offset+1] = vary_cat
offset += 1
elif isinstance(dist, Categorical):
lookup = np.eye(dist.dim, dtype=np.float32)
cat_ids = []
for idx in xrange(10):
cat_ids.extend([idx] * 10)
cat_ids.extend([0] * (self.batch_size - 100))
cur_cat = np.copy(fixed_cat)
cur_cat[:, offset:offset+dist.dim] = lookup[cat_ids]
offset += dist.dim
elif isinstance(dist, Bernoulli):
assert dist.dim == 1, "Only dim=1 is currently supported"
lookup = np.eye(dist.dim, dtype=np.float32)
cat_ids = []
for idx in xrange(10):
cat_ids.extend([int(idx / 5)] * 10)
cat_ids.extend([0] * (self.batch_size - 100))
cur_cat = np.copy(fixed_cat)
cur_cat[:, offset:offset+dist.dim] = np.expand_dims(np.array(cat_ids), axis=-1)
# import ipdb; ipdb.set_trace()
offset += dist.dim
else:
raise NotImplementedError
z_var = tf.constant(np.concatenate([fixed_noncat, cur_cat], axis=1))
_, x_dist_info = self.model.generate(z_var)
# just take the mean image
if isinstance(self.model.output_dist, Bernoulli):
img_var = x_dist_info["p"]
elif isinstance(self.model.output_dist, Gaussian):
img_var = x_dist_info["mean"]
else:
raise NotImplementedError
img_var = self.dataset.inverse_transform(img_var)
rows = 10
img_var = tf.reshape(img_var, [self.batch_size] + list(self.dataset.image_shape))
img_var = img_var[:rows * rows, :, :, :]
imgs = tf.reshape(img_var, [rows, rows] + list(self.dataset.image_shape))
stacked_img = []
for row in xrange(rows):
row_img = []
for col in xrange(rows):
row_img.append(imgs[row, col, :, :, :])
stacked_img.append(tf.concat(1, row_img))
imgs = tf.concat(0, stacked_img)
imgs = tf.expand_dims(imgs, 0)
tf.image_summary("image_%d_%s" % (dist_idx, dist.__class__.__name__), imgs)
def train(self):
self.init_opt()
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(self.log_dir, sess.graph)
saver = tf.train.Saver()
counter = 0
log_vars = [x for _, x in self.log_vars]
log_keys = [x for x, _ in self.log_vars]
for epoch in range(self.max_epoch):
widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
pbar = ProgressBar(maxval=self.updates_per_epoch, widgets=widgets)
pbar.start()
all_log_vals = []
for i in range(self.updates_per_epoch):
pbar.update(i)
x, _ = self.dataset.train.next_batch(self.batch_size)
feed_dict = {self.input_tensor: x}
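                # one discriminator update (which also fetches the logged scalars),
                # then one generator update on the same mini-batch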
log_vals = sess.run([self.discriminator_trainer] + log_vars, feed_dict)[1:]
sess.run(self.generator_trainer, feed_dict)
all_log_vals.append(log_vals)
counter += 1
if counter % self.snapshot_interval == 0:
snapshot_name = "%s_%s" % (self.exp_name, str(counter))
fn = saver.save(sess, "%s/%s.ckpt" % (self.checkpoint_dir, snapshot_name))
print("Model saved in file: %s" % fn)
x, _ = self.dataset.train.next_batch(self.batch_size)
summary_str = sess.run(summary_op, {self.input_tensor: x})
summary_writer.add_summary(summary_str, counter)
avg_log_vals = np.mean(np.array(all_log_vals), axis=0)
log_dict = dict(zip(log_keys, avg_log_vals))
log_line = "; ".join("%s: %s" % (str(k), str(v)) for k, v in zip(log_keys, avg_log_vals))
print("Epoch %d | " % (epoch) + log_line)
sys.stdout.flush()
if np.any(np.isnan(avg_log_vals)):
raise ValueError("NaN detected!")
| 11,833 | 44.515385 | 118 |
py
|
InfoGAN
|
InfoGAN-master/infogan/algos/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
InfoGAN
|
InfoGAN-master/infogan/misc/custom_ops.py
|
import prettytensor as pt
import tensorflow as tf
from prettytensor.pretty_tensor_class import Phase
import numpy as np
class conv_batch_norm(pt.VarStoreMethod):
"""Code modification of http://stackoverflow.com/a/33950177"""
def __call__(self, input_layer, epsilon=1e-5, momentum=0.1, name="batch_norm",
in_dim=None, phase=Phase.train):
self.ema = tf.train.ExponentialMovingAverage(decay=0.9)
shape = input_layer.shape
shp = in_dim or shape[-1]
with tf.variable_scope(name) as scope:
self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))
self.mean, self.variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
            # tf.nn.moments loses the static shape here, so set it explicitly
self.mean.set_shape((shp,))
self.variance.set_shape((shp,))
self.ema_apply_op = self.ema.apply([self.mean, self.variance])
if phase == Phase.train:
with tf.control_dependencies([self.ema_apply_op]):
normalized_x = tf.nn.batch_norm_with_global_normalization(
input_layer.tensor, self.mean, self.variance, self.beta, self.gamma, epsilon,
scale_after_normalization=True)
else:
normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.ema.average(self.mean), self.ema.average(self.variance), self.beta,
self.gamma, epsilon,
scale_after_normalization=True)
return input_layer.with_tensor(normalized_x, parameters=self.vars)
pt.Register(assign_defaults=('phase'))(conv_batch_norm)
@pt.Register(assign_defaults=('phase'))
class fc_batch_norm(conv_batch_norm):
def __call__(self, input_layer, *args, **kwargs):
ori_shape = input_layer.shape
if ori_shape[0] is None:
ori_shape[0] = -1
new_shape = [ori_shape[0], 1, 1, ori_shape[1]]
x = tf.reshape(input_layer.tensor, new_shape)
normalized_x = super(self.__class__, self).__call__(input_layer.with_tensor(x), *args, **kwargs) # input_layer)
return normalized_x.reshape(ori_shape)
def leaky_rectify(x, leakiness=0.01):
assert leakiness <= 1
ret = tf.maximum(x, leakiness * x)
# import ipdb; ipdb.set_trace()
return ret
@pt.Register
class custom_conv2d(pt.VarStoreMethod):
def __call__(self, input_layer, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, in_dim=None, padding='SAME',
name="conv2d"):
with tf.variable_scope(name):
w = self.variable('w', [k_h, k_w, in_dim or input_layer.shape[-1], output_dim],
init=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_layer.tensor, w, strides=[1, d_h, d_w, 1], padding=padding)
biases = self.variable('biases', [output_dim], init=tf.constant_initializer(0.0))
# import ipdb; ipdb.set_trace()
return input_layer.with_tensor(tf.nn.bias_add(conv, biases), parameters=self.vars)
@pt.Register
class custom_deconv2d(pt.VarStoreMethod):
def __call__(self, input_layer, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d"):
output_shape[0] = input_layer.shape[0]
ts_output_shape = tf.pack(output_shape)
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
init=tf.random_normal_initializer(stddev=stddev))
try:
deconv = tf.nn.conv2d_transpose(input_layer, w,
output_shape=ts_output_shape,
strides=[1, d_h, d_w, 1])
# Support for versions of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
strides=[1, d_h, d_w, 1])
biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
return deconv
@pt.Register
class custom_fully_connected(pt.VarStoreMethod):
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
shape = input_layer.shape
input_ = input_layer.tensor
try:
if len(shape) == 4:
input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
input_.set_shape([None, np.prod(shape[1:])])
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
init=tf.random_normal_initializer(stddev=stddev))
bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
except Exception:
import ipdb; ipdb.set_trace()
| 5,501 | 44.098361 | 120 |
py
|
InfoGAN
|
InfoGAN-master/infogan/misc/distributions.py
|
from __future__ import print_function
from __future__ import absolute_import
import itertools
import tensorflow as tf
import numpy as np
TINY = 1e-8
floatX = np.float32
class Distribution(object):
@property
def dist_flat_dim(self):
"""
:rtype: int
"""
raise NotImplementedError
@property
def dim(self):
"""
:rtype: int
"""
raise NotImplementedError
@property
def effective_dim(self):
"""
        The effective dimension when used for rescaling quantities. This can differ from the
        actual dimension when the values use a redundant representation (e.g. categorical
        distributions are encoded as one-hot vectors)
:rtype: int
"""
raise NotImplementedError
def kl_prior(self, dist_info):
return self.kl(dist_info, self.prior_dist_info(dist_info.values()[0].get_shape()[0]))
def logli(self, x_var, dist_info):
"""
:param x_var:
:param dist_info:
:return: log likelihood of the data
"""
raise NotImplementedError
def logli_prior(self, x_var):
return self.logli(x_var, self.prior_dist_info(x_var.get_shape()[0]))
def nonreparam_logli(self, x_var, dist_info):
"""
:param x_var:
:param dist_info:
:return: the non-reparameterizable part of the log likelihood
"""
raise NotImplementedError
def activate_dist(self, flat_dist):
"""
:param flat_dist: flattened dist info without applying nonlinearity yet
:return: a dictionary of dist infos
"""
raise NotImplementedError
@property
def dist_info_keys(self):
"""
:rtype: list[str]
"""
raise NotImplementedError
def entropy(self, dist_info):
"""
:return: entropy for each minibatch entry
"""
raise NotImplementedError
def marginal_entropy(self, dist_info):
"""
        :return: the entropy of the mixture distribution averaged over all minibatch entries,
            returned in the same shape as `Distribution.entropy`
"""
raise NotImplementedError
def marginal_logli(self, x_var, dist_info):
"""
:return: the log likelihood of the given variable under the mixture distribution averaged over all minibatch
entries.
"""
raise NotImplementedError
def sample(self, dist_info):
raise NotImplementedError
def sample_prior(self, batch_size):
return self.sample(self.prior_dist_info(batch_size))
def prior_dist_info(self, batch_size):
"""
:return: a dictionary containing distribution information about the standard prior distribution, the shape
of which is jointly decided by batch_size and self.dim
"""
raise NotImplementedError
class Categorical(Distribution):
def __init__(self, dim):
self._dim = dim
@property
def dim(self):
return self._dim
@property
def dist_flat_dim(self):
return self.dim
@property
def effective_dim(self):
return 1
def logli(self, x_var, dist_info):
prob = dist_info["prob"]
return tf.reduce_sum(tf.log(prob + TINY) * x_var, reduction_indices=1)
def prior_dist_info(self, batch_size):
prob = tf.ones([batch_size, self.dim]) * floatX(1.0 / self.dim)
return dict(prob=prob)
def marginal_logli(self, x_var, dist_info):
prob = dist_info["prob"]
avg_prob = tf.tile(
tf.reduce_mean(prob, reduction_indices=0, keep_dims=True),
tf.pack([tf.shape(prob)[0], 1])
)
return self.logli(x_var, dict(prob=avg_prob))
def nonreparam_logli(self, x_var, dist_info):
return self.logli(x_var, dist_info)
def kl(self, p, q):
"""
:param p: left dist info
:param q: right dist info
:return: KL(p||q)
"""
p_prob = p["prob"]
q_prob = q["prob"]
return tf.reduce_sum(
p_prob * (tf.log(p_prob + TINY) - tf.log(q_prob + TINY)),
reduction_indices=1
)
def sample(self, dist_info):
prob = dist_info["prob"]
ids = tf.multinomial(tf.log(prob + TINY), num_samples=1)[:, 0]
onehot = tf.constant(np.eye(self.dim, dtype=np.float32))
return tf.nn.embedding_lookup(onehot, ids)
def activate_dist(self, flat_dist):
return dict(prob=tf.nn.softmax(flat_dist))
def entropy(self, dist_info):
prob = dist_info["prob"]
return -tf.reduce_sum(prob * tf.log(prob + TINY), reduction_indices=1)
def marginal_entropy(self, dist_info):
prob = dist_info["prob"]
avg_prob = tf.tile(
tf.reduce_mean(prob, reduction_indices=0, keep_dims=True),
tf.pack([tf.shape(prob)[0], 1])
)
return self.entropy(dict(prob=avg_prob))
@property
def dist_info_keys(self):
return ["prob"]
class Gaussian(Distribution):
def __init__(self, dim, fix_std=False):
self._dim = dim
self._fix_std = fix_std
@property
def dim(self):
return self._dim
@property
def dist_flat_dim(self):
return self._dim * 2
@property
def effective_dim(self):
return self._dim
def logli(self, x_var, dist_info):
mean = dist_info["mean"]
stddev = dist_info["stddev"]
epsilon = (x_var - mean) / (stddev + TINY)
return tf.reduce_sum(
- 0.5 * np.log(2 * np.pi) - tf.log(stddev + TINY) - 0.5 * tf.square(epsilon),
reduction_indices=1,
)
def prior_dist_info(self, batch_size):
mean = tf.zeros([batch_size, self.dim])
stddev = tf.ones([batch_size, self.dim])
return dict(mean=mean, stddev=stddev)
def nonreparam_logli(self, x_var, dist_info):
return tf.zeros_like(x_var[:, 0])
def kl(self, p, q):
p_mean = p["mean"]
p_stddev = p["stddev"]
q_mean = q["mean"]
q_stddev = q["stddev"]
# means: (N*D)
# std: (N*D)
# formula:
# { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) + ln(\sigma_2/\sigma_1)
numerator = tf.square(p_mean - q_mean) + tf.square(p_stddev) - tf.square(q_stddev)
denominator = 2. * tf.square(q_stddev)
return tf.reduce_sum(
numerator / (denominator + TINY) + tf.log(q_stddev + TINY) - tf.log(p_stddev + TINY),
reduction_indices=1
)
def sample(self, dist_info):
mean = dist_info["mean"]
stddev = dist_info["stddev"]
epsilon = tf.random_normal(tf.shape(mean))
return mean + epsilon * stddev
@property
def dist_info_keys(self):
return ["mean", "stddev"]
def activate_dist(self, flat_dist):
mean = flat_dist[:, :self.dim]
if self._fix_std:
stddev = tf.ones_like(mean)
else:
stddev = tf.sqrt(tf.exp(flat_dist[:, self.dim:]))
return dict(mean=mean, stddev=stddev)
class Uniform(Gaussian):
"""
This distribution will sample prior data from a uniform distribution, but
the prior and posterior are still modeled as a Gaussian
"""
def kl_prior(self):
raise NotImplementedError
# def prior_dist_info(self, batch_size):
# raise NotImplementedError
# def logli_prior(self, x_var):
# #
# raise NotImplementedError
def sample_prior(self, batch_size):
return tf.random_uniform([batch_size, self.dim], minval=-1., maxval=1.)
class Bernoulli(Distribution):
def __init__(self, dim):
self._dim = dim
@property
def dim(self):
return self._dim
@property
def dist_flat_dim(self):
return self._dim
@property
def effective_dim(self):
return self._dim
@property
def dist_info_keys(self):
return ["p"]
def logli(self, x_var, dist_info):
p = dist_info["p"]
return tf.reduce_sum(
x_var * tf.log(p + TINY) + (1.0 - x_var) * tf.log(1.0 - p + TINY),
reduction_indices=1
)
def nonreparam_logli(self, x_var, dist_info):
return self.logli(x_var, dist_info)
def activate_dist(self, flat_dist):
return dict(p=tf.nn.sigmoid(flat_dist))
def sample(self, dist_info):
p = dist_info["p"]
return tf.cast(tf.less(tf.random_uniform(p.get_shape()), p), tf.float32)
def prior_dist_info(self, batch_size):
return dict(p=0.5 * tf.ones([batch_size, self.dim]))
class MeanBernoulli(Bernoulli):
"""
    Behaves almost the same as the usual Bernoulli distribution, except that sampling from it
    returns the mean directly instead of drawing binary values
"""
def sample(self, dist_info):
return dist_info["p"]
def nonreparam_logli(self, x_var, dist_info):
return tf.zeros_like(x_var[:, 0])
# class MeanCenteredUniform(MeanBernoulli):
# """
# Behaves almost the same as the usual Bernoulli distribution, except that when sampling from it, directly
# return the mean instead of sampling binary values
# """
class Product(Distribution):
def __init__(self, dists):
"""
:type dists: list[Distribution]
"""
self._dists = dists
@property
def dists(self):
return list(self._dists)
@property
def dim(self):
return sum(x.dim for x in self.dists)
@property
def effective_dim(self):
return sum(x.effective_dim for x in self.dists)
@property
def dims(self):
return [x.dim for x in self.dists]
@property
def dist_flat_dims(self):
return [x.dist_flat_dim for x in self.dists]
@property
def dist_flat_dim(self):
return sum(x.dist_flat_dim for x in self.dists)
@property
def dist_info_keys(self):
ret = []
for idx, dist in enumerate(self.dists):
for k in dist.dist_info_keys:
ret.append("id_%d_%s" % (idx, k))
return ret
def split_dist_info(self, dist_info):
ret = []
for idx, dist in enumerate(self.dists):
cur_dist_info = dict()
for k in dist.dist_info_keys:
cur_dist_info[k] = dist_info["id_%d_%s" % (idx, k)]
ret.append(cur_dist_info)
return ret
def join_dist_infos(self, dist_infos):
ret = dict()
for idx, dist, dist_info_i in zip(itertools.count(), self.dists, dist_infos):
for k in dist.dist_info_keys:
ret["id_%d_%s" % (idx, k)] = dist_info_i[k]
return ret
def split_var(self, x):
"""
Split the tensor variable or value into per component.
"""
cum_dims = list(np.cumsum(self.dims))
out = []
for slice_from, slice_to, dist in zip([0] + cum_dims, cum_dims, self.dists):
sliced = x[:, slice_from:slice_to]
out.append(sliced)
return out
def join_vars(self, xs):
"""
Join the per component tensor variables into a whole tensor
"""
return tf.concat(1, xs)
def split_dist_flat(self, dist_flat):
"""
Split flat dist info into per component
"""
cum_dims = list(np.cumsum(self.dist_flat_dims))
out = []
for slice_from, slice_to, dist in zip([0] + cum_dims, cum_dims, self.dists):
sliced = dist_flat[:, slice_from:slice_to]
out.append(sliced)
return out
def prior_dist_info(self, batch_size):
ret = []
for dist_i in self.dists:
ret.append(dist_i.prior_dist_info(batch_size))
return self.join_dist_infos(ret)
def kl(self, p, q):
ret = tf.constant(0.)
for p_i, q_i, dist_i in zip(self.split_dist_info(p), self.split_dist_info(q), self.dists):
ret += dist_i.kl(p_i, q_i)
return ret
def activate_dist(self, dist_flat):
ret = dict()
for idx, dist_flat_i, dist_i in zip(itertools.count(), self.split_dist_flat(dist_flat), self.dists):
dist_info_i = dist_i.activate_dist(dist_flat_i)
for k, v in dist_info_i.iteritems():
ret["id_%d_%s" % (idx, k)] = v
return ret
def sample(self, dist_info):
ret = []
for dist_info_i, dist_i in zip(self.split_dist_info(dist_info), self.dists):
ret.append(tf.cast(dist_i.sample(dist_info_i), tf.float32))
return tf.concat(1, ret)
def sample_prior(self, batch_size):
ret = []
for dist_i in self.dists:
ret.append(tf.cast(dist_i.sample_prior(batch_size), tf.float32))
return tf.concat(1, ret)
def logli(self, x_var, dist_info):
ret = tf.constant(0.)
for x_i, dist_info_i, dist_i in zip(self.split_var(x_var), self.split_dist_info(dist_info), self.dists):
ret += dist_i.logli(x_i, dist_info_i)
return ret
def marginal_logli(self, x_var, dist_info):
ret = tf.constant(0.)
for x_i, dist_info_i, dist_i in zip(self.split_var(x_var), self.split_dist_info(dist_info), self.dists):
ret += dist_i.marginal_logli(x_i, dist_info_i)
return ret
def entropy(self, dist_info):
ret = tf.constant(0.)
for dist_info_i, dist_i in zip(self.split_dist_info(dist_info), self.dists):
ret += dist_i.entropy(dist_info_i)
return ret
def marginal_entropy(self, dist_info):
ret = tf.constant(0.)
for dist_info_i, dist_i in zip(self.split_dist_info(dist_info), self.dists):
ret += dist_i.marginal_entropy(dist_info_i)
return ret
def nonreparam_logli(self, x_var, dist_info):
ret = tf.constant(0.)
for x_i, dist_info_i, dist_i in zip(self.split_var(x_var), self.split_dist_info(dist_info), self.dists):
ret += dist_i.nonreparam_logli(x_i, dist_info_i)
return ret
| 14,122 | 28.795359 | 117 |
py
|
InfoGAN
|
InfoGAN-master/infogan/misc/utils.py
|
from __future__ import print_function
from __future__ import absolute_import
import errno
import os
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 309 | 19.666667 | 61 |
py
|
InfoGAN
|
InfoGAN-master/infogan/misc/datasets.py
|
import numpy as np
from tensorflow.examples.tutorials import mnist
import os
class Dataset(object):
def __init__(self, images, labels=None):
self._images = images.reshape(images.shape[0], -1)
self._labels = labels
self._epochs_completed = -1
self._num_examples = images.shape[0]
# shuffle on first run
self._index_in_epoch = self._num_examples
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is None:
return self._images[start:end], None
else:
return self._images[start:end], self._labels[start:end]
class MnistDataset(object):
def __init__(self):
data_directory = "MNIST"
if not os.path.exists(data_directory):
os.makedirs(data_directory)
dataset = mnist.input_data.read_data_sets(data_directory)
self.train = dataset.train
        # make sure that each digit class has exactly 10 samples
sup_images = []
sup_labels = []
rnd_state = np.random.get_state()
np.random.seed(0)
for cat in range(10):
ids = np.where(self.train.labels == cat)[0]
np.random.shuffle(ids)
sup_images.extend(self.train.images[ids[:10]])
sup_labels.extend(self.train.labels[ids[:10]])
np.random.set_state(rnd_state)
self.supervised_train = Dataset(
np.asarray(sup_images),
np.asarray(sup_labels),
)
self.test = dataset.test
self.validation = dataset.validation
self.image_dim = 28 * 28
self.image_shape = (28, 28, 1)
def transform(self, data):
return data
def inverse_transform(self, data):
return data
| 2,749 | 30.25 | 71 |
py
|
InfoGAN
|
InfoGAN-master/infogan/misc/__init__.py
|
from __future__ import print_function
from __future__ import absolute_import
| 76 | 37.5 | 38 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/losses.py
|
#coding:utf-8
import os
import torch
from torch import nn
from munch import Munch
from transforms import build_transforms
import torch.nn.functional as F
import numpy as np
def compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=None, x_ref=None, use_r1_reg=True, use_adv_cls=False, use_con_reg=False):
args = Munch(args)
assert (z_trg is None) != (x_ref is None)
# with real audios
x_real.requires_grad_()
out = nets.discriminator(x_real, y_org)
loss_real = adv_loss(out, 1)
    # R1 regularization (https://arxiv.org/abs/1801.04406v4)
if use_r1_reg:
loss_reg = r1_reg(out, x_real)
else:
loss_reg = torch.FloatTensor([0]).to(x_real.device)
# consistency regularization (bCR-GAN: https://arxiv.org/abs/2002.04724)
loss_con_reg = torch.FloatTensor([0]).to(x_real.device)
if use_con_reg:
t = build_transforms()
out_aug = nets.discriminator(t(x_real).detach(), y_org)
loss_con_reg += F.smooth_l1_loss(out, out_aug)
# with fake audios
with torch.no_grad():
if z_trg is not None:
s_trg = nets.mapping_network(z_trg, y_trg)
else: # x_ref is not None
s_trg = nets.style_encoder(x_ref, y_trg)
F0 = nets.f0_model.get_feature_GAN(x_real)
x_fake = nets.generator(x_real, s_trg, masks=None, F0=F0)
out = nets.discriminator(x_fake, y_trg)
loss_fake = adv_loss(out, 0)
if use_con_reg:
out_aug = nets.discriminator(t(x_fake).detach(), y_trg)
loss_con_reg += F.smooth_l1_loss(out, out_aug)
# adversarial classifier loss
if use_adv_cls:
out_de = nets.discriminator.classifier(x_fake)
loss_real_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_org[y_org != y_trg])
if use_con_reg:
out_de_aug = nets.discriminator.classifier(t(x_fake).detach())
loss_con_reg += F.smooth_l1_loss(out_de, out_de_aug)
else:
loss_real_adv_cls = torch.zeros(1).mean()
loss = loss_real + loss_fake + args.lambda_reg * loss_reg + \
args.lambda_adv_cls * loss_real_adv_cls + \
args.lambda_con_reg * loss_con_reg
return loss, Munch(real=loss_real.item(),
fake=loss_fake.item(),
reg=loss_reg.item(),
real_adv_cls=loss_real_adv_cls.item(),
con_reg=loss_con_reg.item())
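# Illustrative call pattern (the surrounding names `nets`, `args`, `z_trg` and
# `x_ref` are assumptions about the training loop, not defined in this file);
# exactly one of a latent code or a reference mel is supplied, never both:
#   d_loss, d_losses_latent = compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=z_trg)
#   d_loss, d_losses_ref = compute_d_loss(nets, args, x_real, y_org, y_trg, x_ref=x_ref)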
def compute_g_loss(nets, args, x_real, y_org, y_trg, z_trgs=None, x_refs=None, use_adv_cls=False):
args = Munch(args)
assert (z_trgs is None) != (x_refs is None)
if z_trgs is not None:
z_trg, z_trg2 = z_trgs
if x_refs is not None:
x_ref, x_ref2 = x_refs
# compute style vectors
if z_trgs is not None:
s_trg = nets.mapping_network(z_trg, y_trg)
else:
s_trg = nets.style_encoder(x_ref, y_trg)
# compute ASR/F0 features (real)
with torch.no_grad():
F0_real, GAN_F0_real, cyc_F0_real = nets.f0_model(x_real)
ASR_real = nets.asr_model.get_feature(x_real)
# adversarial loss
x_fake = nets.generator(x_real, s_trg, masks=None, F0=GAN_F0_real)
out = nets.discriminator(x_fake, y_trg)
loss_adv = adv_loss(out, 1)
# compute ASR/F0 features (fake)
F0_fake, GAN_F0_fake, _ = nets.f0_model(x_fake)
ASR_fake = nets.asr_model.get_feature(x_fake)
# norm consistency loss
x_fake_norm = log_norm(x_fake)
x_real_norm = log_norm(x_real)
loss_norm = ((torch.nn.ReLU()(torch.abs(x_fake_norm - x_real_norm) - args.norm_bias))**2).mean()
# F0 loss
loss_f0 = f0_loss(F0_fake, F0_real)
# style F0 loss (style initialization)
if x_refs is not None and args.lambda_f0_sty > 0 and not use_adv_cls:
F0_sty, _, _ = nets.f0_model(x_ref)
loss_f0_sty = F.l1_loss(compute_mean_f0(F0_fake), compute_mean_f0(F0_sty))
else:
loss_f0_sty = torch.zeros(1).mean()
# ASR loss
loss_asr = F.smooth_l1_loss(ASR_fake, ASR_real)
# style reconstruction loss
s_pred = nets.style_encoder(x_fake, y_trg)
loss_sty = torch.mean(torch.abs(s_pred - s_trg))
# diversity sensitive loss
if z_trgs is not None:
s_trg2 = nets.mapping_network(z_trg2, y_trg)
else:
s_trg2 = nets.style_encoder(x_ref2, y_trg)
x_fake2 = nets.generator(x_real, s_trg2, masks=None, F0=GAN_F0_real)
x_fake2 = x_fake2.detach()
_, GAN_F0_fake2, _ = nets.f0_model(x_fake2)
loss_ds = torch.mean(torch.abs(x_fake - x_fake2))
loss_ds += F.smooth_l1_loss(GAN_F0_fake, GAN_F0_fake2.detach())
# cycle-consistency loss
s_org = nets.style_encoder(x_real, y_org)
x_rec = nets.generator(x_fake, s_org, masks=None, F0=GAN_F0_fake)
loss_cyc = torch.mean(torch.abs(x_rec - x_real))
# F0 loss in cycle-consistency loss
if args.lambda_f0 > 0:
_, _, cyc_F0_rec = nets.f0_model(x_rec)
loss_cyc += F.smooth_l1_loss(cyc_F0_rec, cyc_F0_real)
if args.lambda_asr > 0:
ASR_recon = nets.asr_model.get_feature(x_rec)
loss_cyc += F.smooth_l1_loss(ASR_recon, ASR_real)
# adversarial classifier loss
if use_adv_cls:
out_de = nets.discriminator.classifier(x_fake)
loss_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_trg[y_org != y_trg])
else:
loss_adv_cls = torch.zeros(1).mean()
loss = args.lambda_adv * loss_adv + args.lambda_sty * loss_sty \
- args.lambda_ds * loss_ds + args.lambda_cyc * loss_cyc\
+ args.lambda_norm * loss_norm \
+ args.lambda_asr * loss_asr \
+ args.lambda_f0 * loss_f0 \
+ args.lambda_f0_sty * loss_f0_sty \
+ args.lambda_adv_cls * loss_adv_cls
return loss, Munch(adv=loss_adv.item(),
sty=loss_sty.item(),
ds=loss_ds.item(),
cyc=loss_cyc.item(),
norm=loss_norm.item(),
asr=loss_asr.item(),
f0=loss_f0.item(),
adv_cls=loss_adv_cls.item())
# for norm consistency loss
def log_norm(x, mean=-4, std=4, dim=2):
"""
    de-normalized log mel -> linear mel -> norm along `dim` -> log(norm)
"""
x = torch.log(torch.exp(x * std + mean).norm(dim=dim))
return x
# for adversarial loss
def adv_loss(logits, target):
assert target in [1, 0]
if len(logits.shape) > 1:
logits = logits.reshape(-1)
targets = torch.full_like(logits, fill_value=target)
logits = logits.clamp(min=-10, max=10) # prevent nan
loss = F.binary_cross_entropy_with_logits(logits, targets)
return loss
# for R1 regularization loss
def r1_reg(d_out, x_in):
# zero-centered gradient penalty for real images
batch_size = x_in.size(0)
grad_dout = torch.autograd.grad(
outputs=d_out.sum(), inputs=x_in,
create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
assert(grad_dout2.size() == x_in.size())
reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)
return reg
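# Added note: this is the zero-centred R1 penalty of Mescheder et al. 2018,
#   R1 = 1/2 * E_{x ~ p_real}[ || grad_x D(x) ||^2 ],
# weighted by args.lambda_reg in compute_d_loss above.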
# for F0 consistency loss
def compute_mean_f0(f0):
f0_mean = f0.mean(-1)
f0_mean = f0_mean.expand(f0.shape[-1], f0_mean.shape[0]).transpose(0, 1) # (B, M)
return f0_mean
def f0_loss(x_f0, y_f0):
"""
x.shape = (B, 1, M, L): predict
y.shape = (B, 1, M, L): target
"""
# compute the mean
x_mean = compute_mean_f0(x_f0)
y_mean = compute_mean_f0(y_f0)
loss = F.l1_loss(x_f0 / x_mean, y_f0 / y_mean)
return loss
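# Added note: f0_loss divides each F0 track by its own per-sample mean
# (compute_mean_f0 averages over the time axis and broadcasts the mean back),
# so the L1 penalty compares relative pitch contours rather than absolute
# pitch, which naturally differs between source and target speakers.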
| 7,608 | 34.390698 | 132 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/optimizers.py
|
#coding:utf-8
import os, sys
import os.path as osp
import numpy as np
import torch
from torch import nn
from torch.optim import Optimizer
from functools import reduce
from torch.optim import AdamW
class MultiOptimizer:
    def __init__(self, optimizers=None, schedulers=None):
        # avoid mutable default arguments
        self.optimizers = optimizers if optimizers is not None else {}
        self.schedulers = schedulers if schedulers is not None else {}
        self.keys = list(self.optimizers.keys())
        self.param_groups = reduce(lambda x, y: x + y, [v.param_groups for v in self.optimizers.values()], [])
def state_dict(self):
state_dicts = [(key, self.optimizers[key].state_dict())\
for key in self.keys]
return state_dicts
def load_state_dict(self, state_dict):
for key, val in state_dict:
try:
self.optimizers[key].load_state_dict(val)
            except Exception:
                print("Could not load state for optimizer %s" % key)
def step(self, key=None, scaler=None):
keys = [key] if key is not None else self.keys
_ = [self._step(key, scaler) for key in keys]
def _step(self, key, scaler=None):
if scaler is not None:
scaler.step(self.optimizers[key])
scaler.update()
else:
self.optimizers[key].step()
def zero_grad(self, key=None):
if key is not None:
self.optimizers[key].zero_grad()
else:
_ = [self.optimizers[key].zero_grad() for key in self.keys]
def scheduler(self, *args, key=None):
if key is not None:
self.schedulers[key].step(*args)
else:
_ = [self.schedulers[key].step(*args) for key in self.keys]
def define_scheduler(optimizer, params):
print(params)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=params.get('max_lr', 2e-4),
epochs=params.get('epochs', 200),
steps_per_epoch=params.get('steps_per_epoch', 1000),
pct_start=params.get('pct_start', 0.0),
div_factor=1,
final_div_factor=1)
return scheduler
def build_optimizer(parameters_dict, scheduler_params_dict):
optim = dict([(key, AdamW(params, lr=1e-4, weight_decay=1e-4, betas=(0.0, 0.99), eps=1e-9))
for key, params in parameters_dict.items()])
schedulers = dict([(key, define_scheduler(opt, scheduler_params_dict[key])) \
for key, opt in optim.items()])
multi_optim = MultiOptimizer(optim, schedulers)
return multi_optim
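# Added usage sketch (hedged, values illustrative): build_optimizer creates one
# AdamW optimizer and one OneCycleLR scheduler per sub-module, keyed by the
# same names as the model dict, which is how train.py wires it up:
#
#   params    = {key: model[key].parameters() for key in model}
#   sched_cfg = {key: {"max_lr": 2e-4, "epochs": 150,
#                      "steps_per_epoch": len(train_dataloader)} for key in model}
#   optimizer = build_optimizer(params, scheduler_params_dict=sched_cfg)
#   optimizer.zero_grad()
#   optimizer.step("generator")   # step only the generator's optimizer
#   optimizer.scheduler()         # advance every scheduler by one iteration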
| 2,460 | 32.256757 | 103 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/meldataset.py
|
#coding: utf-8
import os
import time
import random
import torch
import torchaudio
import numpy as np
import soundfile as sf
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
np.random.seed(1)
random.seed(1)
SPECT_PARAMS = {
"n_fft": 2048,
"win_length": 1200,
"hop_length": 300
}
MEL_PARAMS = {
"n_mels": 80,
"n_fft": 2048,
"win_length": 1200,
"hop_length": 300
}
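# Added note (hedged): assuming 24 kHz audio, which is what the sr=24000
# default in MelDataset below suggests, hop_length=300 corresponds to
# 24000 / 300 = 80 mel frames per second, so the fixed crop of
# max_mel_length=192 frames is roughly 2.4 s of audio and win_length=1200 is a
# 50 ms analysis window.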
class MelDataset(torch.utils.data.Dataset):
def __init__(self,
data_list,
sr=24000,
validation=False,
):
_data_list = [l[:-1].split('|') for l in data_list]
self.data_list = [(path, int(label)) for path, label in _data_list]
self.data_list_per_class = {
target: [(path, label) for path, label in self.data_list if label == target] \
for target in list(set([label for _, label in self.data_list]))}
self.sr = sr
self.to_melspec = torchaudio.transforms.MelSpectrogram(**MEL_PARAMS)
self.mean, self.std = -4, 4
self.validation = validation
self.max_mel_length = 192
def __len__(self):
return len(self.data_list)
def __getitem__(self, idx):
data = self.data_list[idx]
mel_tensor, label = self._load_data(data)
ref_data = random.choice(self.data_list)
ref_mel_tensor, ref_label = self._load_data(ref_data)
ref2_data = random.choice(self.data_list_per_class[ref_label])
ref2_mel_tensor, _ = self._load_data(ref2_data)
return mel_tensor, label, ref_mel_tensor, ref2_mel_tensor, ref_label
    def _load_data(self, data):
        wave_tensor, label = self._load_tensor(data)
if not self.validation: # random scale for robustness
random_scale = 0.5 + 0.5 * np.random.random()
wave_tensor = random_scale * wave_tensor
mel_tensor = self.to_melspec(wave_tensor)
mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mean) / self.std
mel_length = mel_tensor.size(1)
if mel_length > self.max_mel_length:
random_start = np.random.randint(0, mel_length - self.max_mel_length)
mel_tensor = mel_tensor[:, random_start:random_start + self.max_mel_length]
return mel_tensor, label
    def _preprocess(self, wave_tensor):
mel_tensor = self.to_melspec(wave_tensor)
mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mean) / self.std
return mel_tensor
def _load_tensor(self, data):
wave_path, label = data
label = int(label)
wave, sr = sf.read(wave_path)
wave_tensor = torch.from_numpy(wave).float()
return wave_tensor, label
class Collater(object):
"""
Args:
adaptive_batch_size (bool): if true, decrease batch size when long data comes.
"""
def __init__(self, return_wave=False):
self.text_pad_index = 0
self.return_wave = return_wave
self.max_mel_length = 192
self.mel_length_step = 16
self.latent_dim = 16
def __call__(self, batch):
batch_size = len(batch)
nmels = batch[0][0].size(0)
mels = torch.zeros((batch_size, nmels, self.max_mel_length)).float()
labels = torch.zeros((batch_size)).long()
ref_mels = torch.zeros((batch_size, nmels, self.max_mel_length)).float()
ref2_mels = torch.zeros((batch_size, nmels, self.max_mel_length)).float()
ref_labels = torch.zeros((batch_size)).long()
for bid, (mel, label, ref_mel, ref2_mel, ref_label) in enumerate(batch):
mel_size = mel.size(1)
mels[bid, :, :mel_size] = mel
ref_mel_size = ref_mel.size(1)
ref_mels[bid, :, :ref_mel_size] = ref_mel
ref2_mel_size = ref2_mel.size(1)
ref2_mels[bid, :, :ref2_mel_size] = ref2_mel
labels[bid] = label
ref_labels[bid] = ref_label
z_trg = torch.randn(batch_size, self.latent_dim)
z_trg2 = torch.randn(batch_size, self.latent_dim)
mels, ref_mels, ref2_mels = mels.unsqueeze(1), ref_mels.unsqueeze(1), ref2_mels.unsqueeze(1)
return mels, labels, ref_mels, ref2_mels, ref_labels, z_trg, z_trg2
def build_dataloader(path_list,
validation=False,
batch_size=4,
num_workers=1,
device='cpu',
collate_config={},
dataset_config={}):
dataset = MelDataset(path_list, validation=validation)
collate_fn = Collater(**collate_config)
data_loader = DataLoader(dataset,
batch_size=batch_size,
shuffle=(not validation),
num_workers=num_workers,
drop_last=True,
collate_fn=collate_fn,
pin_memory=(device != 'cpu'))
return data_loader
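# Added usage sketch (hedged): a batch from this loader unpacks exactly the way
# trainer.py consumes it:
#
#   loader = build_dataloader(train_list, batch_size=4)
#   mels, labels, ref_mels, ref2_mels, ref_labels, z_trg, z_trg2 = next(iter(loader))
#   # mels / ref_mels / ref2_mels : (B, 1, 80, 192) normalized log-mel crops
#   # labels / ref_labels         : (B,) integer speaker (domain) labels
#   # z_trg / z_trg2              : (B, 16) latent codes for the mapping network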
| 5,161 | 32.089744 | 100 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/models.py
|
"""
StarGAN v2
Copyright (c) 2020-present NAVER Corp.
This work is licensed under the Creative Commons Attribution-NonCommercial
4.0 International License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""
import os
import os.path as osp
import copy
import math
from munch import Munch
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class DownSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.avg_pool2d(x, (2, 1))
elif self.layer_type == 'half':
return F.avg_pool2d(x, 2)
else:
            raise RuntimeError('Got unexpected downsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
class UpSample(nn.Module):
def __init__(self, layer_type):
super().__init__()
self.layer_type = layer_type
def forward(self, x):
if self.layer_type == 'none':
return x
elif self.layer_type == 'timepreserve':
return F.interpolate(x, scale_factor=(2, 1), mode='nearest')
elif self.layer_type == 'half':
return F.interpolate(x, scale_factor=2, mode='nearest')
else:
            raise RuntimeError('Got unexpected upsample type %s, expected one of [none, timepreserve, half]' % self.layer_type)
class ResBlk(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2),
normalize=False, downsample='none'):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = DownSample(downsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = self.downsample(x)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
x = self.downsample(x)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2) # unit variance
class AdaIN(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features*2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1, 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
class AdainResBlk(nn.Module):
def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0,
actv=nn.LeakyReLU(0.2), upsample='none'):
super().__init__()
self.w_hpf = w_hpf
self.actv = actv
self.upsample = UpSample(upsample)
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out, style_dim)
def _build_weights(self, dim_in, dim_out, style_dim=64):
self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
self.norm1 = AdaIN(style_dim, dim_in)
self.norm2 = AdaIN(style_dim, dim_out)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
x = self.upsample(x)
if self.learned_sc:
x = self.conv1x1(x)
return x
def _residual(self, x, s):
x = self.norm1(x, s)
x = self.actv(x)
x = self.upsample(x)
x = self.conv1(x)
x = self.norm2(x, s)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x, s):
out = self._residual(x, s)
if self.w_hpf == 0:
out = (out + self._shortcut(x)) / math.sqrt(2)
return out
class HighPass(nn.Module):
def __init__(self, w_hpf, device):
super(HighPass, self).__init__()
self.filter = torch.tensor([[-1, -1, -1],
[-1, 8., -1],
[-1, -1, -1]]).to(device) / w_hpf
def forward(self, x):
filter = self.filter.unsqueeze(0).unsqueeze(1).repeat(x.size(1), 1, 1, 1)
return F.conv2d(x, filter, padding=1, groups=x.size(1))
class Generator(nn.Module):
def __init__(self, dim_in=48, style_dim=48, max_conv_dim=48*8, w_hpf=1, F0_channel=0):
super().__init__()
self.stem = nn.Conv2d(1, dim_in, 3, 1, 1)
self.encode = nn.ModuleList()
self.decode = nn.ModuleList()
self.to_out = nn.Sequential(
nn.InstanceNorm2d(dim_in, affine=True),
nn.LeakyReLU(0.2),
nn.Conv2d(dim_in, 1, 1, 1, 0))
self.F0_channel = F0_channel
# down/up-sampling blocks
repeat_num = 4 #int(np.log2(img_size)) - 4
if w_hpf > 0:
repeat_num += 1
for lid in range(repeat_num):
if lid in [1, 3]:
_downtype = 'timepreserve'
else:
_downtype = 'half'
dim_out = min(dim_in*2, max_conv_dim)
self.encode.append(
ResBlk(dim_in, dim_out, normalize=True, downsample=_downtype))
self.decode.insert(
0, AdainResBlk(dim_out, dim_in, style_dim,
w_hpf=w_hpf, upsample=_downtype)) # stack-like
dim_in = dim_out
# bottleneck blocks (encoder)
for _ in range(2):
self.encode.append(
ResBlk(dim_out, dim_out, normalize=True))
# F0 blocks
if F0_channel != 0:
self.decode.insert(
0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out, style_dim, w_hpf=w_hpf))
# bottleneck blocks (decoder)
for _ in range(2):
self.decode.insert(
0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out + int(F0_channel / 2), style_dim, w_hpf=w_hpf))
if F0_channel != 0:
self.F0_conv = nn.Sequential(
ResBlk(F0_channel, int(F0_channel / 2), normalize=True, downsample="half"),
)
if w_hpf > 0:
device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.hpf = HighPass(w_hpf, device)
def forward(self, x, s, masks=None, F0=None):
x = self.stem(x)
cache = {}
for block in self.encode:
if (masks is not None) and (x.size(2) in [32, 64, 128]):
cache[x.size(2)] = x
x = block(x)
if F0 is not None:
F0 = self.F0_conv(F0)
F0 = F.adaptive_avg_pool2d(F0, [x.shape[-2], x.shape[-1]])
x = torch.cat([x, F0], axis=1)
for block in self.decode:
x = block(x, s)
if (masks is not None) and (x.size(2) in [32, 64, 128]):
mask = masks[0] if x.size(2) in [32] else masks[1]
mask = F.interpolate(mask, size=x.size(2), mode='bilinear')
x = x + self.hpf(mask * cache[x.size(2)])
return self.to_out(x)
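# Added usage sketch (hedged, mirrors the calls in losses.py): the generator is
# driven by a style code plus an F0 feature map from the JDC network, and the
# training code here passes masks=None (the mask / high-pass branch looks like a
# carry-over from the image version of StarGAN v2):
#
#   s_trg  = nets.style_encoder(x_ref, y_trg)                    # (B, style_dim)
#   x_fake = nets.generator(x_real, s_trg, masks=None, F0=GAN_F0_real)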
class MappingNetwork(nn.Module):
def __init__(self, latent_dim=16, style_dim=48, num_domains=2, hidden_dim=384):
super().__init__()
layers = []
layers += [nn.Linear(latent_dim, hidden_dim)]
layers += [nn.ReLU()]
for _ in range(3):
layers += [nn.Linear(hidden_dim, hidden_dim)]
layers += [nn.ReLU()]
self.shared = nn.Sequential(*layers)
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared += [nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, style_dim))]
def forward(self, z, y):
h = self.shared(z)
out = []
for layer in self.unshared:
out += [layer(h)]
out = torch.stack(out, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
return s
class StyleEncoder(nn.Module):
def __init__(self, dim_in=48, style_dim=48, num_domains=2, max_conv_dim=384):
super().__init__()
blocks = []
blocks += [nn.Conv2d(1, dim_in, 3, 1, 1)]
repeat_num = 4
for _ in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.Conv2d(dim_out, dim_out, 5, 1, 0)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.LeakyReLU(0.2)]
self.shared = nn.Sequential(*blocks)
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared += [nn.Linear(dim_out, style_dim)]
def forward(self, x, y):
h = self.shared(x)
h = h.view(h.size(0), -1)
out = []
for layer in self.unshared:
out += [layer(h)]
out = torch.stack(out, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
return s
class Discriminator(nn.Module):
def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
super().__init__()
# real/fake discriminator
self.dis = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
max_conv_dim=max_conv_dim, repeat_num=repeat_num)
# adversarial classifier
self.cls = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
max_conv_dim=max_conv_dim, repeat_num=repeat_num)
self.num_domains = num_domains
def forward(self, x, y):
return self.dis(x, y)
def classifier(self, x):
return self.cls.get_feature(x)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class Discriminator2d(nn.Module):
def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
super().__init__()
blocks = []
blocks += [nn.Conv2d(1, dim_in, 3, 1, 1)]
for lid in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.Conv2d(dim_out, dim_out, 5, 1, 0)]
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.Conv2d(dim_out, num_domains, 1, 1, 0)]
self.main = nn.Sequential(*blocks)
def get_feature(self, x):
out = self.main(x)
out = out.view(out.size(0), -1) # (batch, num_domains)
return out
def forward(self, x, y):
out = self.get_feature(x)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
out = out[idx, y] # (batch)
return out
def build_model(args, F0_model, ASR_model):
generator = Generator(args.dim_in, args.style_dim, args.max_conv_dim, w_hpf=args.w_hpf, F0_channel=args.F0_channel)
mapping_network = MappingNetwork(args.latent_dim, args.style_dim, args.num_domains, hidden_dim=args.max_conv_dim)
style_encoder = StyleEncoder(args.dim_in, args.style_dim, args.num_domains, args.max_conv_dim)
discriminator = Discriminator(args.dim_in, args.num_domains, args.max_conv_dim, args.n_repeat)
generator_ema = copy.deepcopy(generator)
mapping_network_ema = copy.deepcopy(mapping_network)
style_encoder_ema = copy.deepcopy(style_encoder)
nets = Munch(generator=generator,
mapping_network=mapping_network,
style_encoder=style_encoder,
discriminator=discriminator,
f0_model=F0_model,
asr_model=ASR_model)
nets_ema = Munch(generator=generator_ema,
mapping_network=mapping_network_ema,
style_encoder=style_encoder_ema)
return nets, nets_ema
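# Added note (hedged): nets_ema only carries the generator, mapping network and
# style encoder, i.e. the copies that trainer.py updates with moving_average(),
# presumably because those are the only modules needed to run conversion at
# inference time; the discriminator and the frozen F0/ASR networks get no EMA
# counterpart.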
| 13,766 | 34.3 | 124 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/train.py
|
#!/usr/bin/env python3
#coding:utf-8
import os
import os.path as osp
import re
import sys
import yaml
import shutil
import numpy as np
import torch
import click
import warnings
warnings.simplefilter('ignore')
from functools import reduce
from munch import Munch
from meldataset import build_dataloader
from optimizers import build_optimizer
from models import build_model
from trainer import Trainer
from torch.utils.tensorboard import SummaryWriter
from Utils.ASR.models import ASRCNN
from Utils.JDC.model import JDCNet
import logging
from logging import StreamHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
torch.backends.cudnn.benchmark = True
@click.command()
@click.option('-p', '--config_path', default='Configs/config.yml', type=str)
def main(config_path):
config = yaml.safe_load(open(config_path))
log_dir = config['log_dir']
if not osp.exists(log_dir): os.makedirs(log_dir, exist_ok=True)
shutil.copy(config_path, osp.join(log_dir, osp.basename(config_path)))
writer = SummaryWriter(log_dir + "/tensorboard")
# write logs
file_handler = logging.FileHandler(osp.join(log_dir, 'train.log'))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s: %(message)s'))
logger.addHandler(file_handler)
batch_size = config.get('batch_size', 10)
device = config.get('device', 'cpu')
epochs = config.get('epochs', 1000)
save_freq = config.get('save_freq', 20)
train_path = config.get('train_data', None)
val_path = config.get('val_data', None)
stage = config.get('stage', 'star')
fp16_run = config.get('fp16_run', False)
# load data
train_list, val_list = get_data_path_list(train_path, val_path)
train_dataloader = build_dataloader(train_list,
batch_size=batch_size,
num_workers=4,
device=device)
val_dataloader = build_dataloader(val_list,
batch_size=batch_size,
validation=True,
num_workers=2,
device=device)
# load pretrained ASR model
ASR_config = config.get('ASR_config', False)
ASR_path = config.get('ASR_path', False)
with open(ASR_config) as f:
ASR_config = yaml.safe_load(f)
ASR_model_config = ASR_config['model_params']
ASR_model = ASRCNN(**ASR_model_config)
params = torch.load(ASR_path, map_location='cpu')['model']
ASR_model.load_state_dict(params)
_ = ASR_model.eval()
# load pretrained F0 model
F0_path = config.get('F0_path', False)
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(F0_path, map_location='cpu')['net']
F0_model.load_state_dict(params)
# build model
model, model_ema = build_model(Munch(config['model_params']), F0_model, ASR_model)
scheduler_params = {
"max_lr": float(config['optimizer_params'].get('lr', 2e-4)),
"pct_start": float(config['optimizer_params'].get('pct_start', 0.0)),
"epochs": epochs,
"steps_per_epoch": len(train_dataloader),
}
_ = [model[key].to(device) for key in model]
_ = [model_ema[key].to(device) for key in model_ema]
scheduler_params_dict = {key: scheduler_params.copy() for key in model}
scheduler_params_dict['mapping_network']['max_lr'] = 2e-6
optimizer = build_optimizer({key: model[key].parameters() for key in model},
scheduler_params_dict=scheduler_params_dict)
trainer = Trainer(args=Munch(config['loss_params']), model=model,
model_ema=model_ema,
optimizer=optimizer,
device=device,
train_dataloader=train_dataloader,
val_dataloader=val_dataloader,
logger=logger,
fp16_run=fp16_run)
if config.get('pretrained_model', '') != '':
trainer.load_checkpoint(config['pretrained_model'],
load_only_params=config.get('load_only_params', True))
for _ in range(1, epochs+1):
epoch = trainer.epochs
train_results = trainer._train_epoch()
eval_results = trainer._eval_epoch()
results = train_results.copy()
results.update(eval_results)
logger.info('--- epoch %d ---' % epoch)
for key, value in results.items():
if isinstance(value, float):
logger.info('%-15s: %.4f' % (key, value))
writer.add_scalar(key, value, epoch)
else:
for v in value:
writer.add_figure('eval_spec', v, epoch)
if (epoch % save_freq) == 0:
trainer.save_checkpoint(osp.join(log_dir, 'epoch_%05d.pth' % epoch))
return 0
def get_data_path_list(train_path=None, val_path=None):
if train_path is None:
train_path = "Data/train_list.txt"
if val_path is None:
val_path = "Data/val_list.txt"
with open(train_path, 'r') as f:
train_list = f.readlines()
with open(val_path, 'r') as f:
val_list = f.readlines()
return train_list, val_list
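# Added note: each line of the train/val list files is expected to look like
# "path/to/utterance.wav|<speaker_index>", because MelDataset in meldataset.py
# splits every line on '|' and casts the second field to int.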
if __name__=="__main__":
main()
| 5,523 | 34.184713 | 90 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/trainer.py
|
# -*- coding: utf-8 -*-
import os
import os.path as osp
import sys
import time
from collections import defaultdict
import numpy as np
import torch
from torch import nn
from PIL import Image
from tqdm import tqdm
from losses import compute_d_loss, compute_g_loss
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Trainer(object):
def __init__(self,
args,
model=None,
model_ema=None,
optimizer=None,
scheduler=None,
config={},
device=torch.device("cpu"),
logger=logger,
train_dataloader=None,
val_dataloader=None,
initial_steps=0,
initial_epochs=0,
fp16_run=False
):
self.args = args
self.steps = initial_steps
self.epochs = initial_epochs
self.model = model
self.model_ema = model_ema
self.optimizer = optimizer
self.scheduler = scheduler
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.config = config
self.device = device
self.finish_train = False
self.logger = logger
self.fp16_run = fp16_run
def _train_epoch(self):
"""Train model one epoch."""
raise NotImplementedError
@torch.no_grad()
def _eval_epoch(self):
"""Evaluate model one epoch."""
pass
def save_checkpoint(self, checkpoint_path):
"""Save checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be saved.
"""
state_dict = {
"optimizer": self.optimizer.state_dict(),
"steps": self.steps,
"epochs": self.epochs,
"model": {key: self.model[key].state_dict() for key in self.model}
}
if self.model_ema is not None:
state_dict['model_ema'] = {key: self.model_ema[key].state_dict() for key in self.model_ema}
if not os.path.exists(os.path.dirname(checkpoint_path)):
os.makedirs(os.path.dirname(checkpoint_path))
torch.save(state_dict, checkpoint_path)
def load_checkpoint(self, checkpoint_path, load_only_params=False):
"""Load checkpoint.
Args:
checkpoint_path (str): Checkpoint path to be loaded.
load_only_params (bool): Whether to load only model parameters.
"""
state_dict = torch.load(checkpoint_path, map_location="cpu")
for key in self.model:
self._load(state_dict["model"][key], self.model[key])
if self.model_ema is not None:
for key in self.model_ema:
self._load(state_dict["model_ema"][key], self.model_ema[key])
if not load_only_params:
self.steps = state_dict["steps"]
self.epochs = state_dict["epochs"]
self.optimizer.load_state_dict(state_dict["optimizer"])
def _load(self, states, model, force_load=True):
model_states = model.state_dict()
for key, val in states.items():
try:
if key not in model_states:
continue
if isinstance(val, nn.Parameter):
val = val.data
if val.shape != model_states[key].shape:
self.logger.info("%s does not have same shape" % key)
print(val.shape, model_states[key].shape)
if not force_load:
continue
min_shape = np.minimum(np.array(val.shape), np.array(model_states[key].shape))
slices = [slice(0, min_index) for min_index in min_shape]
model_states[key][slices].copy_(val[slices])
else:
model_states[key].copy_(val)
            except Exception:
                self.logger.info("could not load parameter %s" % key)
                print("could not load parameter", key)
@staticmethod
def get_gradient_norm(model):
total_norm = 0
for p in model.parameters():
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = np.sqrt(total_norm)
return total_norm
@staticmethod
def length_to_mask(lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1))
return mask
def _get_lr(self):
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
break
return lr
@staticmethod
def moving_average(model, model_test, beta=0.999):
for param, param_test in zip(model.parameters(), model_test.parameters()):
param_test.data = torch.lerp(param.data, param_test.data, beta)
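    # Added note: torch.lerp(a, b, w) computes a + w * (b - a), so with
    # beta=0.999 each call sets param_test to 0.999 * param_test + 0.001 * param,
    # i.e. the usual exponential moving average used to maintain the *_ema
    # networks that _train_epoch updates after every generator step.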
def _train_epoch(self):
self.epochs += 1
train_losses = defaultdict(list)
_ = [self.model[k].train() for k in self.model]
scaler = torch.cuda.amp.GradScaler() if (('cuda' in str(self.device)) and self.fp16_run) else None
use_con_reg = (self.epochs >= self.args.con_reg_epoch)
use_adv_cls = (self.epochs >= self.args.adv_cls_epoch)
for train_steps_per_epoch, batch in enumerate(tqdm(self.train_dataloader, desc="[train]"), 1):
### load data
batch = [b.to(self.device) for b in batch]
x_real, y_org, x_ref, x_ref2, y_trg, z_trg, z_trg2 = batch
# train the discriminator (by random reference)
self.optimizer.zero_grad()
if scaler is not None:
with torch.cuda.amp.autocast():
d_loss, d_losses_latent = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
scaler.scale(d_loss).backward()
else:
d_loss, d_losses_latent = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
d_loss.backward()
self.optimizer.step('discriminator', scaler=scaler)
# train the discriminator (by target reference)
self.optimizer.zero_grad()
if scaler is not None:
with torch.cuda.amp.autocast():
d_loss, d_losses_ref = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
scaler.scale(d_loss).backward()
else:
d_loss, d_losses_ref = compute_d_loss(self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_adv_cls=use_adv_cls, use_con_reg=use_con_reg)
d_loss.backward()
self.optimizer.step('discriminator', scaler=scaler)
# train the generator (by random reference)
self.optimizer.zero_grad()
if scaler is not None:
with torch.cuda.amp.autocast():
g_loss, g_losses_latent = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
scaler.scale(g_loss).backward()
else:
g_loss, g_losses_latent = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
g_loss.backward()
self.optimizer.step('generator', scaler=scaler)
self.optimizer.step('mapping_network', scaler=scaler)
self.optimizer.step('style_encoder', scaler=scaler)
# train the generator (by target reference)
self.optimizer.zero_grad()
if scaler is not None:
with torch.cuda.amp.autocast():
g_loss, g_losses_ref = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)
scaler.scale(g_loss).backward()
else:
g_loss, g_losses_ref = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)
g_loss.backward()
self.optimizer.step('generator', scaler=scaler)
# compute moving average of network parameters
self.moving_average(self.model.generator, self.model_ema.generator, beta=0.999)
self.moving_average(self.model.mapping_network, self.model_ema.mapping_network, beta=0.999)
self.moving_average(self.model.style_encoder, self.model_ema.style_encoder, beta=0.999)
self.optimizer.scheduler()
for key in d_losses_latent:
train_losses["train/%s" % key].append(d_losses_latent[key])
for key in g_losses_latent:
train_losses["train/%s" % key].append(g_losses_latent[key])
train_losses = {key: np.mean(value) for key, value in train_losses.items()}
return train_losses
@torch.no_grad()
def _eval_epoch(self):
use_adv_cls = (self.epochs >= self.args.adv_cls_epoch)
eval_losses = defaultdict(list)
eval_images = defaultdict(list)
_ = [self.model[k].eval() for k in self.model]
for eval_steps_per_epoch, batch in enumerate(tqdm(self.val_dataloader, desc="[eval]"), 1):
### load data
batch = [b.to(self.device) for b in batch]
x_real, y_org, x_ref, x_ref2, y_trg, z_trg, z_trg2 = batch
            # compute discriminator losses (no parameter updates during evaluation)
d_loss, d_losses_latent = compute_d_loss(
self.model, self.args.d_loss, x_real, y_org, y_trg, z_trg=z_trg, use_r1_reg=False, use_adv_cls=use_adv_cls)
d_loss, d_losses_ref = compute_d_loss(
self.model, self.args.d_loss, x_real, y_org, y_trg, x_ref=x_ref, use_r1_reg=False, use_adv_cls=use_adv_cls)
            # compute generator losses (no parameter updates during evaluation)
g_loss, g_losses_latent = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], use_adv_cls=use_adv_cls)
g_loss, g_losses_ref = compute_g_loss(
self.model, self.args.g_loss, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], use_adv_cls=use_adv_cls)
for key in d_losses_latent:
eval_losses["eval/%s" % key].append(d_losses_latent[key])
for key in g_losses_latent:
eval_losses["eval/%s" % key].append(g_losses_latent[key])
# if eval_steps_per_epoch % 10 == 0:
# # generate x_fake
# s_trg = self.model_ema.style_encoder(x_ref, y_trg)
# F0 = self.model.f0_model.get_feature_GAN(x_real)
# x_fake = self.model_ema.generator(x_real, s_trg, masks=None, F0=F0)
# # generate x_recon
# s_real = self.model_ema.style_encoder(x_real, y_org)
# F0_fake = self.model.f0_model.get_feature_GAN(x_fake)
# x_recon = self.model_ema.generator(x_fake, s_real, masks=None, F0=F0_fake)
# eval_images['eval/image'].append(
# ([x_real[0, 0].cpu().numpy(),
# x_fake[0, 0].cpu().numpy(),
# x_recon[0, 0].cpu().numpy()]))
eval_losses = {key: np.mean(value) for key, value in eval_losses.items()}
eval_losses.update(eval_images)
return eval_losses
| 11,715 | 40.399293 | 175 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/transforms.py
|
# -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F
import random
## 1. RandomTimeStrech
class TimeStrech(nn.Module):
def __init__(self, scale):
super(TimeStrech, self).__init__()
self.scale = scale
def forward(self, x):
mel_size = x.size(-1)
x = F.interpolate(x, scale_factor=(1, self.scale), align_corners=False,
recompute_scale_factor=True, mode='bilinear').squeeze()
if x.size(-1) < mel_size:
noise_length = (mel_size - x.size(-1))
random_pos = random.randint(0, x.size(-1)) - noise_length
if random_pos < 0:
random_pos = 0
noise = x[..., random_pos:random_pos + noise_length]
x = torch.cat([x, noise], dim=-1)
else:
x = x[..., :mel_size]
return x.unsqueeze(1)
## 2. PitchShift
class PitchShift(nn.Module):
def __init__(self, shift):
super(PitchShift, self).__init__()
self.shift = shift
def forward(self, x):
if len(x.shape) == 2:
x = x.unsqueeze(0)
x = x.squeeze()
mel_size = x.shape[1]
shift_scale = (mel_size + self.shift) / mel_size
x = F.interpolate(x.unsqueeze(1), scale_factor=(shift_scale, 1.), align_corners=False,
recompute_scale_factor=True, mode='bilinear').squeeze(1)
x = x[:, :mel_size]
if x.size(1) < mel_size:
pad_size = mel_size - x.size(1)
x = torch.cat([x, torch.zeros(x.size(0), pad_size, x.size(2)).to(x.device)], dim=1)
x = x.squeeze()
return x.unsqueeze(1)
## 3. ShiftBias
class ShiftBias(nn.Module):
def __init__(self, bias):
super(ShiftBias, self).__init__()
self.bias = bias
def forward(self, x):
return x + self.bias
## 4. Scaling
class SpectScaling(nn.Module):
def __init__(self, scale):
super(SpectScaling, self).__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
## 5. Time Flip
class TimeFlip(nn.Module):
def __init__(self, length):
super(TimeFlip, self).__init__()
self.length = round(length)
def forward(self, x):
if self.length > 1:
start = np.random.randint(0, x.shape[-1] - self.length)
x_ret = x.clone()
x_ret[..., start:start + self.length] = torch.flip(x[..., start:start + self.length], dims=[-1])
x = x_ret
return x
class PhaseShuffle2d(nn.Module):
def __init__(self, n=2):
super(PhaseShuffle2d, self).__init__()
self.n = n
self.random = random.Random(1)
def forward(self, x, move=None):
# x.size = (B, C, M, L)
if move is None:
move = self.random.randint(-self.n, self.n)
if move == 0:
return x
else:
left = x[:, :, :, :move]
right = x[:, :, :, move:]
shuffled = torch.cat([right, left], dim=3)
return shuffled
def build_transforms():
transforms = [
lambda M: TimeStrech(1+ (np.random.random()-0.5)*M*0.2),
lambda M: SpectScaling(1 + (np.random.random()-1)*M*0.1),
lambda M: PhaseShuffle2d(192),
]
N, M = len(transforms), np.random.random()
composed = nn.Sequential(
*[trans(M) for trans in np.random.choice(transforms, N)]
)
return composed
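# Added note (hedged): build_transforms draws one random strength M shared by
# all transforms and then samples len(transforms) entries from the candidate
# list with np.random.choice, which samples with replacement by default, so a
# single call can pick the same augmentation twice and skip another. The
# composed nn.Sequential is presumably applied to mel batches for the
# consistency regularization controlled by use_con_reg in trainer.py.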
| 3,552 | 28.363636 | 106 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/__init__.py
| 1 | 0 | 0 |
py
|
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/JDC/model.py
|
"""
Implementation of model from:
Kum et al. - "Joint Detection and Classification of Singing Voice Melody Using
Convolutional Recurrent Neural Networks" (2019)
Link: https://www.semanticscholar.org/paper/Joint-Detection-and-Classification-of-Singing-Voice-Kum-Nam/60a2ad4c7db43bace75805054603747fcd062c0d
"""
import torch
from torch import nn
class JDCNet(nn.Module):
"""
Joint Detection and Classification Network model for singing voice melody.
"""
def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
super().__init__()
self.seq_len = seq_len # 31
self.num_class = num_class
# input = (b, 1, 31, 513), b = batch size
self.conv_block = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)
)
# res blocks
self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)
self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)
self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)
# pool block
self.pool_block = nn.Sequential(
nn.BatchNorm2d(num_features=256),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)
nn.Dropout(p=0.5),
)
# maxpool layers (for auxiliary network inputs)
# in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)
self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
# in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)
self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
# in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)
self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))
# in = (b, 640, 31, 2), out = (b, 256, 31, 2)
self.detector_conv = nn.Sequential(
nn.Conv2d(640, 256, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Dropout(p=0.5),
)
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
self.bilstm_classifier = nn.LSTM(
input_size=512, hidden_size=256,
batch_first=True, bidirectional=True) # (b, 31, 512)
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
self.bilstm_detector = nn.LSTM(
input_size=512, hidden_size=256,
batch_first=True, bidirectional=True) # (b, 31, 512)
# input: (b * 31, 512)
self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)
# input: (b * 31, 512)
self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier
# initialize weights
self.apply(self.init_weights)
def get_feature_GAN(self, x):
seq_len = x.shape[-2]
x = x.float().transpose(-1, -2)
convblock_out = self.conv_block(x)
resblock1_out = self.res_block1(convblock_out)
resblock2_out = self.res_block2(resblock1_out)
resblock3_out = self.res_block3(resblock2_out)
poolblock_out = self.pool_block[0](resblock3_out)
poolblock_out = self.pool_block[1](poolblock_out)
return poolblock_out.transpose(-1, -2)
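    # Added note (hedged): get_feature_GAN runs the same convolutional trunk as
    # forward() but stops after the first two pool_block layers (BatchNorm +
    # LeakyReLU, i.e. before max-pooling and dropout) and returns that
    # intermediate feature map. In StarGANv2-VC this is the "GAN F0" feature
    # passed to the generator as F0 conditioning, whereas the pitch estimate
    # returned first by forward() feeds the F0 consistency losses.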
def forward(self, x):
"""
Returns:
classification_prediction, detection_prediction
sizes: (b, 31, 722), (b, 31, 2)
"""
###############################
# forward pass for classifier #
###############################
x = x.float().transpose(-1, -2)
convblock_out = self.conv_block(x)
resblock1_out = self.res_block1(convblock_out)
resblock2_out = self.res_block2(resblock1_out)
resblock3_out = self.res_block3(resblock2_out)
poolblock_out = self.pool_block[0](resblock3_out)
poolblock_out = self.pool_block[1](poolblock_out)
GAN_feature = poolblock_out.transpose(-1, -2)
poolblock_out = self.pool_block[2](poolblock_out)
# (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)
classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, self.seq_len, 512))
classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states
classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)
classifier_out = self.classifier(classifier_out)
classifier_out = classifier_out.view((-1, self.seq_len, self.num_class)) # (b, 31, num_class)
# sizes: (b, 31, 722), (b, 31, 2)
# classifier output consists of predicted pitch classes per frame
# detector output consists of: (isvoice, notvoice) estimates per frame
return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out
@staticmethod
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):
for p in m.parameters():
if p.data is None:
continue
if len(p.shape) >= 2:
nn.init.orthogonal_(p.data)
else:
nn.init.normal_(p.data)
class ResBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, leaky_relu_slope=0.01):
super().__init__()
self.downsample = in_channels != out_channels
# BN / LReLU / MaxPool layer before the conv layer - see Figure 1b in the paper
self.pre_conv = nn.Sequential(
nn.BatchNorm2d(num_features=in_channels),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.MaxPool2d(kernel_size=(1, 2)), # apply downsampling on the y axis only
)
# conv layers
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
)
# 1 x 1 convolution layer to match the feature dimensions
self.conv1by1 = None
if self.downsample:
self.conv1by1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)
def forward(self, x):
x = self.pre_conv(x)
if self.downsample:
x = self.conv(x) + self.conv1by1(x)
else:
x = self.conv(x) + x
return x
| 7,157 | 39.670455 | 144 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/JDC/__init__.py
| 1 | 0 | 0 |
py
|
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/ASR/layers.py
|
import math
import torch
from torch import nn
from typing import Optional, Any
from torch import Tensor
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F
import random
random.seed(0)
def _get_activation_fn(activ):
if activ == 'relu':
return nn.ReLU()
elif activ == 'lrelu':
return nn.LeakyReLU(0.2)
    elif activ == 'swish':
        return nn.SiLU()  # x * sigmoid(x); returned as a module so it can be used inside nn.Sequential
else:
raise RuntimeError('Unexpected activ type %s, expected [relu, lrelu, swish]' % activ)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear', param=None):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class CausualConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=1, dilation=1, bias=True, w_init_gain='linear', param=None):
super(CausualConv, self).__init__()
        if padding is None:
            assert(kernel_size % 2 == 1)
            self.padding = int(dilation * (kernel_size - 1) / 2) * 2
        else:
            self.padding = padding * 2
        # self.padding is set in both branches so forward() can trim the causal tail
self.conv = nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=self.padding,
dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain, param=param))
def forward(self, x):
x = self.conv(x)
x = x[:, :, :-self.padding]
return x
class CausualBlock(nn.Module):
def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='lrelu'):
super(CausualBlock, self).__init__()
self.blocks = nn.ModuleList([
self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
for i in range(n_conv)])
def forward(self, x):
for block in self.blocks:
res = x
x = block(x)
x += res
return x
def _get_conv(self, hidden_dim, dilation, activ='lrelu', dropout_p=0.2):
layers = [
CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
_get_activation_fn(activ),
nn.BatchNorm1d(hidden_dim),
nn.Dropout(p=dropout_p),
CausualConv(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
_get_activation_fn(activ),
nn.Dropout(p=dropout_p)
]
return nn.Sequential(*layers)
class ConvBlock(nn.Module):
def __init__(self, hidden_dim, n_conv=3, dropout_p=0.2, activ='relu'):
super().__init__()
self._n_groups = 8
self.blocks = nn.ModuleList([
self._get_conv(hidden_dim, dilation=3**i, activ=activ, dropout_p=dropout_p)
for i in range(n_conv)])
def forward(self, x):
for block in self.blocks:
res = x
x = block(x)
x += res
return x
def _get_conv(self, hidden_dim, dilation, activ='relu', dropout_p=0.2):
layers = [
ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, dilation=dilation),
_get_activation_fn(activ),
nn.GroupNorm(num_groups=self._n_groups, num_channels=hidden_dim),
nn.Dropout(p=dropout_p),
ConvNorm(hidden_dim, hidden_dim, kernel_size=3, padding=1, dilation=1),
_get_activation_fn(activ),
nn.Dropout(p=dropout_p)
]
return nn.Sequential(*layers)
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cummulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class ForwardAttentionV2(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(ForwardAttentionV2, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float(1e20)
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: prev. and cumulative att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask, log_alpha):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cummulative attention weights
mask: binary mask for padded data
"""
log_energy = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
#log_energy =
if mask is not None:
log_energy.data.masked_fill_(mask, self.score_mask_value)
#attention_weights = F.softmax(alignment, dim=1)
#content_score = log_energy.unsqueeze(1) #[B, MAX_TIME] -> [B, 1, MAX_TIME]
#log_alpha = log_alpha.unsqueeze(2) #[B, MAX_TIME] -> [B, MAX_TIME, 1]
#log_total_score = log_alpha + content_score
#previous_attention_weights = attention_weights_cat[:,0,:]
log_alpha_shift_padded = []
max_time = log_energy.size(1)
for sft in range(2):
shifted = log_alpha[:,:max_time-sft]
shift_padded = F.pad(shifted, (sft,0), 'constant', self.score_mask_value)
log_alpha_shift_padded.append(shift_padded.unsqueeze(2))
biased = torch.logsumexp(torch.cat(log_alpha_shift_padded,2), 2)
log_alpha_new = biased + log_energy
attention_weights = F.softmax(log_alpha_new, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights, log_alpha_new
class PhaseShuffle2d(nn.Module):
def __init__(self, n=2):
super(PhaseShuffle2d, self).__init__()
self.n = n
self.random = random.Random(1)
def forward(self, x, move=None):
# x.size = (B, C, M, L)
if move is None:
move = self.random.randint(-self.n, self.n)
if move == 0:
return x
else:
left = x[:, :, :, :move]
right = x[:, :, :, move:]
shuffled = torch.cat([right, left], dim=3)
return shuffled
class PhaseShuffle1d(nn.Module):
def __init__(self, n=2):
super(PhaseShuffle1d, self).__init__()
self.n = n
self.random = random.Random(1)
def forward(self, x, move=None):
# x.size = (B, C, M, L)
if move is None:
move = self.random.randint(-self.n, self.n)
if move == 0:
return x
else:
left = x[:, :, :move]
right = x[:, :, move:]
shuffled = torch.cat([right, left], dim=2)
return shuffled
class MFCC(nn.Module):
def __init__(self, n_mfcc=40, n_mels=80):
super(MFCC, self).__init__()
self.n_mfcc = n_mfcc
self.n_mels = n_mels
self.norm = 'ortho'
dct_mat = audio_F.create_dct(self.n_mfcc, self.n_mels, self.norm)
self.register_buffer('dct_mat', dct_mat)
def forward(self, mel_specgram):
if len(mel_specgram.shape) == 2:
mel_specgram = mel_specgram.unsqueeze(0)
unsqueezed = True
else:
unsqueezed = False
        # (channel, n_mels, time).transpose(...) dot (n_mels, n_mfcc)
        # -> (channel, time, n_mfcc).transpose(...)
mfcc = torch.matmul(mel_specgram.transpose(1, 2), self.dct_mat).transpose(1, 2)
# unpack batch
if unsqueezed:
mfcc = mfcc.squeeze(0)
return mfcc
| 13,454 | 36.901408 | 143 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/ASR/models.py
|
import math
import torch
from torch import nn
from torch.nn import TransformerEncoder
import torch.nn.functional as F
from .layers import MFCC, Attention, LinearNorm, ConvNorm, ConvBlock
class ASRCNN(nn.Module):
def __init__(self,
input_dim=80,
hidden_dim=256,
n_token=35,
n_layers=6,
token_embedding_dim=256,
):
super().__init__()
self.n_token = n_token
self.n_down = 1
self.to_mfcc = MFCC()
self.init_cnn = ConvNorm(input_dim//2, hidden_dim, kernel_size=7, padding=3, stride=2)
self.cnns = nn.Sequential(
*[nn.Sequential(
ConvBlock(hidden_dim),
nn.GroupNorm(num_groups=1, num_channels=hidden_dim)
) for n in range(n_layers)])
self.projection = ConvNorm(hidden_dim, hidden_dim // 2)
self.ctc_linear = nn.Sequential(
LinearNorm(hidden_dim//2, hidden_dim),
nn.ReLU(),
LinearNorm(hidden_dim, n_token))
self.asr_s2s = ASRS2S(
embedding_dim=token_embedding_dim,
hidden_dim=hidden_dim//2,
n_token=n_token)
def forward(self, x, src_key_padding_mask=None, text_input=None):
x = self.to_mfcc(x)
x = self.init_cnn(x)
x = self.cnns(x)
x = self.projection(x)
x = x.transpose(1, 2)
ctc_logit = self.ctc_linear(x)
if text_input is not None:
_, s2s_logit, s2s_attn = self.asr_s2s(x, src_key_padding_mask, text_input)
return ctc_logit, s2s_logit, s2s_attn
else:
return ctc_logit
def get_feature(self, x):
x = self.to_mfcc(x.squeeze(1))
x = self.init_cnn(x)
x = self.cnns(x)
x = self.projection(x)
return x
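    # Added note (hedged): get_feature returns the projected CNN features
    # before the CTC and seq2seq heads. These are the "ASR features" that
    # losses.py matches between real, converted and cycle-reconstructed mels
    # with a smooth L1 loss, which encourages the conversion to keep the
    # linguistic content intact.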
def length_to_mask(self, lengths):
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
mask = torch.gt(mask+1, lengths.unsqueeze(1)).to(lengths.device)
return mask
def get_future_mask(self, out_length, unmask_future_steps=0):
"""
Args:
out_length (int): returned mask shape is (out_length, out_length).
            unmask_future_steps (int): number of future steps left unmasked.
        Return:
            mask (torch.BoolTensor): mask[i, j] = True if j > i + unmask_future_steps else False
"""
index_tensor = torch.arange(out_length).unsqueeze(0).expand(out_length, -1)
mask = torch.gt(index_tensor, index_tensor.T + unmask_future_steps)
return mask
class ASRS2S(nn.Module):
def __init__(self,
embedding_dim=256,
hidden_dim=512,
n_location_filters=32,
location_kernel_size=63,
n_token=40):
super(ASRS2S, self).__init__()
self.embedding = nn.Embedding(n_token, embedding_dim)
val_range = math.sqrt(6 / hidden_dim)
self.embedding.weight.data.uniform_(-val_range, val_range)
self.decoder_rnn_dim = hidden_dim
self.project_to_n_symbols = nn.Linear(self.decoder_rnn_dim, n_token)
self.attention_layer = Attention(
self.decoder_rnn_dim,
hidden_dim,
hidden_dim,
n_location_filters,
location_kernel_size
)
self.decoder_rnn = nn.LSTMCell(self.decoder_rnn_dim + embedding_dim, self.decoder_rnn_dim)
self.project_to_hidden = nn.Sequential(
LinearNorm(self.decoder_rnn_dim * 2, hidden_dim),
nn.Tanh())
self.sos = 1
self.eos = 2
def initialize_decoder_states(self, memory, mask):
"""
        memory.shape = (B, L, H) = (batch_size, max_timestep, hidden_dim)
"""
B, L, H = memory.shape
self.decoder_hidden = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
self.decoder_cell = torch.zeros((B, self.decoder_rnn_dim)).type_as(memory)
self.attention_weights = torch.zeros((B, L)).type_as(memory)
self.attention_weights_cum = torch.zeros((B, L)).type_as(memory)
self.attention_context = torch.zeros((B, H)).type_as(memory)
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
self.unk_index = 3
self.random_mask = 0.1
def forward(self, memory, memory_mask, text_input):
"""
        memory.shape = (B, L, H) = (batch_size, max_timestep, hidden_dim)
        memory_mask.shape = (B, L)
        text_input.shape = (B, T)
"""
self.initialize_decoder_states(memory, memory_mask)
# text random mask
random_mask = (torch.rand(text_input.shape) < self.random_mask).to(text_input.device)
_text_input = text_input.clone()
_text_input.masked_fill_(random_mask, self.unk_index)
decoder_inputs = self.embedding(_text_input).transpose(0, 1) # -> [T, B, channel]
start_embedding = self.embedding(
torch.LongTensor([self.sos]*decoder_inputs.size(1)).to(decoder_inputs.device))
decoder_inputs = torch.cat((start_embedding.unsqueeze(0), decoder_inputs), dim=0)
hidden_outputs, logit_outputs, alignments = [], [], []
while len(hidden_outputs) < decoder_inputs.size(0):
decoder_input = decoder_inputs[len(hidden_outputs)]
hidden, logit, attention_weights = self.decode(decoder_input)
hidden_outputs += [hidden]
logit_outputs += [logit]
alignments += [attention_weights]
hidden_outputs, logit_outputs, alignments = \
self.parse_decoder_outputs(
hidden_outputs, logit_outputs, alignments)
return hidden_outputs, logit_outputs, alignments
def decode(self, decoder_input):
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
cell_input,
(self.decoder_hidden, self.decoder_cell))
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)),dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.decoder_hidden,
self.memory,
self.processed_memory,
attention_weights_cat,
self.mask)
self.attention_weights_cum += self.attention_weights
hidden_and_context = torch.cat((self.decoder_hidden, self.attention_context), -1)
hidden = self.project_to_hidden(hidden_and_context)
        # dropout before the symbol projection, used as regularization
logit = self.project_to_n_symbols(F.dropout(hidden, 0.5, self.training))
return hidden, logit, self.attention_weights
def parse_decoder_outputs(self, hidden, logit, alignments):
# -> [B, T_out + 1, max_time]
alignments = torch.stack(alignments).transpose(0,1)
# [T_out + 1, B, n_symbols] -> [B, T_out + 1, n_symbols]
logit = torch.stack(logit).transpose(0, 1).contiguous()
hidden = torch.stack(hidden).transpose(0, 1).contiguous()
return hidden, logit, alignments
| 7,272 | 37.893048 | 118 |
py
|
StarGANv2-VC
|
StarGANv2-VC-main/Utils/ASR/__init__.py
| 1 | 0 | 0 |
py
|
|
Signal-is-Harder
|
Signal-is-Harder-main/train_vanilla.py
|
import os
import yaml
import argparse
import wandb
import time
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from data.util import get_dataset, IdxDataset
from module.util import get_model
from util import set_seed, get_optimizer, evaluate
def main():
# configuration
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
# manual overwriting of configuration for scripts
# initialize parser
parser = argparse.ArgumentParser()
parser.add_argument("--name", default=None, help = "Name of experiment")
parser.add_argument("--bias_conflicting_perc", default=None, type=float, help = "Percentage of bias conflicting samples in dataset")
parser.add_argument("--severity", default=None, type=int, help = "Severity of bias")
parser.add_argument("--dataset", default=None, help = "Choice of dataset")
parser.add_argument("--model_tag", default=None, help = "Choice of model")
parser.add_argument("--q", default=None, type=float, help = "q for GCE loss")
parser.add_argument("--random_state", default=None, type=int, help="Random state for seed")
parser.add_argument("--results_filename", default=None, help="Name of file to store results")
parser.add_argument("--epochs", default=None, type=int, help="Number of training epochs")
args = parser.parse_args()
# Replace all specified arguments
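    # Note: this relies on vars(args) preserving the order in which the arguments were
    # added above, which must match the order of the `updateable` list below.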
updateable = [config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["train"]["epochs"]]
values = []
    for i, v in enumerate(vars(args).values()):
        if v is not None:
            values.append(v)
            print("Overwriting configuration")
        else:
            values.append(updateable[i])
config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["train"]["epochs"] = values
# configuration sanity check
if not (
(config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP") or
(config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP_VAE") or
(config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet20") or
(config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet20")):
print("Are you sure you want to use the dataset "+config["data"]["dataset"]+" with the model "+ config["model"]["tag"]+"?")
# define variables from config
batch_size = config["train"]["batch_size"]
epochs = config["train"]["epochs"]
random_state = config["random_state"]
# wandb support
mode = "online" if config['wandb_logging'] else "disabled"
wandb.init(
project="Interpretable Debiasing",
entity="interpretable-debiasing",
config=config,
mode=mode
)
print("Running experiment: {}".format(config["name"]))
# set seed
set_seed(random_state)
# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"\nUsing device: {device}")
# load dataset
train_dataset = get_dataset(
config,
dataset_split="train"
)
test_dataset = get_dataset(
config,
dataset_split="eval"
)
train_dataset = IdxDataset(train_dataset)
test_dataset = IdxDataset(test_dataset)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
#num_workers=16,
pin_memory=True,
drop_last=True
)
test_loader = DataLoader(
test_dataset,
batch_size=256,
shuffle=False,
#num_workers=16,
pin_memory=True,
)
# define model
model = get_model(config).to(device)
# define optimizer
optimizer = get_optimizer(model, config)
# define loss function
criterion = torch.nn.CrossEntropyLoss()
# training & validation & test
for epoch in range(epochs):
train(model, train_loader, optimizer, criterion, epoch, epochs, device, config)
#validate()
test_acc = test(model, test_loader, device)
with open(config["results_filename"]+'.txt', 'a') as f:
f.writelines((['{} vanilla: {:8.4f}\n'.format(config["name"], test_acc)]))
timestamp = time.strftime(' %d-%b-%Y_%H:%M', time.localtime())
os.makedirs("./saved_models/vanilla/", exist_ok=True)
torch.save(model.state_dict(), "./saved_models/vanilla/" + config["name"] + timestamp + ".pth")
wandb.finish(quiet=True)
def train(
model,
train_loader,
optimizer,
criterion,
epoch,
epochs,
device,
config
):
"""Main training loop, where the network is trained
Args:
model: baseline model
train_loader: loader with the training data
optimizer: optimizer for backpropagation
criterion: loss function
epoch: current epoch
epochs: max number of epochs
device: current device (cpu or gpu)
"""
train_loader = tqdm(train_loader, position=0, leave=False)
train_loader.set_description(f"Epoch [{epoch}/{epochs}]")
total_acc, total_count = 0, 0
# training loop
model.train()
for idx, (data_index, data, attr) in enumerate(train_loader):
data, attr = data.to(device), attr.to(device)
label = attr[:, 0]
# bias = attr[:, 1]
optimizer.zero_grad()
mean, logvar = model.encoder(data)
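        # Vanilla baseline: classify directly from the encoder mean; the logvar of the
        # VAE-style encoder is not used for this model.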
logit = model.predict(mean)
loss = criterion(logit,label)
loss.backward()
optimizer.step()
corr = (logit.argmax(1) == label).sum().item()
batch_len = label.size(0)
total_acc += corr
total_count += batch_len
train_loader.set_postfix(loss=loss.item(), acc= corr / batch_len)
wandb.log({"train_loss": loss})
wandb.log({"train_accuracy": total_acc / total_count, "epoch": epoch})
print(
"| epoch {:3d} | training accuracy {:8.3f}".format(
epoch, total_acc / total_count
)
)
def test(model, test_loader, device):
"""Main test loop, where the network is tested in the end
Args:
model: our pytorch model
test_loader: loader with the validation data
device: current device (cpu or gpu)
"""
# testing the model
model.eval()
test_acc_aligned, test_acc_conflicting, test_acc = evaluate(model, test_loader, device)
wandb.log({"conflicting_test_accuracy_vanilla": test_acc_conflicting})
wandb.log({"aligned_test_accuracy_vanilla": test_acc_aligned})
wandb.log({"test_accuracy_vanilla": test_acc})
print("test accuracy {:8.3f}".format(test_acc))
return test_acc
if __name__ == "__main__":
main()
| 7,018 | 32.42381 | 249 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/make_dataset.py
|
'''Modified from https://github.com/alinlab/LfF/blob/master/util.py'''
import os
from tqdm import tqdm
import pickle
import numpy as np
import torch
from torchvision.datasets import CIFAR10, MNIST
import torchvision.transforms as T
from data.corrupted_cifar10_protocol import CORRUPTED_CIFAR10_PROTOCOL
from data.colored_mnist_protocol import COLORED_MNIST_PROTOCOL
from data.rotated_mnist_protocol import ROTATED_MNIST_PROTOCOL
from data.shifted_mnist_protocol import SHIFTED_MNIST_PROTOCOL
import yaml
import argparse
from util import set_seed
def make_attr_labels(target_labels, bias_aligned_ratio):
num_classes = target_labels.max().item() + 1
num_samples_per_class = np.array(
[
torch.sum(target_labels == label).item()
for label in range(num_classes)
]
)
ratios_per_class = bias_aligned_ratio * np.eye(num_classes) + (
1 - bias_aligned_ratio
) / (num_classes - 1) * (1 - np.eye(num_classes))
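    # Example: with 3 classes and bias_aligned_ratio = 0.9 this yields
    #   [[0.90, 0.05, 0.05],
    #    [0.05, 0.90, 0.05],
    #    [0.05, 0.05, 0.90]]
    # i.e. ~90% of each class receives the matching (bias-aligned) attribute and the
    # remaining 10% is spread uniformly over the other attributes.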
corruption_milestones_per_class = (
num_samples_per_class[:, np.newaxis]
* np.cumsum(ratios_per_class, axis=1)
).round()
attr_labels = torch.zeros_like(target_labels)
for label in range(num_classes):
indices = (target_labels == label).nonzero().squeeze()
corruption_milestones = corruption_milestones_per_class[label]
for corruption_idx, idx in enumerate(indices):
attr_labels[idx] = np.min(
np.nonzero(corruption_milestones > corruption_idx)[0]
).item()
return attr_labels
def make_corrupted_cifar10(
data_dir, skewed_ratio, corruption_names, severity, config, postfix="0"
):
cifar10_dir = os.path.join(data_dir, "CIFAR10")
corrupted_cifar10_dir = os.path.join(
data_dir, f"CorruptedCIFAR10-Type{postfix}-Skewed{skewed_ratio}-Severity{severity}"
)
os.makedirs(corrupted_cifar10_dir, exist_ok=True)
print(corrupted_cifar10_dir)
protocol = CORRUPTED_CIFAR10_PROTOCOL
convert_img = T.Compose([T.ToTensor(), T.ToPILImage()])
attr_names = ["object", "corruption"]
attr_names_path = os.path.join(corrupted_cifar10_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
dataset = CIFAR10(cifar10_dir, train=(split == "train"), download=True)
os.makedirs(os.path.join(corrupted_cifar10_dir, split), exist_ok=True)
if split == "train":
bias_aligned_ratio = 1-skewed_ratio
else:
bias_aligned_ratio = 0.1
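            # With 10 corruption types, an aligned ratio of 0.1 makes the test split
            # effectively unbiased: every corruption is equally likely for each object class.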
corruption_labels = make_attr_labels(
torch.LongTensor(dataset.targets), bias_aligned_ratio
)
images, attrs = [], []
for img, target_label, corruption_label in tqdm(
zip(dataset.data, dataset.targets, corruption_labels),
total=len(corruption_labels),
):
method_name = corruption_names[corruption_label]
corrupted_img = protocol[method_name](convert_img(img), severity+1)
images.append(np.array(corrupted_img).astype(np.uint8))
attrs.append([target_label, corruption_label])
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"] and severity == 4:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
if postfix == '0':
path = config["Dfa"]["data_dir"]+f'/cifar10c/{dfa_ratio:g}pct'
elif postfix == '1': path = config["Dfa"]["data_dir"]+f'/cifar10ct1/{dfa_ratio:g}pct'
else: raise NotImplementedError
attr = np.array(attrs)
imgs = np.array(images)
if split == "train":
for j in range(len(np.unique(attr))):
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
for i in range(len(ind)):
path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i],:,:,:])
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
for i in range(len(ind)):
path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i],:,:,:])
elif split == "test":
for j in range(len(np.unique(attr))):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in range(len(attr[:,0])):
path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
imageio.imwrite(path_img, imgs[i,:,:,:])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
else: raise NotImplementedError
image_path = os.path.join(corrupted_cifar10_dir, split, "images.npy")
np.save(image_path, np.array(images).astype(np.uint8))
attr_path = os.path.join(corrupted_cifar10_dir, split, "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_colored_mnist(data_dir, skewed_ratio, severity, config):
mnist_dir = os.path.join(data_dir, "MNIST")
colored_mnist_dir = os.path.join(
data_dir, f"ColoredMNIST-Skewed{skewed_ratio}-Severity{severity}"
)
os.makedirs(colored_mnist_dir, exist_ok=True)
print(colored_mnist_dir)
protocol = COLORED_MNIST_PROTOCOL
attr_names = ["digit", "color"]
attr_names_path = os.path.join(colored_mnist_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
os.makedirs(os.path.join(colored_mnist_dir, split), exist_ok=True)
if split == "train":
bias_aligned_ratio = 1. - skewed_ratio
else:
bias_aligned_ratio = 0.1
color_labels = make_attr_labels(
torch.LongTensor(dataset.targets), bias_aligned_ratio
)
images, attrs = [], []
for img, target_label, color_label in tqdm(
zip(dataset.data, dataset.targets, color_labels),
total=len(color_labels),
):
colored_img = protocol[color_label.item()](img, severity)
            # Move the RGB channel axis from first to last dimension (CHW -> HWC)
colored_img = np.moveaxis(np.uint8(colored_img), 0, 2)
images.append(colored_img)
attrs.append([target_label, color_label])
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"] and severity == 4:
import imageio
print("Creating dataset for Dfa too!")
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/cmnist/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = np.array(images)
if split == "train":
for j in range(len(np.unique(attr))):
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
for i in range(len(ind)):
path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i],:,:,:])
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
for i in range(len(ind)):
path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i],:,:,:])
elif split == "test":
for j in range(len(np.unique(attr))):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in range(len(attr[:,0])):
path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
imageio.imwrite(path_img, imgs[i,:,:,:])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
else: raise NotImplementedError
image_path = os.path.join(colored_mnist_dir, split, "images.npy")
np.save(image_path, np.array(images).astype(np.uint8))
attr_path = os.path.join(colored_mnist_dir, split, "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_rotated_mnist(data_dir, skewed_ratio, severity, config):
mnist_dir = os.path.join(data_dir, "MNIST")
rotated_mnist_dir = os.path.join(
data_dir, f"RotatedMNIST-Skewed{skewed_ratio}-Severity{severity}"
)
os.makedirs(rotated_mnist_dir, exist_ok=True)
print(rotated_mnist_dir)
protocol = ROTATED_MNIST_PROTOCOL
attr_names = ["digit", "rotation"]
attr_names_path = os.path.join(rotated_mnist_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
os.makedirs(os.path.join(rotated_mnist_dir, split), exist_ok=True)
if split == "train":
bias_aligned_ratio = 1. - skewed_ratio
else:
bias_aligned_ratio = 0.5
#Keep only 3 and 8 and change their classes to 0,1
targets = ((dataset.targets==3) | (dataset.targets==8)).nonzero()
data = dataset.data[targets].squeeze(1)
data_labels = dataset.targets[targets].squeeze(1)
data_labels[(data_labels == 3).nonzero()] = 0
data_labels[(data_labels == 8).nonzero()] = 1
rotation_labels = make_attr_labels(
torch.LongTensor(data_labels), bias_aligned_ratio
)
images, attrs = [], []
for img, target_label, rotation_label in tqdm(
zip(data, data_labels, rotation_labels),
total=len(rotation_labels),
):
rotated_img = protocol[rotation_label.item()](img, severity)
images.append(rotated_img)
attrs.append([target_label, rotation_label])
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"] and severity == 4:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/rmnist/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = [np.array(image).astype(np.uint8) for image in images]
if split == "train":
for j in range(len(np.unique(attr))):
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
for i in range(len(ind)):
path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
for i in range(len(ind)):
path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
elif split == "test":
for j in range(len(np.unique(attr))):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in range(len(attr[:,0])):
path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
imageio.imwrite(path_img, imgs[i])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
else: raise NotImplementedError
image_path = os.path.join(rotated_mnist_dir, split, "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
attr_path = os.path.join(rotated_mnist_dir, split, "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_shifted_mnist(data_dir, skewed_ratio, severity, config):
mnist_dir = os.path.join(data_dir, "MNIST")
shifted_mnist_dir = os.path.join(
data_dir, f"ShiftedMNIST-Skewed{skewed_ratio}-Severity{severity}"
)
os.makedirs(shifted_mnist_dir, exist_ok=True)
print(shifted_mnist_dir)
protocol = SHIFTED_MNIST_PROTOCOL
attr_names = ["digit", "rotation"]
attr_names_path = os.path.join(shifted_mnist_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
dataset = MNIST(mnist_dir, train=(split == "train"), download=True)
os.makedirs(os.path.join(shifted_mnist_dir, split), exist_ok=True)
if split == "train":
bias_aligned_ratio = 1. - skewed_ratio
else:
bias_aligned_ratio = 0.5
#Keep only 3 and 8 and change their classes to 0,1
targets = ((dataset.targets==3) | (dataset.targets==8)).nonzero()
data = dataset.data[targets].squeeze(1)
data_labels = dataset.targets[targets].squeeze(1)
data_labels[(data_labels == 3).nonzero()] = 0
data_labels[(data_labels == 8).nonzero()] = 1
shifted_labels = make_attr_labels(
torch.LongTensor(data_labels), bias_aligned_ratio
)
images, attrs = [], []
for img, target_label, shifted_label in tqdm(
zip(data, data_labels, shifted_labels),
total=len(shifted_labels),
):
shifted_img = protocol[shifted_label.item()](img, severity)
images.append(shifted_img)
attrs.append([target_label, shifted_label])
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"] and severity == 4:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/smnist/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = [np.array(image).astype(np.uint8) for image in images]
if split == "train":
for j in range(len(np.unique(attr))):
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] == attr[:,1]))[0]
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
for i in range(len(ind)):
path_img = os.path.join(path, "align", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
ind = np.nonzero((attr[:,0] == j) & (attr[:,0] != attr[:,1]))[0]
for i in range(len(ind)):
path_img = os.path.join(path, "conflict", f'{attr[ind[0]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
elif split == "test":
for j in range(len(np.unique(attr))):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in range(len(attr[:,0])):
path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
imageio.imwrite(path_img, imgs[i])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
else: raise NotImplementedError
image_path = os.path.join(shifted_mnist_dir, split, "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
attr_path = os.path.join(shifted_mnist_dir, split, "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_camelyon17_type0(data_dir,skewed_ratio,config):
# Type0: Using data from all 4 training hospitals in testset with 50-50 positive and negative ratio
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, ConcatDataset
from torch.utils.data.dataset import Dataset
# Load the full dataset, and download it if necessary
dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)
# Get the training set
train_data = dataset.get_subset(
"train",
transform=transforms.Compose([transforms.ToTensor()])
)
idval_data = dataset.get_subset(
"id_val",
transform=transforms.Compose([transforms.ToTensor()])
)
oodval_data = dataset.get_subset(
"val",
transform=transforms.Compose([transforms.ToTensor()])
)
full_train_data = ConcatDataset([train_data,idval_data,oodval_data]) # NOT test_data
data_loader = DataLoader(
full_train_data,
shuffle=True,
        batch_size=1)  # By shuffling, samples from all concatenated datasets get randomly mixed
pos = np.zeros(2)
while len(np.unique(pos))==1:
pos = np.random.randint(0,4,2)
pos[pos>=2]+=1 # 2 is test hospital
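    # `pos` now holds two distinct training hospitals (hospital index 2, the WILDS test
    # hospital, is excluded); samples from these hospitals count as bias-aligned when
    # tumor-positive, samples from the remaining hospitals when tumor-negative.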
#bias_label = np.zeros(4)
#bias_label[pos] += 1
#assert np.median(bias_label)==0.5
camelyon_dir = os.path.join(
data_dir, f"Camelyon17-Type0-Skewed{skewed_ratio}"
)
os.makedirs(camelyon_dir, exist_ok=True)
print(camelyon_dir)
attr_names = ["tumor", "hospital"]
attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)
test_images, test_attrs = [], []
images, attrs = [], []
bias_aligned_ratio = 1-skewed_ratio
test_count = np.zeros((5,2)) # Count images in testset of all combinations
for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
if test_count[metadata[:,0].item(),y.item()]<1250: #10'000 testset images
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
test_count[metadata[:,0].item(),y.item()]+=1
else:
include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
include_confl = 1-include_align
pos_domains = np.isin(metadata[:,0],pos)
aligned = np.zeros_like(include_align)
aligned[pos_domains] = (y == 1)[pos_domains]
aligned[~pos_domains] = (y == 0)[~pos_domains]
aligned = aligned.astype(bool)
include_imgs = np.zeros_like(include_align)
include_imgs[aligned] = include_align[aligned]
include_imgs[~aligned] = include_confl[~aligned]
include_imgs = include_imgs.astype(bool)
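                # Keep bias-aligned samples (label matches the hospital's majority label)
                # with probability bias_aligned_ratio and bias-conflicting samples with
                # probability skewed_ratio = 1 - bias_aligned_ratio.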
                if not include_imgs.any():
continue
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
assert ((test_count==0) | (test_count==1250)).all()
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"]:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/camelyon17_type0/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = [image for image in images]
for j in range(2):
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
pos_domains = np.isin(attr[:,1],pos)
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
#Testset
test_attr = np.array(test_attrs)
test_imgs = [image for image in test_images]
for j in range(2):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in tqdm(range(len(test_attr[:,0]))):
path_img = os.path.join(path, "test", f'{test_attr[i,0]}', f"{i}_{test_attr[i,0]}_{test_attr[i,1]}.png")
imageio.imwrite(path_img, test_imgs[i])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
image_path = os.path.join(camelyon_dir, "train", "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
attr_path = os.path.join(camelyon_dir, "train", "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
image_path = os.path.join(camelyon_dir, "test", "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in test_images])
attr_path = os.path.join(camelyon_dir, "test", "attrs.npy")
np.save(attr_path, np.array(test_attrs).astype(np.uint8))
def make_camelyon17_type1(data_dir,skewed_ratio, config):
    # Type1: Use the WILDS test hospital (hospital 5) as the test set, as in the original WILDS split
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, ConcatDataset
from torch.utils.data.dataset import Dataset
# Load the full dataset, and download it if necessary
dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)
# Get the training set
train_data = dataset.get_subset(
"train",
transform=transforms.Compose([transforms.ToTensor()])
)
idval_data = dataset.get_subset(
"id_val",
transform=transforms.Compose([transforms.ToTensor()])
)
oodval_data = dataset.get_subset(
"val",
transform=transforms.Compose([transforms.ToTensor()])
)
test_data = dataset.get_subset(
"test",
transform=transforms.Compose([transforms.ToTensor()])
)
full_train_data = ConcatDataset([train_data,idval_data,oodval_data]) # NOT test_data bcs this stays testset
data_loader = DataLoader(
full_train_data,
shuffle=True,
        batch_size=1)  # By shuffling, samples from all concatenated datasets get randomly mixed
test_loader = DataLoader(
test_data,
shuffle=True,
batch_size=1)
pos = np.zeros(2)
while len(np.unique(pos))==1:
pos = np.random.randint(0,4,2)
pos[pos>=2]+=1 # 2 is test hospital
#bias_label = np.zeros(4)
#bias_label[pos] += 1
#assert np.median(bias_label)==0.5
camelyon_dir = os.path.join(
data_dir, f"Camelyon17-Type1-Skewed{skewed_ratio}"
)
os.makedirs(camelyon_dir, exist_ok=True)
print(camelyon_dir)
attr_names = ["tumor", "hospital"]
attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)
bias_aligned_ratio = 1-skewed_ratio
if split == "train":
images, attrs = [], []
for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
include_confl = 1-include_align
pos_domains = np.isin(metadata[:,0],pos)
aligned = np.zeros_like(include_align)
aligned[pos_domains] = (y == 1)[pos_domains]
aligned[~pos_domains] = (y == 0)[~pos_domains]
aligned = aligned.astype(bool)
include_imgs = np.zeros_like(include_align)
include_imgs[aligned] = include_align[aligned]
include_imgs[~aligned] = include_confl[~aligned]
include_imgs = include_imgs.astype(bool)
                if not include_imgs.any():
continue
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
else:
images, attrs = [], []
for idx, (x, y, metadata) in enumerate(tqdm(test_loader)):
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
images.append(np.array(x.squeeze(0)).astype(np.uint8))
attrs.append([y.squeeze(), metadata[:,0].squeeze()])
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"]:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/camelyon17_type1/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = [image for image in images]
if split == "train":
for j in range(2):
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
pos_domains = np.isin(attr[:,1],pos)
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
elif split == "test":
for j in range(2):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for i in tqdm(range(len(attr[:,0]))):
path_img = os.path.join(path, "test", f'{attr[i,0]}', f"{i}_{attr[i,0]}_{attr[i,1]}.png")
imageio.imwrite(path_img, imgs[i])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
else: raise NotImplementedError
image_path = os.path.join(camelyon_dir, split, "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
attr_path = os.path.join(camelyon_dir, split, "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
def make_camelyon17_type2(data_dir,skewed_ratio,config):
    # Type2: Same as Type0 but using only the first hospital and the WILDS test hospital;
    # after relabeling, hospital 1 is mostly tumor-positive and hospital 0 mostly tumor-negative.
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, ConcatDataset
from torch.utils.data.dataset import Dataset
# Load the full dataset, and download it if necessary
dataset = get_dataset(dataset="camelyon17", download=True, unlabeled=False, root_dir=data_dir)
# Get the training set
train_data = dataset.get_subset(
"train",
transform=transforms.Compose([transforms.ToTensor()])
)
idval_data = dataset.get_subset(
"id_val",
transform=transforms.Compose([transforms.ToTensor()])
)
oodval_data = dataset.get_subset(
"val",
transform=transforms.Compose([transforms.ToTensor()])
)
test_data = dataset.get_subset(
"test",
transform=transforms.Compose([transforms.ToTensor()])
)
full_train_data = ConcatDataset([train_data,idval_data,oodval_data,test_data])
data_loader = DataLoader(
full_train_data,
shuffle=True,
        batch_size=1)  # By shuffling, samples from all concatenated datasets get randomly mixed
pos = np.array([1])
camelyon_dir = os.path.join(
data_dir, f"Camelyon17-Type2-Skewed{skewed_ratio}"
)
os.makedirs(camelyon_dir, exist_ok=True)
print(camelyon_dir)
attr_names = ["tumor", "hospital"]
attr_names_path = os.path.join(camelyon_dir, "attr_names.pkl")
with open(attr_names_path, "wb") as f:
pickle.dump(attr_names, f)
for split in ["train", "test"]:
os.makedirs(os.path.join(camelyon_dir, split), exist_ok=True)
test_images, test_attrs = [], []
images, attrs = [], []
bias_aligned_ratio = 1-skewed_ratio
test_count = np.zeros((5,2)) # Count images in testset of all combinations
for idx, (x, y, metadata) in enumerate(tqdm(data_loader)):
# if not (metadata[:,0].item() in [0,2]): continue
# elif test_count[metadata[:,0].item(),y.item()]<1250: # 5'000 testset images
# x = np.moveaxis(np.array(x), 1, 3)
# x *= 255
# test_count[metadata[:,0].item(),y.item()]+=1
# if metadata[:,0].squeeze() == 2: # Changing bias label s.t. it's 0&1. Only for this setting!
# metadata[:,0] = 1
# test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
# test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
if test_count[metadata[:,0].item(),y.item()]<1250 and (not metadata[:,0].item() in [0,2]): # 7'500 testset images
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
test_count[metadata[:,0].item(),y.item()]+=1
test_images.append(np.array(x.squeeze(0)).astype(np.uint8))
test_attrs.append([y.squeeze(), metadata[:,0].squeeze()])
elif (not metadata[:,0].item() in [0,2]): continue
else:
if metadata[:,0].squeeze() == 2: # Changing bias label s.t. it's 0&1. Only for this setting!
metadata[:,0] = 1
include_align = np.random.binomial(1,bias_aligned_ratio,size=len(x))
include_confl = 1-include_align
pos_domains = np.isin(metadata[:,0],pos)
aligned = np.zeros_like(include_align)
aligned[pos_domains] = (y == 1)[pos_domains]
aligned[~pos_domains] = (y == 0)[~pos_domains]
aligned = aligned.astype(bool)
include_imgs = np.zeros_like(include_align)
include_imgs[aligned] = include_align[aligned]
include_imgs[~aligned] = include_confl[~aligned]
include_imgs = include_imgs.astype(bool)
                if not include_imgs.any():
continue
x = np.moveaxis(np.array(x), 1, 3)
x *= 255
images.append(np.array(x[include_imgs].squeeze(0)).astype(np.uint8))
attrs.append([y[include_imgs].squeeze(), metadata[:,0][include_imgs].squeeze()])
assert ((test_count==0) | (test_count==1250)).all()
# For Dfa reproducibility: Separately save data as they expect it. Careful this is hardcoded! Only uses Severity4!
if config["Dfa"]["dataset"]:
import imageio
from distutils.dir_util import copy_tree
dfa_ratio = skewed_ratio * 100
path = config["Dfa"]["data_dir"]+f'/camelyon17_type2/{dfa_ratio:g}pct'
attr = np.array(attrs)
imgs = [image for image in images]
for j in range(2):
os.makedirs(os.path.join(path, "align", f'{j}'), exist_ok=True)
for f in os.listdir(os.path.join(path, "align", f'{j}')): # Remove already existing files
os.remove(os.path.join(os.path.join(path, "align", f'{j}'), f))
os.makedirs(os.path.join(path, "conflict", f'{j}'), exist_ok=True)
for f in os.listdir(os.path.join(path, "conflict", f'{j}')): # Remove already existing files
os.remove(os.path.join(os.path.join(path, "conflict", f'{j}'), f))
pos_domains = np.isin(attr[:,1],pos)
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] == 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] == 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "align", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
ind = np.zeros_like(attr[:,1])
ind[pos_domains] = (attr[:,0] != 1)[pos_domains]
ind[~pos_domains] = (attr[:,0] != 0)[~pos_domains]
ind = np.nonzero(ind)[0]
for i in tqdm(range(len(ind))):
path_img = os.path.join(path, "conflict", f'{attr[ind[i]][0]}', f"{i}_{attr[ind[i],0]}_{attr[ind[i],1]}.png")
imageio.imwrite(path_img, imgs[ind[i]])
#Testset
test_attr = np.array(test_attrs)
test_imgs = [image for image in test_images]
for j in range(2):
os.makedirs(os.path.join(path, "test", f'{j}'), exist_ok=True)
for f in os.listdir(os.path.join(path, "test", f'{j}')): # Remove already existing files
os.remove(os.path.join(os.path.join(path, "test", f'{j}'), f))
for i in tqdm(range(len(test_attr[:,0]))):
path_img = os.path.join(path, "test", f'{test_attr[i,0]}', f"{i}_{test_attr[i,0]}_{test_attr[i,1]}.png")
imageio.imwrite(path_img, test_imgs[i])
#Create Pseudovalidation set as it's never used
os.makedirs(os.path.join(path, "valid"), exist_ok=True)
copy_tree(os.path.join(path, "test"), os.path.join(path, "valid"))
image_path = os.path.join(camelyon_dir, "train", "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in images])
attr_path = os.path.join(camelyon_dir, "train", "attrs.npy")
np.save(attr_path, np.array(attrs).astype(np.uint8))
image_path = os.path.join(camelyon_dir, "test", "images.npy")
np.save(image_path, [np.array(image).astype(np.uint8) for image in test_images])
attr_path = os.path.join(camelyon_dir, "test", "attrs.npy")
np.save(attr_path, np.array(test_attrs).astype(np.uint8))
def make(make_target):
# configuration
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
# manual overwriting of configuration for scripts
# initialize parser
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default=None, help = "Choice of dataset")
parser.add_argument("--random_state", default=None, type=int, help = "Random Seed")
parser.add_argument("--Dfa_dataset", default=None, type=bool, help = "Create dataset for Dfa too?")
args = parser.parse_args()
# replace all specified arguments
updateable = [config["data"]["dataset"],config["random_state"],config["Dfa"]["dataset"]]
values = []
    for i, v in enumerate(vars(args).values()):
        if v is not None:
            values.append(v)
            print("Overwriting configuration")
        else:
            values.append(updateable[i])
config["data"]["dataset"], config["random_state"], config["Dfa"]["dataset"] = values
if make_target == None:
make_target = config["data"]["dataset"]
data_dir = config["user"]["data_dir"]
random_state = config["random_state"]
# Reproducibility
set_seed(random_state)
for skewed_ratio in [2e-1, 1e-1, 5e-2, 2e-2, 1e-2, 5e-3]:
        # for severity in [1, 2, 3, 4]: this is from LfF, but we only look at severity 4 here!
for severity in [4]:
if make_target == "colored_mnist":
make_colored_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
if make_target == "cifar10_type0":
make_corrupted_cifar10(
data_dir=data_dir,
corruption_names=[
"Snow",
"Frost",
"Fog",
"Brightness",
"Contrast",
"Spatter",
"Elastic",
"JPEG",
"Pixelate",
"Saturate",
],
skewed_ratio=skewed_ratio,
severity=severity,
config=config,
postfix="0"
)
if make_target == "cifar10_type1":
make_corrupted_cifar10(
data_dir=data_dir,
corruption_names=[
"Gaussian Noise",
"Shot Noise",
"Impulse Noise",
"Speckle Noise",
"Gaussian Blur",
"Defocus Blur",
"Glass Blur",
"Motion Blur",
"Zoom Blur",
"Original",
],
skewed_ratio=skewed_ratio,
severity=severity,
config=config,
postfix="1"
)
if make_target == "rotated_mnist":
make_rotated_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
if make_target == "shifted_mnist":
make_shifted_mnist(data_dir=data_dir, skewed_ratio=skewed_ratio, severity=severity, config=config)
if make_target == "camelyon17_type0":
make_camelyon17_type0(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
if make_target == "camelyon17_type1":
make_camelyon17_type1(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
if make_target == "camelyon17_type2":
make_camelyon17_type2(data_dir=data_dir, skewed_ratio=skewed_ratio, config=config)
make(make_target=None)
| 41,294 | 46.194286 | 133 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/util.py
|
import os
import random
import numpy as np
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import wandb
from torchvision.transforms.functional import normalize
def set_seed(seed):
"""
Set all random seeds
Args:
seed (int): integer for reproducible experiments
"""
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def get_optimizer(model, config):
"""Resolve the optimizer according to the configs
Args:
model: model on which the optimizer is applied on
config: configuration dict
returns:
optimizer
"""
if config["optimizer"]["name"] == "SGD":
optimizer = torch.optim.SGD(
model.parameters(),
lr=config["optimizer"]["lr"],
momentum=config["optimizer"]["momentum"],
weight_decay=config["optimizer"]["weight_decay"],
)
elif config["optimizer"]["name"] == "Adam":
optimizer = torch.optim.Adam(
model.parameters(),
lr=config["optimizer"]["lr"],
betas=config["optimizer"]["betas"],
weight_decay=config["optimizer"]["weight_decay"],
)
else: raise NotImplementedError("Optimizer not implemented.")
return optimizer
class GeneralizedCELoss(nn.Module):
def __init__(self, config):
super(GeneralizedCELoss, self).__init__()
self.q = config["loss"]["GCE_q"]
def forward(self, logits, targets):
p = F.softmax(logits, dim=1)
if np.isnan(p.mean().item()):
raise NameError("GCE_p")
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
# modify gradient of cross entropy
        loss_weight = (Yg.squeeze().detach()**self.q)  # per-sample gradient weight p_y^q (detached)
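        # GCE(p, y) = (1 - p_y^q) / q; its gradient w.r.t. the logits equals p_y^q times
        # the gradient of the standard cross-entropy, which is what the detached weight
        # above implements.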
if np.isnan(Yg.mean().item()):
raise NameError("GCE_Yg")
# note that we don't return the average but the loss for each datum separately
loss = F.cross_entropy(logits, targets, reduction="none") * loss_weight
return loss
# evaluation code for testset
def evaluate(model, test_loader, device):
model.eval()
test_loader = tqdm(test_loader, position=0, leave=False)
total_correct_aligned, total_num_aligned = 0, 0
total_correct_conflicting, total_num_conflicting = 0, 0
for idx, (data_index, data, attr) in enumerate(test_loader):
label = attr[:, 0]
bias = attr[:, 1]
data = data.to(device)
label = label.to(device)
bias = bias.to(device)
with torch.no_grad():
# For evaluation take mean directly to not unnecessarily introduce variance
parameters = model.encoder(data)
assert len(parameters) == 2 # No new outputs of encoders
pred = model.predict(parameters[0]).argmax(1)
correct = (pred == label).long()
aligned = (label == bias).long()
total_correct_aligned += correct[aligned==True].sum()
total_correct_conflicting += correct[aligned==False].sum()
total_num_aligned += correct[aligned==True].size(0)
total_num_conflicting += correct[aligned==False].size(0)
acc_aligned = total_correct_aligned/float(total_num_aligned)
acc_conflicting = total_correct_conflicting/float(total_num_conflicting)
acc = (total_correct_aligned+total_correct_conflicting)/(float(total_num_aligned)+float(total_num_conflicting))
model.train()
return acc_aligned, acc_conflicting, acc
def evaluate_batch(logit, attr, loss):
label = attr[:, 0]
bias = attr[:, 1]
pred = logit.data.argmax(1)
correct = (pred == label).long()
aligned = (label == bias).long()
aligned_len = aligned.sum()
conflicting_len = (1-aligned).sum()
batch_len = label.size(0)
assert(batch_len == aligned_len + conflicting_len)
corr_aligned = correct[aligned==True].sum()
corr_conflicting = correct[aligned==False].sum()
corr = correct.sum()
assert(corr == corr_aligned + corr_conflicting)
loss_aligned = loss[aligned==True].mean()
loss_conflicting = loss[aligned==False].mean()
loss = loss.mean()
return corr_aligned, corr_conflicting, corr, loss_aligned, loss_conflicting, loss, aligned_len, conflicting_len, batch_len
def save_img(model_s, model_b, data_loader, config, device):
# Logging image
set_seed(config["random_state"])
model_s.eval()
model_b.eval()
sample1, sample2 = random.sample(list(data_loader), 2)
data = torch.stack((sample1[1][0],sample2[1][0]))
data = data.to(device)
z_s, logits_s, mean_s, logvar_s = model_s(data)
z_b, logits_b, mean_b, logvar_b = model_b(data)
mean = torch.cat((mean_s, mean_b), dim=1)
swap1 = torch.cat((mean_s[0],mean_b[1]))
swap2 = torch.cat((mean_s[1],mean_b[0]))
x_reconstructed = model_s.reconstruct(mean)
mean[0] = swap1
mean[1] = swap2
swap_reconstr = model_s.reconstruct(mean)
if config["data"]["dataset"] == "colored_mnist":
data = data.view(2,3,28,28)
x_reconstructed = x_reconstructed.view(2,3,28,28)
swap_reconstr = swap_reconstr.view(2,3,28,28)
elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
import torchvision
save_img = torchvision.utils.make_grid([data[0],x_reconstructed[0],swap_reconstr[0],
data[1],x_reconstructed[1],swap_reconstr[1]],nrow=3)
save_img = wandb.Image(save_img, caption="Left: Original image, Middle: Reconstructed image, Right: Keeping signal, swapping bias")
wandb.log({"Visualization": save_img})
def save_img_adv(model_s, model_b, data_loader, epoch, config, device, training=False):
# Logging image
model_s.eval()
model_b.eval()
rand_batches = random.sample(list(data_loader), 5)
if training == True:
data_batches = [item[2] for item in rand_batches] # 1 higher index because we also have subsetindex in trainset
attr = [item[3] for item in rand_batches]
data = torch.stack([item[0] for item in data_batches])
label = torch.stack([item[0,0] for item in attr])
else:
data_batches = [item[1] for item in rand_batches]
attr = [item[2] for item in rand_batches]
data_unpacked = list()
attr_unpacked = list()
for index, item in enumerate(attr):
idx = torch.where(item[:,0] == item[:,1])[0][0]
data_unpacked.append(data_batches[index][idx])
attr_unpacked.append(item[idx])
data = torch.stack(data_unpacked)
label = torch.stack(attr_unpacked)[:,0]
data = data.to(device)
label = label.to(device)
assert data.shape[0:2] ==torch.Size([5, 3])
z_s, logits_s, mean_s, logvar_s = model_s(data)
z_b, logits_b, mean_b, logvar_b = model_b(data)
attack = DeepFool(model_b.classifier,device,steps=10,overshoot=config["perturb"]["overshoot"])
mean_b_adv, label_adv = attack.forward(mean_b, label)
mean = torch.cat((mean_s, mean_b), dim=1)
mean_adv = torch.cat((mean_s, mean_b_adv), dim=1)
x_reconstructed = model_s.reconstruct(mean)
x_adv_reconstr = model_s.reconstruct(mean_adv)
if config["data"]["dataset"] == "colored_mnist":
data = data.view(5,3,28,28)
x_reconstructed = x_reconstructed.view(5,3,28,28)
x_adv_reconstr = x_adv_reconstr.view(5,3,28,28)
elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
import torchvision
imgs = torch.cat((data, x_reconstructed,x_adv_reconstr))
save_img = torchvision.utils.make_grid(imgs,nrow=5)
save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed image, Bottom: Reconstructed adv. perturbation")
if training == True:
wandb.log({"Adversarial Visualization Training": save_img, "epoch": epoch})
else:
wandb.log({"Adversarial Visualization": save_img, "epoch": epoch})
model_s.train()
model_b.train()
## DeepFool is adapted from https://adversarial-attacks-pytorch.readthedocs.io/en/latest/_modules/torchattacks/attacks/deepfool.html
class DeepFool:
r"""
'DeepFool: A Simple and Accurate Method to Fool Deep Neural Networks'
[https://arxiv.org/abs/1511.04599]
Distance Measure : L2
Arguments:
model (nn.Module): model to attack.
steps (int): number of steps. (Default: 50)
overshoot (float): parameter for enhancing the noise. (Default: 0.02)
    Adaptation Note: This algorithm was adapted for the purpose of this work.
    It therefore operates on unbounded latent vectors rather than bounded images.
    We still call the input "image" for consistency with the original algorithm.
    Additionally, the algorithm is sped up by approximating the closest hyperplane with the
    second-highest predicted class, which reduces the computational cost roughly 50-fold
    because it allows the attack to be parallelized.
    In my tests this approximation matched the original DeepFool (used as an oracle) in
    about 4 out of 5 cases.
Examples::
>>> attack = DeepFool(model, steps=50, overshoot=0.02)
>>> adv_images = attack(images, labels)
"""
def __init__(self, model, device, steps=50, overshoot=0.02):
self.model = model
self.steps = steps
self.overshoot = overshoot
self.device = device
self.num_classes = model.num_classes
def forward(self, images, labels, target_true_label=False):
adv_images = images.clone().detach().to(self.device)
adv_images.requires_grad = True
labels = labels.clone().detach().to(self.device)
batch_size = len(adv_images)
correct = torch.tensor([True]*batch_size).to(self.device)
if target_true_label:
target_label = labels
else:
target_labels = torch.ones([batch_size,self.num_classes])/(self.num_classes-1)
target_labels = target_labels.to(self.device)
for i in range(self.num_classes):
target_labels[labels == i,i] = 0
target_label = torch.multinomial(target_labels,1).squeeze(-1)
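            # Each sample is assigned a random incorrect class as attack target, sampled
            # uniformly from all classes except its true label.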
curr_steps = 0
# Note that with this implementation it's possible that the target label switches between iterations
while (True in correct) and (curr_steps < self.steps):
if adv_images.grad is not None:
adv_images.grad.zero_()
logits = self.model(adv_images[correct]) # Forward pass only for correct classifications
values, predicted = torch.topk(logits, 2, dim=1) # Predicted label
#target_label[correct] = predicted[:,0]
correct_new = (predicted[:,0]!=target_label[correct])
#correct_new = (predicted[:,0]==labels[correct])
# Some indexing to backprop only wrt correctly classified labels
#diff = values[:,1] - logits.gather(1,labels[correct].unsqueeze(1)).squeeze(-1) # second highest label as target
diff = logits.gather(1,target_label[correct].unsqueeze(1)).squeeze(-1) - values[:,0] # target label as target
diff[~correct_new] = 0
diff_backprop = diff.sum() # "Trick" to backprop wrt all inputs: Summing individual differences
diff_backprop.backward()
delta = (torch.abs(diff[correct_new])/(torch.norm(adv_images.grad[correct][correct_new], p=2,dim=tuple(range(adv_images.ndim)[1:]))**2+1e-8)).view(-1,*(1,)*(adv_images.dim()-1)) * adv_images.grad[correct][correct_new]
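            # DeepFool step: delta = |f_target - f_top| / ||grad||_2^2 * grad, i.e. the
            # minimal L2 move onto the linearized decision boundary between the current
            # top class and the target class.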
assert not torch.isnan(delta).any()
correct[correct.clone()] = correct_new
with torch.no_grad():
adv_images[correct] = (adv_images[correct] + (1+self.overshoot)*delta).detach()
curr_steps += 1
return adv_images, target_label
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=3, verbose=False, delta=0, path_s = 'checkpoint.pt', path_b = None, trace_func=print, saveEveryEpoch=False):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
                            Default: 3
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
            path_s (str): Path for the signal-model checkpoint to be saved to.
                            Default: 'checkpoint.pt'
            path_b (str): Path for the biased-model checkpoint to be saved to.
                            Default: None
trace_func (function): trace print function.
Default: print
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.path_s = path_s
self.path_b = path_b
self.trace_func = trace_func
self.saveEveryEpoch = saveEveryEpoch
def __call__(self, val_loss, model_s, model_b, epoch):
score = -val_loss
if self.saveEveryEpoch:
path_s = self.path_s[:-3] + "_epoch_" + str(epoch) + ".pt"
path_b = self.path_b[:-3] + "_epoch_" + str(epoch) + ".pt"
self.save_checkpoint(val_loss, model_s, model_b, path_s, path_b)
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model_s, model_b, self.path_s, self.path_b)
elif score < self.best_score + self.delta:
self.counter += 1
self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
                self.trace_func(f'Early stopping the biased model: no improvement for {self.counter} epochs')
else:
self.best_score = score
self.save_checkpoint(val_loss, model_s, model_b, self.path_s, self.path_b)
self.counter = 0
def save_checkpoint(self, val_loss, model_s, model_b, path_s, path_b):
"""Saves model when validation loss decreases."""
if self.verbose:
self.trace_func(f'Biased val loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving models...\n')
torch.save(model_s.state_dict(), path_s)
torch.save(model_b.state_dict(), path_b)
self.val_loss_min = val_loss
def get_reconst_loss(model_s, model_b, data_loader, device, config, mode="train"):
"""Main test loop, where the network is tested in the end
Args:
model: our pytorch model
val_loader: loader with the validation data
device: current device (cpu or gpu)
"""
# testing the model
model_s.eval()
model_b.eval()
data_loader = tqdm(data_loader, position=0, leave=False)
total_loss = 0
if mode=="train":
for idx, (subset_idx, full_idx, data, attr) in enumerate(data_loader):
label = attr[:, 0]
data = data.to(device)
label = label.to(device)
with torch.no_grad():
# For evaluation take mean directly to not unnecessarily introduce variance
parameters = model_s.encoder(data)
parameters_b = model_b.encoder(data)
assert len(parameters) == 2 # No new outputs of encoders
x_reconst = model_s.decoder(torch.cat((parameters[0],parameters_b[0]),dim=1))
if config["data"]["dataset"] == "colored_mnist":
reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1" or config["data"]["dataset"] == "camelyon17_type2"):
reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
else: raise NotImplementedError("reconst_loss")
total_loss += reconst_loss.sum()
elif mode=="test":
for idx, (full_idx, data, attr) in enumerate(data_loader):
label = attr[:, 0]
data = data.to(device)
label = label.to(device)
with torch.no_grad():
# For evaluation take mean directly to not unnecessarily introduce variance
parameters = model_s.encoder(data)
parameters_b = model_b.encoder(data)
assert len(parameters) == 2 # No new outputs of encoders
x_reconst = model_s.decoder(torch.cat((parameters[0],parameters_b[0]),dim=1))
if config["data"]["dataset"] == "colored_mnist":
reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1" or config["data"]["dataset"] == "camelyon17_type2"):
reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
else: raise NotImplementedError("reconst_loss")
total_loss += reconst_loss.sum()
total_loss /= len(data_loader.iterable.dataset)
print("total reconstruction loss of VAE: {:8.6f}".format(total_loss))
return total_loss
def early_stop(model_s, model_b, val_loader_biased, stopping_criteria, scheduler_s, scheduler_b, epoch, device, config):
"""Main test loop, where the network is tested in the end
Args:
model: our pytorch model
val_loader: loader with the validation data
device: current device (cpu or gpu)
"""
# testing the model
model_b.eval()
model_s.eval()
val_loader_biased = tqdm(val_loader_biased, position=0, leave=False)
total_loss = 0
total_reconst_loss = 0
for idx, (subset_idx, full_idx, data, attr) in enumerate(val_loader_biased):
label = attr[:, 0]
data = data.to(device)
label = label.to(device)
with torch.no_grad():
# For evaluation take mean directly to not unnecessarily introduce variance
parameters_b = model_b.encoder(data)
assert len(parameters_b) == 2 # No new outputs of encoders
logits_b = model_b.predict(parameters_b[0])
parameters_s = model_s.encoder(data)
assert len(parameters_s) == 2 # No new outputs of encoders
logits_s = model_s.predict(parameters_s[0])
loss_s = F.cross_entropy(logits_s, label,reduction="none")
loss_b = F.cross_entropy(logits_b, label,reduction="none")
mean = torch.cat((parameters_s[0], parameters_b[0]), dim=1)
logvar = torch.cat((parameters_s[1], parameters_b[1]), dim=1)
x_reconst = model_s.reconstruct(mean)
p = F.softmax(logits_b, dim=1)
if np.isnan(p.mean().item()):
raise NameError("GCE_p")
Yg = torch.gather(p, 1, torch.unsqueeze(label, 1))
# modify gradient of cross entropy
            loss_weight = (Yg.squeeze().detach())  # per-sample probability p_y (detached); raised to q below
if np.isnan(Yg.mean().item()):
raise NameError("GCE_Yg")
# note that we don't return the average but the loss for each datum separately
loss_b = (loss_b * loss_weight**config["loss"]["GCE_q"])
loss_s = (loss_s * (1-loss_weight)**config["loss"]["GCE_q"])
# VAE losses
# Compute reconstruction loss and kl divergence for both encoders together
# Sum over dimensions, average over batch to have loss weighting hyperparameters being independent of batch size
if (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
elif config["data"]["dataset"] == "colored_mnist":
reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1"or config["data"]["dataset"] == "camelyon17_type2"):
reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
else: raise NotImplementedError("reconst_loss")
kl_div = - 0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(),dim=1)
            # Rescale both VAE losses so that hyperparameter tuning is invariant to the image resolution.
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
kl_div /= x_reconst.view(len(x_reconst),-1).shape[1]
reconst_loss *=config["loss"]["reconst_weight"]
total_loss = total_loss + loss_s.sum() + loss_b.sum() + reconst_loss.sum() + kl_div.sum()
total_reconst_loss += reconst_loss.sum()
total_loss /= len(val_loader_biased.iterable.dataset)
total_reconst_loss /= len(val_loader_biased.iterable.dataset)
# scheduling
if config["optimizer"]["lr_decay"]:
scheduler_s.step(total_loss)
scheduler_b.step(total_loss)
wandb.log({"loss_biasedval": total_loss, 'reconstruction_loss_biasedval': total_reconst_loss, "epoch": epoch})
print("total validation loss {:8.3f}".format(total_loss))
stopping_criteria(total_loss, model_s, model_b, epoch)
model_s.train()
model_b.train()
return stopping_criteria.early_stop
def capture_dataset(data_loader,config):
imgs = []
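    # Log a small grid of bias-aligned samples (three per class from the first batch) to wandb
    # as a quick visual sanity check of the generated dataset.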
for idx, (subset_idx, full_idx, data, attr) in enumerate(data_loader):
if (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
label = attr[:, 0]
bias = attr[:,1]
aligned = (label==bias)
data_aligned = data[aligned]
label_aligned = label[aligned]
for j in range(3):
for i in np.unique(label_aligned):
imgs.append(data_aligned[label_aligned==i][j])
import torchvision
save_img = torchvision.utils.make_grid(imgs,nrow=len(np.unique(label_aligned)))
save_img = wandb.Image(save_img)
wandb.log({"Dataset": save_img})
break
def bias_visualization(model_s, model_b, data_loader, config, device):
# Visualizing Bias.
model_s.eval()
model_b.eval()
rand_batches = random.sample(list(data_loader), 5)
data_batches = [item[2] for item in rand_batches]
attr = [item[3] for item in rand_batches]
data_unpacked = list()
attr_unpacked = list()
for index, item in enumerate(attr):
# Extract 5 bias-conflicting images (w/o using bias label as it's theoretically unknown)
batch = data_batches[index].to(device)
item = item.to(device)
parameters_b = model_b.encoder(batch)
assert len(parameters_b) == 2 # No new outputs of encoders
pred_b = model_b.predict(parameters_b[0]).argmax(1)
correct_b = (pred_b == item[:,0]).long()
parameters_s = model_s.encoder(batch)
assert len(parameters_s) == 2 # No new outputs of encoders
pred_s = model_s.predict(parameters_s[0]).argmax(1)
correct_s = (pred_s == item[:,0]).long()
bias_aligned = (correct_s*correct_b).bool()
data_unpacked.append(data_batches[index][bias_aligned.cpu()][0])
attr_unpacked.append(item[bias_aligned.cpu()][0])
data = torch.stack(data_unpacked)
label = torch.stack(attr_unpacked)[:,0]
data = data.to(device)
label = label.to(device)
assert data.shape[0:2] ==torch.Size([5, 3])
z_s, logits_s, mean_s, logvar_s = model_s(data)
z_b, logits_b, mean_b, logvar_b = model_b(data)
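    # DeepFool perturbs the bias latent mean_b just enough to flip the biased classifier's prediction;
    # decoding [mean_s, mean_b_adv] then shows what the bias dimensions encode for these samples.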
attack = DeepFool(model_b.classifier,device,steps=20,overshoot=config["perturb"]["overshoot"])
mean_b_adv, label_adv = attack.forward(mean_b, label)
mean = torch.cat((mean_s, mean_b), dim=1)
mean_adv = torch.cat((mean_s, mean_b_adv), dim=1)
x_reconstructed = model_s.reconstruct(mean)
x_adv_reconstr = model_s.reconstruct(mean_adv)
##### FOR DFA THEN FIND SAMPLES WHERE BIAS PREDICTS LABEL OF DATA#####
    # Create bias-aligned samples by finding samples whose bias dimensions make the biased classifier predict the correct label.
j = 0
mean_b_swap = torch.zeros_like(mean_b)
while j<5:
rand_batch = random.sample(list(data_loader), 1)
batch_data = rand_batch[0][2].to(device)
#batch_label = rand_batch[0][2][:,0].to(device)
parameters_b = model_b.encoder(batch_data)
assert len(parameters_b) == 2 # No new outputs of encoders
pred_b = model_b.predict(parameters_b[0]).argmax(1)
corr_bias = (pred_b == label_adv[j])
if corr_bias.sum()>0:
mean_b_swap[j] = parameters_b[0][corr_bias][0]
j+=1
mean_swap = torch.cat((mean_s, mean_b_swap), dim=1)
x_swap_reconstr = model_s.reconstruct(mean_swap)
if config["data"]["dataset"] == "colored_mnist":
data = data.view(5,3,28,28)
x_reconstructed = x_reconstructed.view(5,3,28,28)
x_adv_reconstr = x_adv_reconstr.view(5,3,28,28)
x_swap_reconstr = x_swap_reconstr.view(5,3,28,28)
elif (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
import torchvision
imgs = torch.cat((data, x_reconstructed,x_adv_reconstr))
save_img = torchvision.utils.make_grid(imgs,nrow=5)
save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed bias-conflicting image, Bottom: Reconstructed bias-aligned image by adv. perturbation")
wandb.log({"Adversarial Visualization Ours": save_img})
imgs = torch.cat((data, x_reconstructed,x_swap_reconstr))
save_img = torchvision.utils.make_grid(imgs,nrow=5)
save_img = wandb.Image(save_img, caption="Top: Original image, Middle: Reconstructed bias-conflicting image, Bottom: Reconstructed bias-aligned image by swapping")
wandb.log({"Adversarial Visualization DisEnt": save_img})
model_s.train()
model_b.train()
| 28,920 | 45.646774 | 229 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/train.py
|
import os
import yaml
import argparse
import wandb
import time
import sys
from tqdm import tqdm
import numpy as np
from uuid import uuid4
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms.functional import normalize
from data.util import get_dataset, IdxDataset, IdxDataset2
from module.util import get_model
from util import set_seed, get_optimizer, evaluate, \
GeneralizedCELoss, evaluate_batch, save_img, save_img_adv, \
EarlyStopping, early_stop, capture_dataset, bias_visualization
def main():
# configuration
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
# manual overwriting of configuration for scripts
# initialize parser
print(sys.argv)
parser = argparse.ArgumentParser()
parser.add_argument("--name", default=None, help = "Name of experiment")
parser.add_argument("--bias_conflicting_perc", default=None, type=float, help = "Percentage of bias conflicting samples in dataset")
parser.add_argument("--severity", default=None, type=int, help = "Severity of bias")
parser.add_argument("--dataset", default=None, help = "Choice of dataset")
parser.add_argument("--model_tag", default=None, help = "Choice of model")
parser.add_argument("--q", default=None, type=float, help = "q for GCE loss")
parser.add_argument("--random_state", default=None, type=int, help="Random state for seed")
parser.add_argument("--results_filename", default=None, help="Name of file to store results")
parser.add_argument("--VAE_weight", default=None, type=float, help="Weight of KL&Reconstruction loss")
parser.add_argument("--reconst_weight", default=None, type=float, help="Weight of Reconstruction loss")
args = parser.parse_args()
# Replace all specified arguments
updateable = [config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["loss"]["VAE_weight"],config["loss"]["reconst_weight"]]
values = []
for i,v in enumerate(vars(args).values()):
if v != None:
values.append(v)
print("Overwriting configuration")
else: values.append(updateable[i])
config["name"],config["data"]["bias_conflicting_perc"],config["data"]["severity"],config["data"]["dataset"],config["model"]["tag"],config["loss"]["GCE_q"],config["random_state"],config["results_filename"],config["loss"]["VAE_weight"],config["loss"]["reconst_weight"] = values
# configuration sanity check
if not (
(config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP") or
(config["data"]["dataset"] == "colored_mnist" and config["model"]["tag"] == "MLP_VAE") or
(config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet20") or
(config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet20") or
(config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet20") or
(config["data"]["dataset"] == "cifar10_type0" and config["model"]["tag"] == "ResNet_VAE") or
(config["data"]["dataset"] == "cifar10_type1" and config["model"]["tag"] == "ResNet_VAE") or
(config["data"]["dataset"] == "camelyon17_type0" and config["model"]["tag"] == "ResNet_VAE") or
(config["data"]["dataset"] == "camelyon17_type1" and config["model"]["tag"] == "ResNet_VAE")
):
print("Are you sure you want to use the dataset "+config["data"]["dataset"]+" with the model "+ config["model"]["tag"]+"?")
# define variables from config
batch_size = config["train"]["batch_size"]
epochs = config["train"]["epochs"]
random_state = config["random_state"]
# wandb support
mode = "online" if config['wandb_logging'] else "disabled"
    #wandb.login()  # rely on the WANDB_API_KEY environment variable instead of hard-coding an API key
wandb.init(
project="Signalisharder",
entity="username",
config=config,
mode=mode
)
wandb.run.name = wandb.run.name.split("-")[-1] + "-"+config['name']
#wandb.run.save()
print("Running experiment: {}".format(config["name"]))
# set seed
set_seed(random_state)
# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"\nUsing device: {device}")
# load dataset
train_dataset = get_dataset(
config,
dataset_split="train"
)
test_dataset = get_dataset(
config,
dataset_split="eval"
)
    # Wrap the train dataset twice to obtain both the subset index and the full-dataset index for each sample
train_dataset = IdxDataset(train_dataset)
test_dataset = IdxDataset(test_dataset)
train_dataset_splitted = IdxDataset2(train_dataset)
train_loader = DataLoader(
train_dataset_splitted,
batch_size=batch_size,
shuffle=True,
num_workers=1,
pin_memory=True,
drop_last=True
)
# train_loader = DataLoader(
# train_dataset,
# batch_size=batch_size,
# shuffle=True,
# #num_workers=16,
# pin_memory=True,
# )
test_loader = DataLoader(
test_dataset,
batch_size=256,
shuffle=False,
num_workers=1,
pin_memory=True,
)
    # define signal and bias model and model for approximating disentanglement loss
model_s = get_model(config).to(device)
model_b = get_model(config).to(device)
# Decoder of bias network not used
for p in model_b.decoder.parameters():
p.requires_grad = False
# define optimizer
optimizer_s = get_optimizer(model_s, config)
optimizer_b = get_optimizer(model_b, config)
# define scheduler
if config["model"]["tag"] == "MLP_VAE":
patience = config["early_stop"]["patience_MLP"]
elif config["model"]["tag"] == "ResNet_VAE":
patience = config["early_stop"]["patience_ResNet"]
else: raise NotImplementedError("Patience")
scheduler_s = optim.lr_scheduler.ReduceLROnPlateau(optimizer_s, verbose=True, patience = int(patience/2)-1, factor=config["optimizer"]["lr_gamma"],threshold=0.00001)
scheduler_b = optim.lr_scheduler.ReduceLROnPlateau(optimizer_b, verbose=True, patience = int(patience/2)-1, factor=config["optimizer"]["lr_gamma"],threshold=0.00001)
# define loss function
criterion_s = nn.CrossEntropyLoss(reduction='none')
criterion_b = GeneralizedCELoss(config)
# early stopping
os.makedirs("./saved_models/ours_s/", exist_ok=True)
os.makedirs("./saved_models/ours_b/", exist_ok=True)
timestamp = time.strftime(' %d-%b-%Y_%H:%M', time.localtime())
id = str(uuid4())
save_path_s = "./saved_models/ours_s/" + config["name"] + timestamp + id + ".pt"
save_path_b = "./saved_models/ours_b/" + config["name"] + timestamp + id + ".pt"
stopping_criteria_vae = EarlyStopping(patience=patience, verbose=True, path_s = save_path_s, path_b = save_path_b, delta=0.00001) # Same as scheduler
early_stop_v = False
# training & validation & test
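    # Each epoch trains on the 90% split and then evaluates on the held-out biased 10% split to decide
    # VAE early stopping; once it triggers, the best checkpoints are restored and the loop exits.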
for epoch in range(epochs):
if early_stop_v == False:
train_dataset_splitted.make_train()
train(model_s, model_b, train_loader, early_stop_v, optimizer_s, optimizer_b, criterion_s, criterion_b, epoch, epochs, device, config)
train_dataset_splitted.make_biased_val() # Biased validation set to determine early stopping of vae
early_stop_v = early_stop(model_s, model_b, train_loader, stopping_criteria_vae, scheduler_s, scheduler_b, epoch, device, config)
if early_stop_v == True: # Reverse models to early stopping points
model_s.load_state_dict(torch.load(save_path_s, map_location=device))
model_b.load_state_dict(torch.load(save_path_b, map_location=device))
for p in model_s.encoder.parameters(): # Freeze signal part of VAE.
p.requires_grad = False
for p in model_s.decoder.parameters():
p.requires_grad = False
for p in model_b.parameters():
p.requires_grad = False
train_dataset_splitted.make_biased_val()
else: break
        validate(model_s, model_b, test_loader, epoch, device, config)  # Important: until we settle on a final setup, the test set is used as the validation set for model analysis!
#capture_dataset(train_loader, config)
test_acc_s, test_acc_b = test(model_s, model_b, test_loader, epochs, device, config)
# train_reconst_loss = get_reconst_loss(model_s, model_b, train_loader, device, config, mode = "train")
# test_reconst_loss = get_reconst_loss(model_s, model_b, test_loader, device, config, mode = "test")
# Saving result & Checkpoint
with open(config["results_filename"]+'.txt', 'a') as f:
f.writelines((['{} signal: {:8.4f}\n'.format(config["name"], test_acc_s)]))
# f.writelines((['{} biased: {:8.4f}\n'.format(config["name"], test_acc_b)]))
# f.writelines((['{} train_reconst: {:8.4f}\n'.format(config["name"], train_reconst_loss)]))
# f.writelines((['{} test_reconst: {:8.4f}\n'.format(config["name"], test_reconst_loss)]))
# Save images to wandb
save_img(model_s, model_b, test_loader, config, device)
for i in range(5):
save_img_adv(model_s, model_b, test_loader, epoch, config, device, training=False)
    bias_visualization(model_s, model_b, train_loader, config, device)  # Use the biased validation split to have (enough) bias-aligned images
wandb.finish(quiet=True)
os.remove(save_path_s)
os.remove(save_path_b)
def train(
model_s,
model_b,
train_loader,
early_stop_v,
optimizer_s,
optimizer_b,
criterion_s,
criterion_b,
epoch,
epochs,
device,
config
):
"""Main training loop, where the network is trained
Args:
UPDATE ARG DESCRIPTION
model: baseline model
train_loader: loader with the training data
optimizer: optimizer for backpropagation
criterion: loss function
epoch: current epoch
epochs: max number of epochs
device: current device (cpu or gpu)
"""
train_loader = tqdm(train_loader, position=0, leave=False)
train_loader.set_description(f"Epoch [{epoch}/{epochs}]")
total_corr_aligned_s, total_corr_conflicting_s, total_corr_s, total_count_aligned, total_count_conflicting, total_count = 0, 0, 0, 0, 0, 0
total_corr_aligned_b, total_corr_conflicting_b, total_corr_b, = 0, 0, 0
total_corr_aligned_s_adv, total_corr_conflicting_s_adv, total_corr_s_adv, = 0, 0, 0
# training loop
model_s.train()
model_b.train()
if early_stop_v:
model_b.eval()
for idx, (subset_idx, full_idx, data, attr) in enumerate(train_loader):
data, attr = data.to(device), attr.to(device)
label = attr[:, 0] # Assuming label is in first column and bias in second of variable attr!
# bias = attr[:, 1]
# Getting predictions
z_s, logits_s, mean_s, logvar_s = model_s(data)
z_b, logits_b, mean_b, logvar_b = model_b(data)
# z_s_avging_pos = z_s[label.bool()][torch.randperm(sum(label==1))]
# z_s_avging_neg = z_s[~label.bool()][torch.randperm(sum(label==0))]
# z_s[label.bool()] = (z_s[label.bool()] + z_s_avging_pos.detach())/2 ###Detach yes/no?
# z_s[~label.bool()] = (z_s[~label.bool()] + z_s_avging_neg.detach())/2 ###Detach yes/no?
# logits_s = model_s.predict(z_s)
z = torch.cat((z_s, z_b), dim=1)
mean = torch.cat((mean_s, mean_b), dim=1)
logvar = torch.cat((logvar_s, logvar_b), dim=1)
x_reconst = model_s.reconstruct(z)
# VAE losses
        # Compute reconstruction loss and KL divergence for both encoders together
        # Sum over dimensions, average over batch so that the loss-weighting hyperparameters are independent of batch size
if (config["data"]["dataset"] == "cifar10_type0" or config["data"]["dataset"] == "cifar10_type1"): # Backtransform preprocessing standardization for CE
data_backtransformed = normalize(data,-np.divide([0.4914, 0.4822, 0.4465],[0.2023, 0.1994, 0.2010]), np.divide([1,1,1],[0.2023, 0.1994, 0.2010]))
reconst_loss = F.mse_loss(x_reconst, data_backtransformed, reduction='none').sum(dim=(1,2,3))
elif config["data"]["dataset"] == "colored_mnist":
reconst_loss = F.binary_cross_entropy(x_reconst, data, reduction='none').sum(dim=(1,2,3))
elif (config["data"]["dataset"] == "camelyon17_type0" or config["data"]["dataset"] == "camelyon17_type1" or config["data"]["dataset"] == "camelyon17_type2"):
reconst_loss = F.mse_loss(x_reconst, data, reduction='none').sum(dim=(1,2,3))
else: raise NotImplementedError("reconst_loss")
kl_div = - 0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp(),dim=1)
        # Rescale both VAE losses so that hyperparameter tuning is invariant to the image resolution.
reconst_loss /= x_reconst.view(len(x_reconst),-1).shape[1]
kl_div /= x_reconst.view(len(x_reconst),-1).shape[1]
reconst_loss *=config["loss"]["reconst_weight"]
        # Weight samples by (1 - yhat_b) instead of the relative difficulty score (RDS)
prob_b = F.softmax(logits_b, dim=1)
if np.isnan(prob_b.mean().item()):
raise NameError("prob_b")
y_hat_b = torch.gather(prob_b, 1, torch.unsqueeze(label, 1)).squeeze().detach().cpu()
if np.isnan(y_hat_b.mean().item()):
raise NameError("y_hat_b")
loss_weight = (1-y_hat_b)**config["loss"]["GCE_q"] # 1-yhat for hard-to-learn samples.
rel_diff_score = loss_weight.detach().to(device)
# Calculate and weigh classifier losses
loss_indiv_s = criterion_s(logits_s,label)* rel_diff_score
loss_indiv_b = criterion_b(logits_b,label)
# Evaluate metrics for logging and backpropagating
corr_aligned_s, corr_conflicting_s, corr_s, loss_aligned_s, loss_conflicting_s, loss_s, aligned_len, conflicting_len, batch_len = evaluate_batch(logits_s,attr,loss_indiv_s)
corr_aligned_b, corr_conflicting_b, corr_b, loss_aligned_b, loss_conflicting_b, loss_b = evaluate_batch(logits_b,attr,loss_indiv_b)[0:6]
if torch.isnan(loss_s):
raise NameError('loss_s_update')
if torch.isnan(loss_b):
raise NameError('loss_b_update')
# Backprop model
optimizer_s.zero_grad()
optimizer_b.zero_grad()
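        # Joint objective: the re-weighted signal and bias classification losses plus the shared
        # VAE reconstruction and KL terms, backpropagated through both models at once.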
loss = loss_s + loss_b + reconst_loss.mean() + kl_div.mean()
loss.backward()
optimizer_s.step()
optimizer_b.step()
# Calculate metrics for logging
total_corr_aligned_s += corr_aligned_s
total_corr_conflicting_s += corr_conflicting_s
total_corr_s += corr_s
total_corr_aligned_b += corr_aligned_b
total_corr_conflicting_b += corr_conflicting_b
total_corr_b += corr_b
total_count_aligned += aligned_len
total_count_conflicting += conflicting_len
total_count += batch_len
train_loader.set_postfix({"loss_s": "{:.3f}".format(loss_s.item()), "loss_b": "{:.3f}".format(loss_b.item()),
"acc_s": "{:.3f}".format(corr_s.item() / batch_len), "acc_b": "{:.3f}".format(corr_b.item() / batch_len)})
wandb.log({"loss_s": loss_s, "loss_s_align": loss_aligned_s, "loss_s_conflict": loss_conflicting_s, "reconstruction_loss": reconst_loss.mean()})
wandb.log({"loss_b": loss_b, "loss_b_align": loss_aligned_b, "loss_b_conflict": loss_conflicting_b, "loss": loss})
if config["wandb_logging"]:
save_img_adv(model_s, model_b, train_loader, epoch, config, device, training=True)
wandb.log({"acc_s_train": total_corr_s / total_count, "acc_s_train_align": total_corr_aligned_s / total_count_aligned,
"acc_s_train_conflict": total_corr_conflicting_s / total_count_conflicting, "epoch": epoch})
wandb.log({"acc_b_train": total_corr_b / total_count, "acc_b_train_align": total_corr_aligned_b / total_count_aligned,
"acc_b_train_conflict": total_corr_conflicting_b / total_count_conflicting, "epoch": epoch})
wandb.log({"acc_s_train_adv": total_corr_s_adv / total_count, "acc_s_train_align_adv": total_corr_aligned_s_adv / total_count_aligned,
"acc_s_train_conflict_adv": total_corr_conflicting_s_adv / total_count_conflicting, "epoch": epoch})
print(
"| epoch {:3d} | training accuracy_biased {:8.3f}".format(
epoch, total_corr_b / total_count
)
)
def validate(model_s, model_b, val_loader, epoch, device, config):
"""Main test loop, where the network is tested in the end
Args:
model: our pytorch model
val_loader: loader with the validation data
device: current device (cpu or gpu)
"""
# testing the model
model_s.eval()
model_b.eval()
val_acc_aligned_s, val_acc_conflicting_s, val_acc_s = evaluate(model_s, val_loader, device)
val_acc_aligned_b, val_acc_conflicting_b, val_acc_b = evaluate(model_b, val_loader, device)
if config["loss"]["perturbation"]:
save_img_adv(model_s, model_b, val_loader, epoch, config, device)
wandb.log({"acc_s_val": val_acc_s, "acc_s_val_align": val_acc_aligned_s, "acc_s_val_conflict": val_acc_conflicting_s, "epoch": epoch})
wandb.log({"acc_b_val": val_acc_b, "acc_b_val_align": val_acc_aligned_b, "acc_b_val_conflict": val_acc_conflicting_b, "epoch": epoch})
print("validation accuracy of unbiased model {:8.3f}".format(val_acc_s))
print("validation accuracy of biased model {:8.3f}".format(val_acc_b))
def test(model_s, model_b, test_loader, epochs, device, config):
"""Main test loop, where the network is tested in the end
Args:
model: our pytorch model
test_loader: loader with the validation data
device: current device (cpu or gpu)
"""
# testing the model
model_s.eval()
model_b.eval()
test_acc_aligned_s, test_acc_conflicting_s, test_acc_s = evaluate(model_s, test_loader, device)
test_acc_aligned_b, test_acc_conflicting_b, test_acc_b = evaluate(model_b, test_loader, device)
wandb.log({"acc_s_test": test_acc_s, "acc_s_test_align": test_acc_aligned_s, "acc_s_test_conflict": test_acc_conflicting_s, "epoch": epochs})
wandb.log({"acc_b_test": test_acc_b, "acc_b_test_align": test_acc_aligned_b, "acc_b_test_conflict": test_acc_conflicting_b, "epoch": epochs})
print("test accuracy of unbiased model {:8.3f}".format(test_acc_s))
print("test accuracy of biased model {:8.3f}".format(test_acc_b))
return test_acc_s, test_acc_b
if __name__ == "__main__":
main()
| 18,917 | 48.010363 | 285 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/module/resnet_vae.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torchvision.models import resnet18
from module.resnet import resnet20
class ResNet_VAE(nn.Module):
def __init__(self, num_classes = 10, bottleneck=512):
super(ResNet_VAE, self).__init__()
self.encoder = ResNet_Encoder(bottleneck)
self.decoder = ResNet18Dec(bottleneck=bottleneck)
self.classifier = ResNet_Classifier(num_classes = num_classes, bottleneck=bottleneck)
def forward(self, x):
mean, logvar = self.encoder(x)
z = self.reparameterization(mean, logvar)
logits = self.predict(z)
return z, logits, mean, logvar
def reconstruct(self, z):
x_recon = self.decoder(z)
return x_recon
def predict(self, z):
logits = self.classifier(z)
return logits
def reparameterization(self, mu, logvar):
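        # Reparameterization trick: z = mu + std * eps with eps ~ N(0, 1), drawn via rsample() so that
        # gradients flow back into mu and logvar; the 1e-8 offset keeps the std strictly positive.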
std = logvar.mul(0.5).exp_()
z = torch.distributions.Normal(mu, std+1e-8).rsample()
return z
class ResNet_Classifier(nn.Module):
def __init__(self, num_classes = 10, bottleneck = 64):
super(ResNet_Classifier, self).__init__()
self.num_classes = num_classes
self.classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(bottleneck,num_classes)
)
def forward(self, z):
logits = self.classifier(z)
return logits
class ResNet_Encoder(nn.Module):
def __init__(self, bottleneck=512):
super(ResNet_Encoder, self).__init__()
resnet = resnet18() # Make sure to put bottleneck = 512
self.conv1 = nn.Conv2d(3, 64, kernel_size=4,
stride=2, padding=1, bias=False)
self.encoder = torch.nn.Sequential(self.conv1,*(list(resnet.children())[1:3]),*(list(resnet.children())[4:-2]))
##############################################
self.conv_mean = nn.Conv2d(bottleneck, bottleneck, kernel_size=2, stride=1, padding=0)
self.conv_logvar = nn.Conv2d(bottleneck, bottleneck, kernel_size=2, stride=1, padding=0)
self.fc_mean = nn.Linear(bottleneck,bottleneck)
self.fc_logvar = nn.Linear(bottleneck,bottleneck)
def forward(self, x):
x = self.encoder(x)
mean = self.conv_mean(x)
logvar = self.conv_logvar(x)
mean = mean.squeeze()
logvar = logvar.squeeze()
logvar = logvar.clamp(max=5) # Numerical stability. Equals max(std)==12.1825
return mean, logvar
class ResizeConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, scale_factor, mode='nearest'):
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=1)
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
x = self.conv(x)
return x
class BasicBlockDec(nn.Module):
def __init__(self, in_planes, stride=1):
super().__init__()
planes = int(in_planes/stride)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
# self.bn1 could have been placed here, but that messes up the order of the layers when printing the class
if stride == 1:
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
else:
self.conv1 = ResizeConv2d(in_planes, planes, kernel_size=3, scale_factor=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential(
ResizeConv2d(in_planes, planes, kernel_size=3, scale_factor=stride),
nn.BatchNorm2d(planes)
)
def forward(self, x):
out = torch.relu(self.bn2(self.conv2(x)))
out = self.bn1(self.conv1(out))
out += self.shortcut(x)
out = torch.relu(out)
return out
class ResNet18Dec(nn.Module):
def __init__(self, num_Blocks=[2,2,2,2], bottleneck=512, nc=3):
super().__init__()
self.in_planes = 2*bottleneck
self.linear = nn.Linear(2*bottleneck, 2*bottleneck)
self.layer4 = self._make_layer(BasicBlockDec, int(bottleneck), num_Blocks[3], stride=2)
self.layer3 = self._make_layer(BasicBlockDec, int(bottleneck/2), num_Blocks[2], stride=2)
self.layer2 = self._make_layer(BasicBlockDec, int(bottleneck/4), num_Blocks[1], stride=2)
self.layer1 = self._make_layer(BasicBlockDec, int(bottleneck/8), num_Blocks[0], stride=2)
self.conv1 = ResizeConv2d(int(bottleneck/8), nc, kernel_size=3, scale_factor=2)
def _make_layer(self, BasicBlockDec, planes, num_Blocks, stride):
strides = [stride] + [1]*(num_Blocks-1)
layers = []
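        # Blocks are stacked in reversed stride order, so the upsampling block (stride > 1, via
        # ResizeConv2d) comes last in each layer and mirrors the encoder's downsampling layout.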
for stride in reversed(strides):
layers += [BasicBlockDec(self.in_planes, stride)]
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, z):
#z = F.relu(self.linear(z))
z = z.view(z.size(0), z.size(1), 1, 1)
#x = F.interpolate(x, scale_factor=2)
x = self.layer4(z)
x = self.layer3(x)
x = self.layer2(x)
x = self.layer1(x)
x = torch.sigmoid(self.conv1(x))
#x = x.view(x.size(0), 3, 32, 32)
return x
| 5,542 | 33.216049 | 119 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/module/mlp_vae.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_VAE(nn.Module):
def __init__(self, num_classes = 10, bottleneck = 16):
super(MLP_VAE, self).__init__()
self.encoder = MLP_Encoder(bottleneck = bottleneck)
self.decoder = MLP_Decoder(bottleneck = bottleneck)
self.classifier = MLP_Classifier(num_classes = num_classes, bottleneck = bottleneck)
def forward(self, x):
mean, logvar = self.encoder(x)
z = self.reparameterization(mean, logvar)
logits = self.predict(z)
return z, logits, mean, logvar
def reconstruct(self, z):
x_recon = self.decoder(z)
return x_recon
def predict(self, z):
logits = self.classifier(z)
return logits
def reparameterization(self, mu, logvar):
std = logvar.mul(0.5).exp_()
z = torch.distributions.Normal(mu, std+1e-8).rsample()
return z
class MLP_Encoder(nn.Module):
def __init__(self, bottleneck = 16):
super(MLP_Encoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(3*28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU()
)
self.fc_mean = nn.Linear(100, bottleneck)
self.fc_logvar = nn.Linear(100, bottleneck)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.encoder(x)
mean = self.fc_mean(x)
logvar = self.fc_logvar(x)
return mean, logvar
class MLP_Decoder(nn.Module):
def __init__(self, bottleneck = 16):
super(MLP_Decoder, self).__init__()
self.decoder = nn.Sequential(
nn.Linear(bottleneck*2, 512), # Combined representations of signal and bias encoder
nn.ReLU(),
nn.Linear(512, 1024),
nn.ReLU(),
nn.Linear(1024, 3*28*28)
)
def forward(self, z):
z = self.decoder(z)
x_hat = torch.sigmoid(z)
x_hat = x_hat.view(x_hat.size(0),3,28,28)
return x_hat
class MLP_Classifier(nn.Module):
def __init__(self, num_classes = 10, bottleneck = 16):
super(MLP_Classifier, self).__init__()
self.classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(bottleneck,num_classes),
)
self.num_classes = num_classes #Necessary for DeepFool2
def forward(self, z):
logits = self.classifier(z)
return logits
| 2,499 | 25.041667 | 96 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/module/resnet.py
|
''' From https://github.com/alinlab/LfF/blob/master/module/resnet.py '''
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-paste from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202 | 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = [
"ResNet",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet1202",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(
3, 16, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
#self.fc = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
#out = F.avg_pool2d(out, out.size()[3])
#feat = out.view(out.size(0), -1)
return out
def predict(self, x):
prediction = self.fc(x)
return prediction
def forward(self, x, mode=None):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = F.avg_pool2d(out, out.size()[3])
# out = out.view(out.size(0), -1)
#out = self.avgpool(out)
#out = out.view(out.size(0), -1)
#final_out = self.fc(out)
return out
def resnet20():
return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters(),
)
)
),
)
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith("resnet"):
print(net_name)
test(globals()[net_name]())
print()
| 6,138 | 27.290323 | 78 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/module/mlp.py
|
''' From https://github.com/alinlab/LfF/blob/master/module/MLP.py '''
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, num_classes = 10):
super(MLP, self).__init__()
self.feature = nn.Sequential(
nn.Linear(3 * 28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU()
)
self.classifier = nn.Linear(100, num_classes)
def forward(self, x, return_feat=False):
x = x.view(x.size(0), -1)# / 255
feat = x = self.feature(x)
x = self.classifier(x)
if return_feat:
return x, feat
else:
return x
| 721 | 25.740741 | 69 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/module/util.py
|
''' Modified from https://github.com/alinlab/LfF/blob/master/module/util.py '''
import torch.nn as nn
from module.resnet import resnet20
from module.mlp import MLP
from module.mlp_vae import MLP_VAE
from module.resnet_vae import ResNet_VAE
from torchvision.models import resnet18, resnet50
def get_model(config):
model_tag = config["model"]["tag"]
dataset = config["data"]["dataset"]
if dataset in {"colored_mnist", "cifar10_type0", "cifar10_type1"}:
num_classes = 10
elif dataset in {"camelyon17_type0", "camelyon17_type1", "camelyon17_type2"}:
num_classes = 2
else: raise NotImplementedError("Dataset is not integrated.")
if model_tag == "ResNet20":
return resnet20(num_classes)
elif model_tag == "ResNet18":
model = resnet18(pretrained=True)
print("Pretrained&frozen ResNet18")
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Linear(512, num_classes)
model.fc.weight.requires_grad = True
model.fc.bias.requires_grad = True
return model
elif model_tag == "ResNet50":
model = resnet50(pretrained=True)
print("Pretrained&frozen ResNet50")
for param in model.parameters():
param.requires_grad = False
model.fc = nn.Linear(2048, num_classes)
model.fc.weight.requires_grad = True
model.fc.bias.requires_grad = True
return model
elif model_tag == "MLP":
return MLP(num_classes=num_classes)
elif model_tag == "MLP_VAE":
return MLP_VAE(num_classes=num_classes,bottleneck=config["model"]["bottleneck_MLP"])
elif model_tag == "ResNet_VAE":
return ResNet_VAE(num_classes=num_classes,bottleneck = config["model"]["bottleneck_ResNet"])
else:
raise NotImplementedError("Model not implemented.")
# def get_disentangler(config):
# model_tag = config["model"]["tag"]
# if model_tag == "MLP":
# return FFVAE_Disentangler()
# elif model_tag == "MLP_VAE":
# return FFVAE_Disentangler()
# else:
# raise NotImplementedError("Model not implemented.")
| 2,150 | 36.736842 | 100 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/rotated_mnist_protocol.py
|
import os
from functools import partial
import torchvision.transforms.functional as F
dir_path = os.path.dirname(os.path.realpath(__file__))
def rotate(raw_image, severity, attribute_label):
if severity==0:
raise NotImplementedError("Need severity != 0")
rotation = 90/(5-severity)
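    # Severity 1-4 maps to rotations of 22.5, 30, 45 and 90 degrees for attribute label 1.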
if attribute_label == 0:
return raw_image
elif attribute_label == 1:
image = F.rotate(raw_image.unsqueeze(0).float(),rotation).squeeze(0)
return image
else: raise NotImplementedError("Only 2class-dataset")
ROTATED_MNIST_PROTOCOL = dict()
for i in range(2):
ROTATED_MNIST_PROTOCOL[i] = partial(rotate, attribute_label = i)
| 662 | 32.15 | 76 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/attr_dataset.py
|
'''Modified from https://github.com/alinlab/LfF/blob/master/data/attr_dataset.py'''
import os
import pickle
import torch
import numpy as np
from torch.utils.data import Dataset
class AttributeDataset(Dataset):
def __init__(self, root, split, query_attr_idx=None, transform=None):
super(AttributeDataset, self).__init__()
data_path = os.path.join(root, split, "images.npy")
self.data = np.load(data_path)
attr_path = os.path.join(root, split, "attrs.npy")
self.attr = torch.LongTensor(np.load(attr_path))
attr_names_path = os.path.join(root, "attr_names.pkl")
with open(attr_names_path, "rb") as f:
self.attr_names = pickle.load(f)
self.num_attrs = self.attr.size(1)
self.set_query_attr_idx(query_attr_idx)
self.transform = transform
def set_query_attr_idx(self, query_attr_idx):
if query_attr_idx is None:
query_attr_idx = torch.arange(self.num_attrs)
self.query_attr = self.attr[:, query_attr_idx]
def __len__(self):
return self.attr.size(0)
def __getitem__(self, index):
image, attr = self.data[index], self.query_attr[index]
if self.transform is not None:
image = self.transform(image)
return image, attr
| 1,335 | 31.585366 | 83 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/shifted_mnist_protocol.py
|
import os
from functools import partial
import torchvision.transforms.functional as F
dir_path = os.path.dirname(os.path.realpath(__file__))
def shift(raw_image, severity, attribute_label):
if severity==0:
raise NotImplementedError("Need severity != 0")
translation = 8/(5-severity)
if attribute_label == 0:
image = F.affine(raw_image.unsqueeze(0).float(),scale=1,shear=0,angle=0,translate=(translation,translation/2)).squeeze(0)
return image
elif attribute_label == 1:
image = F.affine(raw_image.unsqueeze(0).float(),scale=1,shear=0,angle=0,translate=(-translation,-translation/2)).squeeze(0)
return image
else: raise NotImplementedError("Only 2class-dataset")
SHIFTED_MNIST_PROTOCOL = dict()
for i in range(2):
SHIFTED_MNIST_PROTOCOL[i] = partial(shift, attribute_label = i)
| 845 | 35.782609 | 131 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/corrupted_cifar10_protocol.py
|
'''Modified from https://github.com/alinlab/LfF/blob/master/data/corrupted_cifar10_protocol.py'''
# -*- coding: utf-8 -*-
import os
from PIL import Image
import os.path
import time
import numpy as np
from PIL import Image
# /////////////// Distortion Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
from tqdm import tqdm
import warnings
warnings.simplefilter("ignore", UserWarning)
resource_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"resource"
)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (
ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double,
) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=32, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats in range 0-1.
'mapsize' must be a power of two.
"""
assert mapsize & (mapsize - 1) == 0
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(
-wibble, wibble, array.shape
)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[
stepsize // 2 : mapsize : stepsize,
stepsize // 2 : mapsize : stepsize,
] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[
stepsize // 2 : mapsize : stepsize,
stepsize // 2 : mapsize : stepsize,
]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[
0:mapsize:stepsize, stepsize // 2 : mapsize : stepsize
] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[
stepsize // 2 : mapsize : stepsize, 0:mapsize:stepsize
] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / zoom_factor))
top = (h - ch) // 2
img = scizoom(
img[top : top + ch, top : top + ch],
(zoom_factor, zoom_factor, 1),
order=1,
)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top : trim_top + h, trim_top : trim_top + h]
# /////////////// End Distortion Helpers ///////////////
# /////////////// Distortions ///////////////
def gaussian_noise(x, severity=1):
c = [0.04, 0.06, 0.08, 0.09, 0.10][severity - 1]
x = np.array(x) / 255.0
return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, severity=1):
c = [500, 250, 100, 75, 50][severity - 1]
x = np.array(x) / 255.0
return np.clip(np.random.poisson(x * c) / c, 0, 1) * 255
def impulse_noise(x, severity=1):
c = [0.01, 0.02, 0.03, 0.05, 0.07][severity - 1]
x = sk.util.random_noise(np.array(x) / 255.0, mode="s&p", amount=c)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, severity=1):
c = [0.06, 0.1, 0.12, 0.16, 0.2][severity - 1]
x = np.array(x) / 255.0
return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def gaussian_blur(x, severity=1):
c = [0.4, 0.6, 0.7, 0.8, 1,][severity - 1]
x = gaussian(np.array(x) / 255.0, sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, severity=1):
# sigma, max_delta, iterations
c = [(0.05, 1, 1), (0.25, 1, 1), (0.4, 1, 1), (0.25, 1, 2), (0.4, 1, 2)][
severity - 1
]
x = np.uint8(
gaussian(np.array(x) / 255.0, sigma=c[0], multichannel=True) * 255
)
# locally shuffle pixels
for i in range(c[2]):
for h in range(32 - c[1], c[1], -1):
for w in range(32 - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return (
np.clip(gaussian(x / 255.0, sigma=c[0], multichannel=True), 0, 1) * 255
)
def defocus_blur(x, severity=1):
c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][severity - 1]
x = np.array(x) / 255.0
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, severity=1):
c = [(6, 1), (6, 1.5), (6, 2), (8, 2), (9, 2.5)][severity - 1]
output = BytesIO()
x.save(output, format="PNG")
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
x = cv2.imdecode(
np.fromstring(x.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED
)
if x.shape != (32, 32):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, severity=1):
c = [
np.arange(1, 1.06, 0.01),
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.01),
np.arange(1, 1.26, 0.01),
][severity - 1]
x = (np.array(x) / 255.0).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, severity=1):
c = [(0.2, 3), (0.5, 3), (0.75, 2.5), (1, 2), (1.5, 1.75)][severity - 1]
x = np.array(x) / 255.0
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1])[:32, :32][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, severity=1):
c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][
severity - 1
]
idx = np.random.randint(5)
filename = [
f"{resource_path}/frost1.png",
f"{resource_path}/frost2.png",
f"{resource_path}/frost3.png",
f"{resource_path}/frost4.jpg",
f"{resource_path}/frost5.jpg",
f"{resource_path}/frost6.jpg",
][idx]
frost = cv2.imread(filename)
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
# randomly crop and convert to rgb
x_start, y_start = (
np.random.randint(0, frost.shape[0] - 32),
np.random.randint(0, frost.shape[1] - 32),
)
frost = frost[x_start : x_start + 32, y_start : y_start + 32][
..., [2, 1, 0]
]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, severity=1):
c = [
(0.1, 0.2, 1, 0.6, 8, 3, 0.95),
(0.1, 0.2, 1, 0.5, 10, 4, 0.9),
(0.15, 0.3, 1.75, 0.55, 10, 4, 0.9),
(0.25, 0.3, 2.25, 0.6, 12, 6, 0.85),
(0.3, 0.3, 1.25, 0.65, 14, 12, 0.8),
][severity - 1]
x = np.array(x, dtype=np.float32) / 255.0
snow_layer = np.random.normal(
size=x.shape[:2], loc=c[0], scale=c[1]
) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray(
(np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode="L"
)
output = BytesIO()
snow_layer.save(output, format="PNG")
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(
radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45)
)
snow_layer = (
cv2.imdecode(
np.fromstring(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED,
)
/ 255.0
)
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(
x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(32, 32, 1) * 1.5 + 0.5
)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, severity=1):
c = [
(0.62, 0.1, 0.7, 0.7, 0.5, 0),
(0.65, 0.1, 0.8, 0.7, 0.5, 0),
(0.65, 0.3, 1, 0.69, 0.5, 0),
(0.65, 0.1, 0.7, 0.69, 0.6, 1),
(0.65, 0.1, 0.5, 0.68, 0.6, 1),
][severity - 1]
x = np.array(x, dtype=np.float32) / 255.0
liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
# ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
# ker -= np.mean(ker)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
        # water is pale turquoise
color = np.concatenate(
(
175 / 255.0 * np.ones_like(m[..., :1]),
238 / 255.0 * np.ones_like(m[..., :1]),
238 / 255.0 * np.ones_like(m[..., :1]),
),
axis=2,
)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return (
cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
)
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# m = np.abs(m) ** (1/c[4])
# mud brown
color = np.concatenate(
(
63 / 255.0 * np.ones_like(x[..., :1]),
42 / 255.0 * np.ones_like(x[..., :1]),
20 / 255.0 * np.ones_like(x[..., :1]),
),
axis=2,
)
color *= m[..., np.newaxis]
x *= 1 - m[..., np.newaxis]
return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=1):
c = [0.75, 0.5, 0.4, 0.3, 0.15, 0.12][severity - 1]
x = np.array(x) / 255.0
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def brightness(x, severity=1):
c = [0.05, 0.1, 0.15, 0.2, 0.3][severity - 1]
x = np.array(x) / 255.0
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def saturate(x, severity=1):
c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][severity - 1]
x = np.array(x) / 255.0
x = sk.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def jpeg_compression(x, severity=1):
c = [80, 65, 58, 50, 40][severity - 1]
output = BytesIO()
x.save(output, "JPEG", quality=c)
x = np.asarray(PILImage.open(output))
return x
def pixelate(x, severity=1):
c = [0.95, 0.9, 0.85, 0.75, 0.65][severity - 1]
x = x.resize((int(32 * c), int(32 * c)), PILImage.BOX)
x = x.resize((32, 32), PILImage.BOX)
return x
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
IMSIZE = 32
c = [
(IMSIZE * 0, IMSIZE * 0, IMSIZE * 0.08),
(IMSIZE * 0.05, IMSIZE * 0.2, IMSIZE * 0.07),
(IMSIZE * 0.08, IMSIZE * 0.06, IMSIZE * 0.06),
(IMSIZE * 0.1, IMSIZE * 0.04, IMSIZE * 0.05),
(IMSIZE * 0.1, IMSIZE * 0.03, IMSIZE * 0.03),
][severity - 1]
image = np.array(image, dtype=np.float32) / 255.0
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32(
[
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size,
]
)
pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(
np.float32
)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(
image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101
)
dx = (
gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode="reflect",
truncate=3,
)
* c[0]
).astype(np.float32)
dy = (
gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode="reflect",
truncate=3,
)
* c[0]
).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])
)
indices = (
np.reshape(y + dy, (-1, 1)),
np.reshape(x + dx, (-1, 1)),
np.reshape(z, (-1, 1)),
)
return (
np.clip(
map_coordinates(image, indices, order=1, mode="reflect").reshape(
shape
),
0,
1,
)
* 255
)
# /////////////// End Distortions ///////////////
import collections
CORRUPTED_CIFAR10_PROTOCOL = collections.OrderedDict()
CORRUPTED_CIFAR10_PROTOCOL["Gaussian Noise"] = gaussian_noise
CORRUPTED_CIFAR10_PROTOCOL["Shot Noise"] = shot_noise
CORRUPTED_CIFAR10_PROTOCOL["Impulse Noise"] = impulse_noise
CORRUPTED_CIFAR10_PROTOCOL["Speckle Noise"] = speckle_noise
CORRUPTED_CIFAR10_PROTOCOL["Gaussian Blur"] = gaussian_blur
CORRUPTED_CIFAR10_PROTOCOL["Defocus Blur"] = defocus_blur
CORRUPTED_CIFAR10_PROTOCOL["Glass Blur"] = glass_blur
CORRUPTED_CIFAR10_PROTOCOL["Motion Blur"] = motion_blur
CORRUPTED_CIFAR10_PROTOCOL["Zoom Blur"] = zoom_blur
CORRUPTED_CIFAR10_PROTOCOL["Snow"] = snow
CORRUPTED_CIFAR10_PROTOCOL["Frost"] = frost
CORRUPTED_CIFAR10_PROTOCOL["Fog"] = fog
CORRUPTED_CIFAR10_PROTOCOL["Brightness"] = brightness
CORRUPTED_CIFAR10_PROTOCOL["Contrast"] = contrast
CORRUPTED_CIFAR10_PROTOCOL["Elastic"] = elastic_transform
CORRUPTED_CIFAR10_PROTOCOL["Pixelate"] = pixelate
CORRUPTED_CIFAR10_PROTOCOL["JPEG"] = jpeg_compression
CORRUPTED_CIFAR10_PROTOCOL["Spatter"] = spatter
CORRUPTED_CIFAR10_PROTOCOL["Saturate"] = saturate
CORRUPTED_CIFAR10_PROTOCOL["Original"] = lambda image, severity: np.array(image)
| 16,647 | 28.942446 | 97 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/util.py
|
'''Modified from https://github.com/alinlab/LfF/blob/master/data/util.py'''
import os
import numpy as np
import torch
from torch.utils.data.dataset import Dataset, Subset
from torch.utils.data import Sampler, random_split
from torchvision import transforms as T
from data.attr_dataset import AttributeDataset
from functools import reduce
class IdxDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (idx, *self.dataset[idx])
class IdxDataset2(Dataset):
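    # Wraps a dataset and randomly splits it 90/10 into a training subset and a (biased) validation
    # subset; make_train()/make_biased_val()/make_fulltrain() select which split __getitem__ serves,
    # prepending the index within the current split to every returned item.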
def __init__(self, dataset):
self.full_dataset = dataset
train_set_size = int(len(self.full_dataset) * 0.9)
valid_set_size = len(self.full_dataset) - train_set_size
self.train_set, self.valid_set = random_split(self.full_dataset, [train_set_size, valid_set_size])
self.dataset = self.train_set
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (idx, *self.dataset[idx])
def make_train(self):
self.dataset = self.train_set
def make_biased_val(self):
self.dataset = self.valid_set
def make_fulltrain(self):
self.dataset = self.full_dataset
class ZippedDataset(Dataset):
def __init__(self, datasets):
super(ZippedDataset, self).__init__()
self.dataset_sizes = [len(d) for d in datasets]
self.datasets = datasets
def __len__(self):
return max(self.dataset_sizes)
def __getitem__(self, idx):
items = []
for dataset_idx, dataset_size in enumerate(self.dataset_sizes):
items.append(self.datasets[dataset_idx][idx % dataset_size])
item = [torch.stack(tensors, dim=0) for tensors in zip(*items)]
return item
transforms = {
"ColoredMNIST": {
"train": T.Compose([T.ToTensor()]),
"eval": T.Compose([T.ToTensor()])
},
"CorruptedCIFAR10": {
"train": T.Compose(
[
T.ToPILImage(),
T.RandomResizedCrop(32,scale=(0.5, 1.0)), #Scale of randomcrop+padding=4 would equal 0.765625
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"eval": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
},
"Camelyon17": {
"train": T.Compose(
[
T.ToPILImage(),
T.CenterCrop(32),
T.RandomResizedCrop(32,scale=(0.5, 1.0)), #Scale of randomcrop+padding=4 would equal 0.765625
T.RandomHorizontalFlip(),
T.ToTensor(),
]
),
"eval": T.Compose(
[
T.ToPILImage(),
T.CenterCrop(32),
T.ToTensor(),
]
),
},
}
def get_dataset_tag(config):
bias_confl_perc = config["data"]["bias_conflicting_perc"]
severity = config["data"]["severity"]
dataset = config["data"]["dataset"]
if dataset == "colored_mnist":
dataset_tag = f"ColoredMNIST-Skewed{bias_confl_perc}-Severity{severity}"
elif dataset == "cifar10_type0":
dataset_tag = f"CorruptedCIFAR10-Type0-Skewed{bias_confl_perc}-Severity{severity}"
elif dataset == "cifar10_type1":
dataset_tag = f"CorruptedCIFAR10-Type1-Skewed{bias_confl_perc}-Severity{severity}"
elif dataset == "camelyon17_type0":
dataset_tag = f"Camelyon17-Type0-Skewed{bias_confl_perc}"
elif dataset == "camelyon17_type1":
dataset_tag = f"Camelyon17-Type1-Skewed{bias_confl_perc}"
elif dataset == "camelyon17_type2":
dataset_tag = f"Camelyon17-Type2-Skewed{bias_confl_perc}"
else:
raise NotImplementedError("Dataset not implemented.")
return dataset_tag
def get_dataset(config, dataset_split):
dataset_tag = get_dataset_tag(config)
dataset_category = dataset_tag.split("-")[0]
data_dir = config["user"]["data_dir"]
root = os.path.join(data_dir, dataset_tag)
transform = transforms[dataset_category][dataset_split]
dataset_split = "test" if (dataset_split == "eval") else dataset_split
dataset = AttributeDataset(
root=root, split=dataset_split, transform=transform
)
return dataset
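# Minimal sketch (not part of the repo) of the nested config dict that
# get_dataset_tag()/get_dataset() above expect; every value below is a
# hypothetical placeholder.
EXAMPLE_CONFIG = {
    "data": {"dataset": "cifar10_type0", "bias_conflicting_perc": 1.0, "severity": 4},
    "user": {"data_dir": "/path/to/datasets"},  # hypothetical location
}
# get_dataset_tag(EXAMPLE_CONFIG) would return
# "CorruptedCIFAR10-Type0-Skewed1.0-Severity4", and get_dataset(EXAMPLE_CONFIG, "train")
# would load that AttributeDataset folder under data_dir with the "train" transform.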
| 4,488 | 29.746575 | 109 |
py
|
Signal-is-Harder
|
Signal-is-Harder-main/data/colored_mnist_protocol.py
|
'''Modified from https://github.com/alinlab/LfF/blob/master/data/colored_mnist_protocol.py'''
import os
import torch
import numpy as np
from functools import partial
dir_path = os.path.dirname(os.path.realpath(__file__))
colors_path = os.path.join(dir_path, "resource", "colors.th")
mean_color = torch.load(colors_path)
def colorize(raw_image, severity, attribute_label):
std_color = [0.05, 0.02, 0.01, 0.005, 0.002][severity-1]
image = (
torch.clamp(mean_color[attribute_label]
+ torch.randn((3, 1, 1)) * std_color, 0.0, 1.0)
) * raw_image.unsqueeze(0).float()
return image
COLORED_MNIST_PROTOCOL = dict()
for i in range(10):
COLORED_MNIST_PROTOCOL[i] = partial(colorize, attribute_label = i)
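# Standalone sketch of the colorization above (the real protocol loads its
# palette from resource/colors.th; a random 10x3x1x1 palette stands in here, so
# this is illustrative only, not the repo's colors).
if __name__ == "__main__":
    palette = torch.rand(10, 3, 1, 1)
    def colorize_sketch(raw_image, severity, attribute_label):
        std_color = [0.05, 0.02, 0.01, 0.005, 0.002][severity - 1]
        color = torch.clamp(palette[attribute_label] + torch.randn(3, 1, 1) * std_color, 0.0, 1.0)
        return color * raw_image.unsqueeze(0).float()
    digit = torch.rand(28, 28)  # stand-in for a 28x28 MNIST digit
    print(colorize_sketch(digit, severity=4, attribute_label=3).shape)  # torch.Size([3, 28, 28])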
| 742 | 31.304348 | 93 |
py
|
NNRec
|
NNRec-master/nn/__init__.py
| 0 | 0 | 0 |
py
|
|
NNRec
|
NNRec-master/nn/blocks/networkConfigParser.py
|
import yaml
from nn import *
from activations import *
from exceptions import Exception
from utils import *
# from cfRBM import ModelArgs
class NetworkConfigParser(object):
@classmethod
def getDataInfo(cls, path):
with open(path) as fp:
data = yaml.load(fp)
data_info = data["data"]
train_path = data_info["train"]
test_path = data_info["test"]
save_path = data_info["save"]
return (train_path, test_path, save_path)
@classmethod
def constructModelArgs(cls, path, ModelArgs):
kwargs = {}
with open(path) as fp:
data = yaml.load(fp)
params = data["params"]
if "reg_bias" in params:
kwargs["regularize_bias"] = params["reg_bias"]
if "momentum" in params:
kwargs["momentum"] = params["momentum"]
if "mean" in params:
kwargs["mean"] = params["mean"]
if "beta" in params:
kwargs["beta"] = params["beta"]
if "mean_normalization" in params:
kwargs["mean"] = params["mean_normalization"]
if "learn_rate" in params:
kwargs["learn_rate"] = params["learn_rate"]
if "num_threads" in params:
kwargs["num_threads"] = params["num_threads"]
kwargs["lamda"] = params["lamda"]
kwargs["max_iter"] = params["max_iter"]
if "optimizer" in params:
kwargs["optimizer"] = params["optimizer"]
if "batch_size" in params:
kwargs["batch_size"] = params["batch_size"]
args = ModelArgs(**kwargs)
return args
@classmethod
def constructNetwork(cls, path):
nn = NN()
with open(path) as fp:
data = yaml.load(fp)
layers = data["layers"]
layer_ids = layers.keys()
layer_ids.sort()
for layer_id in layer_ids:
layer_info = layers[layer_id]
layer = cls._constructLayer(layer_info)
nn.addLayer(layer)
nn.finalize()
return nn
@classmethod
def _constructLayer(cls, layer_info):
num_nodes = layer_info["num_nodes"]
activation = layer_info["activation"].lower()
if "partial" in layer_info:
isPartial = layer_info["partial"]
else:
isPartial = False
if "dropout" in layer_info:
dropout = layer_info["dropout"]
else:
dropout = 0.0
if "sparsity" in layer_info:
sparsity = layer_info["sparsity"]
else:
sparsity = None
if "binary" in layer_info:
binary = layer_info["binary"]
else:
binary = False
layer_type = layer_info["type"].lower()
activation = cls._getActivation(activation)
ltype = cls._getLayerType(layer_type)
layer = Layer(num_nodes, activation, ltype)
if isPartial:
layer.setPartial()
if dropout:
layer.setDropout(dropout)
if sparsity:
layer.setSparsity(sparsity)
if binary:
layer.setBinary()
return layer
@classmethod
def _getLayerType(cls, layer_type):
if layer_type == "input":
return LayerType.INPUT
elif layer_type == "hidden":
return LayerType.HIDDEN
elif layer_type == "output":
return LayerType.OUTPUT
else:
raise Exception("Unknown Layer Type")
@classmethod
def _getActivation(cls, activation):
if activation == "sigmoid":
return Sigmoid()
elif activation == "identity":
return Identity()
elif activation == "relu":
return RELU()
elif activation == "nrelu":
return NRELU()
elif activation == "tanh":
return Tanh()
else:
raise Exception("Unknown Activation Function")
@classmethod
def validateNetwork(cls, network, modelArgs):
pass
if __name__ == '__main__':
# parser = NetworkConfigParser()
data_info = NetworkConfigParser.getDataInfo("config/net.yaml")
modelArgs = NetworkConfigParser.constructModelArgs("config/net.yaml")
nn = NetworkConfigParser.constructNetwork("config/net.yaml")
print nn, modelArgs, data_info
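# Hedged sketch (not shipped with the repo) of the YAML layout this parser
# reads: a "data" block with train/test/save paths, a "params" block consumed
# by constructModelArgs, and numbered "layers" consumed by constructNetwork.
# All concrete keys and values below are illustrative guesses, not a reference
# config.
EXAMPLE_NET_YAML = """
data:
  train: /path/to/train.tsv   # hypothetical paths
  test: /path/to/test.tsv
  save: /path/to/model
params:
  lamda: [0.01, 0.01]
  max_iter: [100]
  learn_rate: 0.001
  optimizer: lbfgs
  batch_size: 20000
layers:
  1: {num_nodes: 1000, activation: identity, type: input}
  2: {num_nodes: 500, activation: sigmoid, type: hidden}
  3: {num_nodes: 1000, activation: identity, type: output, partial: true}
"""
# yaml.safe_load(EXAMPLE_NET_YAML)["layers"][2]["activation"] -> "sigmoid"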
| 4,429 | 30.41844 | 73 |
py
|
NNRec
|
NNRec-master/nn/blocks/nn.py
|
import numpy as np
from copy import deepcopy
class LayerType(object):
INPUT = 0
HIDDEN = 1
OUTPUT = 2
class Layer:
def __init__(self, num_units, activation, layerType,
dropout=None, sparsity=None, partial=False, isBiasEnabled=True):
self.num_units = num_units
self.activation = activation
self.mf = True
self.dropout = dropout
self.layerType = layerType
self.sparsity = sparsity
self.partial = partial
        self.isBiasEnabled = isBiasEnabled
self.binary = False
self.setBias()
def setBias(self):
self.bias = np.random.randn(1, self.num_units) * 0.001
def setSparsity(self, value):
self.sparsity = value
def isSparse(self):
return ((self.sparsity is not None) and (self.sparsity != 1))
def setPartial(self):
self.partial = True
def isPartial(self):
return self.partial
def setBinary(self):
self.binary = True
def isBinary(self):
return (self.binary == True)
def setDropout(self, p):
self.dropout = p
def hasDropout(self):
return ((self.dropout is not None) and (self.dropout != 0.0))
def hasBias(self):
return (hasattr(self, "bias") and (self.bias is not None))
def removeBias(self):
self.isBiasEnabled = False
self.bias = np.zeros((1, self.num_units))
def unsetMeanField(self):
self.mf = False
def copy(self):
return deepcopy(self)
def __str__(self):
layerinfo = "Number of Units = %d ; Layer type = %s\n" % (self.num_units,
self.activation)
drp = self.dropout if self.dropout else 0
sps = self.sparsity if self.sparsity else 0
additional_info = "Sparsity %f \t Dropout %f \t Partial %r " % (
sps, drp, self.partial)
return layerinfo + additional_info
class NN(object):
def __init__(self):
self.layers = []
self.weights = []
def _add_weights(self, n1, n2):
w_vis2hid = 0.01 * np.random.randn(n1, n2)
self.weights.append(w_vis2hid)
def addLayer(self, layer1):
self.layers.append(layer1)
if (len(self.layers) > 1):
self._add_weights(
self.layers[-2].num_units, self.layers[-1].num_units)
def getWeightByIndex(self, index):
return self.weights[index]
def setLimits(self):
self.weights_limit = [0]
self.bias_limit = [0]
for l in range(len(self.layers) - 1):
self.weights_limit.append(
self.weights_limit[-1] + self.layers[l].num_units *
self.layers[l + 1].num_units)
self.bias_limit.append(
self.bias_limit[-1] + self.layers[l + 1].num_units)
def getFlattenParams(self):
params = []
map(lambda x: params.append(x.flatten()), self.weights)
map(lambda x: params.append(x.bias.flatten()), self.layers[1:])
return np.concatenate(params)
def finalize(self):
self.setLimits()
def setDropout(self, layerIndex, dropout_prob):
self.layers[layerIndex].setDropout(dropout_prob)
self.weights[layerIndex] *= (1 / (1 - dropout_prob))
def __str__(self):
representation = ""
for i, l in enumerate(self.layers):
representation += "Layer = %d ; " % i + str(l) + "\n"
return representation
| 3,494 | 26.519685 | 82 |
py
|
NNRec
|
NNRec-master/nn/blocks/activations.py
|
import numpy as np
from cython_activations import *
class Activation(object):
"""docstring for Activation"""
def activation(self, x):
pass
def derivative(self, x):
pass
def binarize(self, x):
return x
class Identity(Activation):
"""docstring for Identity"""
def activation(self, x):
return x
def derivative(self, x):
return 1
class Sigmoid(Activation):
"""docstring for Sigmoid"""
def activation(self, x):
if len(x.shape) == 2:
return cy_sigmoid(x)
else:
return cy_sigmoid1d(x)
def derivative(self, x):
return np.multiply(x, 1 - x)
def binarize(self, x):
return 1.0 * (x > 0.5)
class RELU(Activation):
"""docstring for RELU"""
def activation(self, x):
return x * (x > 0)
def derivative(self, x):
return (x > 0) * 1
class NRELU(Activation):
"""docstring for NRELU"""
def activation(self, x):
if len(x.shape) == 2:
sigma = cy_sigmoid(x)
else:
sigma = cy_sigmoid1d(x)
x += np.random.randn(x.shape[0], x.shape[1]) * np.sqrt(sigma)
return x * (x > 0)
def derivative(self, x):
return (x > 0) * 1
class Tanh(Activation):
"""docstring for RELU"""
def activation(self, x):
return np.tanh(x)
def derivative(self, x):
return (1 - np.power(x, 2))
def binarize(self, x):
return 1.0 * (x > 0)
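# Standalone check (plain NumPy, illustrative only): derivative() in the classes
# above is applied to the *activation* value, not the pre-activation, e.g. the
# sigmoid derivative is expressed as a*(1-a) with a = sigmoid(z).
if __name__ == "__main__":
    z = np.array([-1.0, 0.0, 2.0])
    a = 1.0 / (1.0 + np.exp(-z))
    assert np.allclose(a * (1 - a), np.exp(-z) / (1 + np.exp(-z)) ** 2)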
| 1,496 | 16.406977 | 69 |
py
|
NNRec
|
NNRec-master/nn/blocks/__init__.py
| 0 | 0 | 0 |
py
|
|
NNRec
|
NNRec-master/nn/blocks/setup_activations.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_module = Extension(
"cython_activations",
["cython_activations.pyx"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs=[np.get_include()]
)
setup(
name = 'activations',
cmdclass = {'build_ext': build_ext},
ext_modules = [ext_module],
include_dirs=[np.get_include()]
)
| 470 | 22.55 | 41 |
py
|
NNRec
|
NNRec-master/nn/cfrbm/cfRBM.py
|
import numpy as np
from cython_rbm_matmul import cython_binarizeSparseMatrix, multiplyOuterSparseLayer
from utils.metrics.evaluate import EvaluateRBM
class ModelArgs(object):
"""docstring for ModelArgs"""
def __init__(self, learn_rate=0.001, regularize_bias=True,
momentum=0.6, lamda=0.001, CD=1, num_threads=10, max_iter=200,
k=5, mapping=None, min_learn_rate=10e-6,
batch_size=None):
super(ModelArgs, self).__init__()
self.learn_rate = learn_rate
self.regularize_bias = regularize_bias
self.CD = CD
self.momentum = momentum
self.lamda = lamda
self.num_threads = num_threads
self.max_iter = max_iter
        self.k = k
self.mapping = mapping
self.min_learn_rate = min_learn_rate
self.batch_size = batch_size
def __str__(self):
string = ""
for key in self.__dict__.keys():
string += "%s: %s\t" % (key, str(self.__dict__[key]))
return string
def binarizeSparseMatrix(x, k, mapping):
m, n = x.shape
return cython_binarizeSparseMatrix(x.data, x.indices, x.indptr,
m, n, k, mapping)
class RBM(object):
"""docstring for RBM"""
def __init__(self, nn, modelArgs, debug=True):
super(RBM, self).__init__()
self.nn = nn
self.modelArgs = modelArgs
self.debug = debug
ratings_array = modelArgs.mapping.keys()
ratings_array.sort()
ratings_array = np.array(ratings_array)
self.ratings_array = ratings_array.reshape((modelArgs.k, 1))
def getHiddenActivation(self, x):
hidden = x * self.nn.weights[0] + self.nn.layers[1].bias
hidden = self.nn.layers[1].activation.activation(hidden)
if self.nn.layers[1].isBinary():
hidden = self.nn.layers[1].activation.binarize(hidden)
return hidden
def getVisibleActivation(self, x, target, ncpus=16):
visible = multiplyOuterSparseLayer(x, self.nn.weights[0].T,
self.nn.layers[0].bias,
target.data,
target.indices,
target.indptr,
ncpus)
return self.nn.layers[0].activation.activation(visible)
def __binary2Ratings(self, prediction):
n = self.modelArgs.k
m = int(len(prediction) / n)
prediction = prediction.reshape(m, n)
normalizer = prediction.sum(axis=1).reshape(m, 1)
prediction = prediction / normalizer
rating = np.dot(prediction, self.ratings_array)
return np.ravel(rating)
def predict(self, train, test, normalize=True):
hidden = self.getHiddenActivation(train)
visible = self.getVisibleActivation(hidden, test)
# visible = np.exp(visible)
if normalize:
prediction = self.__binary2Ratings(visible)
else:
prediction = visible
return prediction
class RbmOptimizer(object):
"""docstring for RbmOptimizer"""
def __init__(self, RBM):
super(RbmOptimizer, self).__init__()
self.RBM = RBM
def train(self, train, test, rtest):
self.nn = self.RBM.nn
learn_rate = self.RBM.modelArgs.learn_rate
max_iter = self.RBM.modelArgs.max_iter
CD = self.RBM.modelArgs.CD
lamda = self.RBM.modelArgs.lamda
momentum = self.RBM.modelArgs.momentum
min_learn_rate = self.RBM.modelArgs.min_learn_rate
dW_old = np.zeros(self.nn.weights[0].shape)
dv_old = np.zeros(self.nn.layers[0].bias.shape)
dh_old = np.zeros(self.nn.layers[1].bias.shape)
evaluate = EvaluateRBM(self.RBM)
vispos = train
visneg = train.copy()
for i in range(max_iter):
if i > 50:
CD = 3
momentum = 0.9
hidpos = self.RBM.getHiddenActivation(vispos)
hidneg = hidpos
for j in range(CD):
visneg_data = self.RBM.getVisibleActivation(hidneg, vispos)
visneg.data = visneg_data
hidneg = self.RBM.getHiddenActivation(visneg)
dW = momentum * dW_old + learn_rate *\
((vispos.T * hidpos) -
(visneg.T * hidneg) - lamda * self.nn.weights[0])
dvbias = momentum * dv_old + 0.1 * learn_rate *\
((vispos - visneg).sum(axis=0) -
lamda * self.nn.layers[0].bias)
dhbias = momentum * dh_old + learn_rate *\
((hidpos - hidneg).sum(axis=0) -
lamda * self.nn.layers[1].bias)
dW_old = dW
dv_old = dvbias
dh_old = dhbias
self.nn.weights[0] += dW
self.nn.layers[0].bias += dvbias
self.nn.layers[1].bias += dhbias
if i % 5 == 0:
learn_rate = max(learn_rate * 0.95, min_learn_rate)
print evaluate.calculateRMSEandMAE(train, test, rtest)
def minibatchTrain(self, train, test, rtest, batch_size):
self.nn = self.RBM.nn
slearn_rate = self.RBM.modelArgs.learn_rate
max_iter = self.RBM.modelArgs.max_iter
CD = self.RBM.modelArgs.CD
lamda = self.RBM.modelArgs.lamda
momentum = self.RBM.modelArgs.momentum
min_learn_rate = self.RBM.modelArgs.min_learn_rate
dW_old = np.zeros(self.nn.weights[0].shape)
dv_old = np.zeros(self.nn.layers[0].bias.shape)
dh_old = np.zeros(self.nn.layers[1].bias.shape)
evaluate = EvaluateRBM(self.RBM)
m, n = train.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
if (m - batches[-1]) < (batch_size / 2.0):
batches[-1] = m
else:
batches.append(m)
for i in range(max_iter):
if i > 50:
CD = 3
momentum = 0.9
for j in range(len(batches) - 1):
start = batches[j]
end = batches[j + 1]
learn_rate = slearn_rate / (end - start)
learn_rate = max(learn_rate, min_learn_rate)
vispos = train[start:end, :]
visneg = vispos.copy()
hidpos = self.RBM.getHiddenActivation(vispos)
hidneg = hidpos
for k in range(CD):
visneg_data = self.RBM.getVisibleActivation(hidneg, vispos)
visneg.data = visneg_data
hidneg = self.RBM.getHiddenActivation(visneg)
dW = momentum * dW_old + learn_rate *\
((vispos.T * hidpos) -
(visneg.T * hidneg) - lamda * self.nn.weights[0])
dvbias = momentum * dv_old + learn_rate *\
((vispos - visneg).sum(axis=0) -
lamda * self.nn.layers[0].bias)
dhbias = momentum * dh_old + 0.1 * learn_rate *\
((hidpos - hidneg).sum(axis=0) -
lamda * self.nn.layers[1].bias)
dW_old = dW
dv_old = dvbias
dh_old = dhbias
self.nn.weights[0] += dW
self.nn.layers[0].bias += dvbias
self.nn.layers[1].bias += dhbias
if i % 5 == 0:
slearn_rate *= 0.95
print evaluate.calculateRMSEandMAE(train, test, rtest)
def sgdTrain(self, train, test, rtest):
self.nn = self.RBM.nn
learn_rate = self.RBM.modelArgs.learn_rate
max_iter = self.RBM.modelArgs.max_iter
CD = self.RBM.modelArgs.CD
lamda = self.RBM.modelArgs.lamda
momentum = self.RBM.modelArgs.momentum
dW_old = np.zeros(self.nn.weights[0].shape)
dv_old = np.zeros(self.nn.layers[0].bias.shape)
dh_old = np.zeros(self.nn.layers[1].bias.shape)
evaluate = EvaluateRBM(self.RBM)
# traindata = train.data
# testdata = test.data
m, n = train.shape
for i in range(max_iter):
if i > 50:
CD = 3
momentum = 0.9
for j in range(m - 1):
vispos = train.getrow(j)
visneg = vispos.copy()
hidpos = self.RBM.getHiddenActivation(vispos)
hidneg = hidpos
for k in range(CD):
visneg_data = self.RBM.getVisibleActivation(hidneg, vispos)
visneg.data = visneg_data
hidneg = self.RBM.getHiddenActivation(visneg)
dW = momentum * dW_old + learn_rate *\
((vispos.T * hidpos) -
(visneg.T * hidneg) - lamda * self.nn.weights[0])
dvbias = momentum * dv_old + learn_rate *\
((vispos - visneg).sum(axis=0) -
lamda * self.nn.layers[0].bias)
dhbias = momentum * dh_old + 0.1 * learn_rate *\
((hidpos - hidneg).sum(axis=0) -
lamda * self.nn.layers[1].bias)
dW_old = dW
dv_old = dvbias
dh_old = dhbias
self.nn.weights[0] += dW
self.nn.layers[0].bias += dvbias
self.nn.layers[1].bias += dhbias
if i % 5 == 0:
            learn_rate *= 0.95
print evaluate.calculateRMSEandMAE(train, test, rtest)
| 9,604 | 36.084942 | 83 |
py
|
NNRec
|
NNRec-master/nn/cfrbm/learner.py
|
from cfRBM import *
from dataUtils.data import loadTrainTest
# from nn.blocks.nn import Layer, NN, LayerType
from nn.blocks.activations import *
from nn.blocks.networkConfigParser import NetworkConfigParser
import yaml
def train(config_path):
configparser = NetworkConfigParser()
nn = configparser.constructNetwork(config_path)
modelArgs = configparser.constructModelArgs(config_path, ModelArgs)
train_path, test_path, save_path = configparser.getDataInfo(config_path)
print nn
data = yaml.load(open(config_path))
params = data["params"]
k = params["k"]
n_vis = int(nn.layers[0].num_units / k)
train, test, cold_ratings = loadTrainTest(train_path, test_path,
shape=(None, n_vis))
min_rating, max_rating = train.data.min(), train.data.max()
increment = 1
mapping = dict(zip(np.arange(min_rating, max_rating + increment,
increment), np.arange(k)))
modelArgs.mapping = mapping
modelArgs.k = k
bintrain = binarizeSparseMatrix(train, k, mapping)
bintest = binarizeSparseMatrix(test, k, mapping)
del train
model = RBM(nn, modelArgs)
optimizer = RbmOptimizer(model)
optimizer.minibatchTrain(bintrain, bintest, test, modelArgs.batch_size)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Description')
parser.add_argument(
'--config', '-c', help='configuration file', required=True)
args = parser.parse_args()
config_path = args.config
train(config_path)
| 1,589 | 33.565217 | 76 |
py
|
NNRec
|
NNRec-master/nn/cfrbm/setup_rbm_matmul.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_module = Extension(
"cython_rbm_matmul",
["cython_rbm_matmul.pyx"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs=[np.get_include()]
)
setup(
name='cython helpers',
cmdclass={'build_ext': build_ext},
ext_modules=[ext_module],
include_dirs=[np.get_include()]
)
| 465 | 22.3 | 41 |
py
|
NNRec
|
NNRec-master/nn/autorec/generateNNFeatures.py
|
import numpy as np
from nn.blocks.networkConfigParser import NetworkConfigParser
from modelLoader import loadModel, LoadDataAndMapping
def dumpArray(array, outpath, mapping):
fp = open(outpath, "wb")
m, n = array.shape
for i in range(m):
for j in range(n):
value = array[i, j]
if value != 0:
fp.write("%s\t%d\t%f\n" % (mapping[i], j, array[i, j]))
fp.close()
def dumpFeatures(config_path, mtype, outpath):
model = loadModel(config_path)
train, test, usermap, itemmap = LoadDataAndMapping(config_path)
target_layer = 1
targetLayerData = model.getActivationOfLayer(train, target_layer)
if mtype == "user":
dumpArray(targetLayerData, outpath, usermap)
if mtype == "item":
dumpArray(targetLayerData, outpath, itemmap)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Description')
parser.add_argument(
'--config', '-c', help='configuration file', required=True)
parser.add_argument(
        '--mtype', '-m', help="feature type: 'user' or 'item'", required=True)
parser.add_argument(
        '--outfile', '-o', help='output file path', required=True)
args = parser.parse_args()
config_path = args.config
mtype = args.mtype
outfile = args.outfile
dumpFeatures(config_path, mtype, outfile)
| 1,370 | 32.439024 | 71 |
py
|
NNRec
|
NNRec-master/nn/autorec/optimizers.py
|
import scipy.optimize
from climin import *
import itertools
# from sklearn.utils import shuffle
from lossDeriv import *
class LBFGS(object):
"""docstring for LBFGS"""
def __init__(self, ae, evaluate, theta, lossDeriv, train, test,
nn, modelArgs, iterCounter, batch_size, max_iter):
super(LBFGS, self).__init__()
self.ae = ae
self.evaluate = evaluate
self.theta = theta
self.lossDeriv = lossDeriv
self.train = train
self.test = test
self.nn = nn
self.modelArgs = modelArgs
self.iterCounter = iterCounter
self.batch_size = batch_size
self.max_iter = max_iter
def __iter__(self):
return self
def next(self):
outLayer = self.ae.nn.layers[-1]
def cbk(x):
if (self.iterCounter.count % 5) == 0:
self.ae.setParameters(x)
if outLayer.isPartial():
rmse, mae = self.evaluate.calculateRMSEandMAE(
self.train, self.test)
else:
rmse, mae = self.evaluate.calculateRMSEandMAE(
self.test, self.test)
print 'Iteration : %d '\
'Test RMSE: %f MAE: %f' % (
self.iterCounter.count, rmse, mae)
opt_solution = scipy.optimize.minimize(self.lossDeriv,
self.theta,
args=(
self.train,
self.ae.nn, self.modelArgs,
self.iterCounter,
self.batch_size),
method = 'L-BFGS-B',
jac = True, callback=cbk,
options =
{'maxiter': self.max_iter,
"disp": 0})
opt_theta = opt_solution.x
self.ae.setParameters(opt_theta)
raise StopIteration("End of the iteration")
def getMiniBatchParamsIterator(train, nn, modelArgs, iterCounter,
batch_size, fn):
m, n = train.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
batches.append(m)
while True:
# train = shuffle(train)
for i in range(len(batches) - 1):
start = batches[i]
end = batches[i + 1]
batch_data = train[start:end, :]
yield ([batch_data, nn, modelArgs, iterCounter, batch_size,
fn], {})
def fprime(theta, user_item_rating, NN, modelArg, counter, batch_size, fn):
cost, deriv = fn(
theta, user_item_rating, NN, modelArg, counter, batch_size)
return deriv
def getOptimizer(optimize, ae, evaluate, theta, train, test,
nn, modelArgs, iterCounter, batch_size, max_iter):
if optimize == "lbfgs":
optimizer = LBFGS(ae, evaluate, theta, getCostDeriv, train, test,
nn, modelArgs, iterCounter, batch_size, max_iter)
elif optimize == "rprop":
args = itertools.repeat(
([train, ae.nn, modelArgs, iterCounter, batch_size, getCostDeriv],
{}))
optimizer = rprop.Rprop(theta, fprime, args=args)
elif optimize == "rmsprop":
args = getMiniBatchParamsIterator(
train, ae.nn, modelArgs, iterCounter, batch_size,
getCostDerivBatch)
optimizer = rmsprop.RmsProp(
theta, fprime, 0.001, decay=0.0, step_adapt=False, step_rate_min=0,
step_rate_max=5.0, args=args)
else:
raise NotImplementedError("%s optimizer not implemented" % optimize)
return optimizer
| 3,937 | 35.803738 | 79 |
py
|
NNRec
|
NNRec-master/nn/autorec/ae_utils.py
|
class Counter(object):
"""docstring for Counter"""
def __init__(self):
super(Counter, self).__init__()
self.count = 0
def increment(self):
self.count += 1
class ModelArgs(object):
"""docstring for ModelArgs"""
def __init__(self, learn_rate=0.001, lamda=1.0, regularize_bias=True,
isDenoising=False, noisePercent=0.0, beta=None, momentum=0.8,
num_threads=16, mean=0.0, max_iter=200, optimizer=None,
batch_size=20000):
super(ModelArgs, self).__init__()
self.learn_rate = learn_rate
self.lamda = lamda
self.regularize_bias = regularize_bias
self.isDenoising = isDenoising
self.noisePercent = noisePercent
self.beta = beta
self.momentum = momentum
self.num_threads = num_threads
self.mean = mean
self.max_iter = max_iter
self.optimizer = optimizer
self.batch_size = batch_size
def __str__(self):
string = ""
for key in self.__dict__.keys():
string += "%s: %s\t" % (key, str(self.__dict__[key]))
return string
| 1,152 | 27.825 | 78 |
py
|
NNRec
|
NNRec-master/nn/autorec/learner.py
|
from utils.metrics.evaluate import EvaluateNN
from nn.blocks.networkConfigParser import NetworkConfigParser
from lossDeriv import *
from dataUtils.data import loadTrainTest
from ae import AE
from optimizers import getOptimizer
from ae_utils import Counter, ModelArgs
def train(config_path):
modelArgs = NetworkConfigParser.constructModelArgs(config_path, ModelArgs)
nn = NetworkConfigParser.constructNetwork(config_path)
train_path, test_path, save_path = NetworkConfigParser.getDataInfo(
config_path)
print nn
# TODO : Arguments
num_hid = nn.layers[1].num_units
shape = (None, nn.layers[0].num_units)
train, test, cold = loadTrainTest(train_path, test_path,
shape=shape)
ae = AE(nn, modelArgs)
evaluate = EvaluateNN(ae)
theta = ae.nn.getFlattenParams()
ae.setParameters(theta)
iterCounter = Counter()
optimizer = getOptimizer(modelArgs.optimizer, ae, evaluate, theta,
train, test, nn, modelArgs, iterCounter,
modelArgs.batch_size,
modelArgs.max_iter[0])
optimizer.step_grow = 5.0
k = 0
for info in optimizer:
print "Iteration %d" % k
if k == 5:
optimizer.step_grow = 1.2
if k % 5 == 0:
ae.setParameters(theta)
rmse, mae = evaluate.calculateRMSEandMAE(train, test)
print "Fold :%d Test RMSE: %f Test MAE: %f" % (i,
rmse, mae)
if k > modelArgs.max_iter[0]:
break
k += 1
if save_path:
_theta = ae.getParameters()
np.save(save_path, _theta)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Description')
parser.add_argument(
'--config', '-c', help='configuration file', required=True)
args = parser.parse_args()
config_path = args.config
i = 1
train(config_path)
| 2,029 | 33.40678 | 78 |
py
|
NNRec
|
NNRec-master/nn/autorec/ae.py
|
import numpy as np
import cPickle as pkl
from cython_matmul import *
from lossDeriv import *
from nn.blocks.activations import *
from nn.blocks.nn import *
class AE:
def __init__(self, nn, modelArgs, debug=True):
self.nn = nn
self.debug = debug
self.modelArgs = modelArgs
self.nn.setLimits()
def setParameters(self, theta):
weights = []
for i in range(len(self.nn.weights_limit) - 1):
weight = theta[self.nn.weights_limit[i]:self.nn.weights_limit[
i + 1]].reshape(self.nn.weights[i].shape)
weights.append(weight)
biases = []
offset = self.nn.weights_limit[-1]
for i in range(len(self.nn.bias_limit) - 1):
bias = theta[offset + self.nn.bias_limit[i]:offset +
self.nn.bias_limit[i + 1]]
bias = bias.reshape(self.nn.layers[i + 1].bias.shape)
biases.append(bias)
self.nn.weights = weights
self.nn.biases = biases
def getParameters(self):
params = []
for weight in self.nn.weights:
params.append(weight.flatten())
for bias in self.nn.biases:
params.append(bias.flatten())
return np.concatenate(params)
def predict(self, train, test):
inputActivation = train
for i in range(len(self.nn.layers) - 2):
if scipy.sparse.isspmatrix(inputActivation):
forward = inputActivation * self.nn.weights[i]
else:
forward = np.dot(inputActivation, self.nn.weights[i])
if self.nn.layers[i].dropout is not None:
forward *= (1 - self.nn.layers[i].dropout)
inputActivation = self.nn.layers[
i + 1].activation.activation(forward + self.nn.biases[i])
if self.nn.layers[i + 1].isBinary():
inputActivation = self.nn.layers[
i + 1].activation.binarize(inputActivation)
output_layer = self.nn.layers[-1]
if output_layer.isPartial():
output = multiplyOuterSparseLayer(inputActivation,
self.nn.weights[-1],
self.nn.biases[-1], test.data,
test.indices, test.indptr,
self.modelArgs.num_threads)
else:
output = np.dot(
inputActivation, self.nn.weights[-1]) + self.nn.biases[-1]
if self.nn.layers[-2].hasDropout():
output *= (1 - self.nn.layers[-2].dropout)
output = output_layer.activation.activation(output)
if self.modelArgs.mean > 0.0:
output += self.modelArgs.mean
if output_layer.isPartial():
_max, _min = train.data.max(), train.data.min()
output[output > _max] = _max
output[output < _min] = _min
output = scipy.sparse.csr_matrix((output, test.indices,
test.indptr), shape=test.shape)
return output
def getActivationOfLayer(self, train, layerno):
inputActivation = train
assert((layerno > 0) and (layerno < len(self.nn.layers)))
for i in range(layerno):
if scipy.sparse.isspmatrix(inputActivation):
forward = inputActivation * self.nn.weights[i]
else:
forward = np.dot(inputActivation, self.nn.weights[i])
if self.nn.layers[i].dropout is not None:
forward *= (1 - self.nn.layers[i].dropout)
inputActivation = self.nn.layers[
i + 1].activation.activation(forward + self.nn.biases[i])
return inputActivation
def saveModel(self, path):
print "Saving model to path : ", path
pkl.dump(self, open(path, "wb"))
| 3,899 | 37.613861 | 77 |
py
|
NNRec
|
NNRec-master/nn/autorec/lossDeriv.py
|
import numpy as np
import scipy.sparse
from nn.blocks.cython_activations import *
from cython_matmul import *
from nn.blocks.nn import LayerType
# from sklearn.utils import shuffle
from copy import deepcopy
EPS = 10e-15
def _getLossUpdateDerivative(batch_data, weights, biases,
dWeights, dBiases, NN, modelArg):
batch_shape = batch_data.shape
######################Forward pass######################
fActivation = []
layerInput = batch_data
cost = 0.0
for l, layer in enumerate(NN.layers):
if layer.layerType == LayerType.INPUT:
activation = layerInput
elif layer.layerType == LayerType.HIDDEN:
if scipy.sparse.isspmatrix(layerInput):
x = layerInput * weights[l - 1] + biases[l - 1]
else:
x = np.dot(layerInput, weights[l - 1]) + biases[l - 1]
activation = layer.activation.activation(x)
elif layer.layerType == LayerType.OUTPUT:
if layer.isPartial():
x = multiplyOuterSparseLayer(layerInput, weights[l - 1],
biases[l - 1],
batch_data.data,
batch_data.indices,
batch_data.indptr,
modelArg.num_threads)
activation = layer.activation.activation(x)
activation = scipy.sparse.csr_matrix((activation,
batch_data.indices,
batch_data.indptr),
shape=batch_shape)
else:
x = np.dot(layerInput, weights[l - 1]) + biases[l - 1]
activation = layer.activation.activation(x)
if (layer.dropout is not None) and (layer.dropout != 0):
dropout(activation, layer.dropout)
fActivation.append(activation)
# binarize for the forward propagation
if layer.isBinary():
layerInput = layer.activation.binarize(activation)
else:
layerInput = activation
######################Calculate error######################
# sparse csr matrix
if NN.layers[-1].isPartial():
diff = fActivation[-1].data - batch_data.data
else:
diff = fActivation[-1] - batch_data
sum_of_squares_error = 0.5 * np.sum(np.power(diff, 2))
cost += sum_of_squares_error
######################BackPropagation######################
l = len(NN.layers) - 1
for layer in NN.layers[::-1]:
if layer.layerType == LayerType.OUTPUT:
if layer.isPartial():
delta = np.multiply(
diff, layer.activation.derivative(fActivation[l].data))
delta = scipy.sparse.csr_matrix((delta,
batch_data.indices,
batch_data.indptr),
shape=batch_shape)
else:
delta = np.multiply(
diff, layer.activation.derivative(fActivation[l]))
if (scipy.sparse.isspmatrix(fActivation[l - 1]) or
scipy.sparse.isspmatrix(delta)):
wderiv = fActivation[l - 1].T * delta
else:
wderiv = np.dot(fActivation[l - 1].T, delta)
bderiv = delta.sum(axis=0)
dWeights[l - 1] += wderiv
dBiases[l - 1] += bderiv
if layer.layerType == LayerType.HIDDEN:
if layer.isSparse():
rho_hat = fActivation[l].sum(
axis=0) / fActivation[l].shape[0]
rho = layer.sparsity
KL_divergence = modelArg.beta * np.sum(
rho * np.log(rho / rho_hat) +
(1 - rho) * np.log((1 - rho) / ((1 - rho_hat) + EPS)))
cost += KL_divergence
KL_grad = modelArg.beta * \
(-(rho / rho_hat) +
((1 - rho) / ((1 - rho_hat) + EPS)))
if scipy.sparse.issparse(delta):
if layer.isSparse():
delta = np.multiply(
delta * weights[l].T + KL_grad,
layer.activation.derivative(fActivation[l]))
else:
delta = np.multiply(
delta * weights[l].T,
layer.activation.derivative(fActivation[l]))
else:
if layer.isSparse():
delta = np.multiply(
np.dot(delta, weights[l].T) + KL_grad,
layer.activation.derivative(fActivation[l]))
else:
delta = np.multiply(
np.dot(delta, weights[l].T),
layer.activation.derivative(fActivation[l]))
if (scipy.sparse.isspmatrix(fActivation[l - 1])
or scipy.sparse.isspmatrix(delta)):
wderiv = fActivation[l - 1].T * delta
else:
wderiv = np.dot(fActivation[l - 1].T, delta)
dWeights[l - 1] += wderiv
if layer.isBiasEnabled:
bderiv = delta.sum(axis=0)
dBiases[l - 1] += bderiv
l = l - 1
return cost
def getCostDeriv(theta, user_item_rating, NN,
modelArg, counter, batch_size):
counter.increment()
##################################### Unrolling/ Initialization ##########
weights = []
for i in range(len(NN.weights_limit) - 1):
weight = theta[NN.weights_limit[i]:NN.weights_limit[i + 1]]
weight = weight.reshape(NN.weights[i].shape)
weights.append(weight)
biases = []
offset = NN.weights_limit[-1]
for i in range(len(NN.bias_limit) - 1):
bias = theta[offset + NN.bias_limit[i]:offset +
NN.bias_limit[i + 1]].reshape(NN.layers[i + 1].bias.shape)
biases.append(bias)
dWeights = []
for weight in weights:
dWeights.append(np.zeros(shape=weight.shape))
dBiases = []
for bias in biases:
dBiases.append(np.zeros(shape=bias.shape))
##################################### Batch loop #########################
m, n = user_item_rating.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
batches.append(m)
cost = 0.0
for i in range(len(batches) - 1):
start = batches[i]
end = batches[i + 1]
batch_data = user_item_rating[start:end, :]
loss = _getLossUpdateDerivative(batch_data, weights, biases,
dWeights, dBiases, NN, modelArg)
cost += loss
if not modelArg.regularize_bias:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
else:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
        weight_decay += reduce(
lambda x, y: x + y, map(lambda z:
np.power(
biases[z], 2).sum() *
modelArg.lamda[z],
range(len(biases))))
cost += weight_decay
for i in range(len(dWeights)):
# dWeights[i] += modelArg.lamda * weights[i]
dWeights[i] += 2 * modelArg.lamda[i] * weights[i]
if modelArg.regularize_bias:
for i in range(len(dBiases)):
# dBiases[i] += modelArg.lamda * biases[i]
dBiases[i] += 2 * modelArg.lamda[i] * biases[i]
theta_grad = np.concatenate(map(lambda x: x.flatten(), dWeights + dBiases))
return [cost, theta_grad]
def getCostDerivBatch(theta, user_item_rating, NN,
modelArg, counter, batch_size):
counter.increment()
# user_item_rating = shuffle(user_item_rating)
##################################### Unrolling/ Initialization ##########
weights = []
for i in range(len(NN.weights_limit) - 1):
weight = theta[NN.weights_limit[i]:NN.weights_limit[i + 1]]
weight = weight.reshape(NN.weights[i].shape)
weights.append(weight)
biases = []
offset = NN.weights_limit[-1]
for i in range(len(NN.bias_limit) - 1):
bias = theta[offset + NN.bias_limit[i]:offset +
NN.bias_limit[i + 1]].reshape(NN.layers[i + 1].bias.shape)
biases.append(bias)
dWeights = []
for weight in weights:
dWeights.append(np.zeros(shape=weight.shape))
dBiases = []
for bias in biases:
dBiases.append(np.zeros(shape=bias.shape))
##################################### Batch loop #########################
m, n = user_item_rating.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
batches.append(m)
cost = 0.0
for i in range(len(batches) - 1):
start = batches[i]
end = batches[i + 1]
batch_data = user_item_rating[start:end, :]
loss = _getLossUpdateDerivative(batch_data, weights, biases,
dWeights, dBiases, NN, modelArg)
cost += loss
if not modelArg.regularize_bias:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
else:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
        weight_decay += reduce(
lambda x, y: x + y, map(lambda z:
np.power(
biases[z], 2).sum() *
modelArg.lamda[z],
range(len(biases))))
cost += weight_decay
for i in range(len(dWeights)):
dWeights[i] += 2 * modelArg.lamda[i] * weights[i]
if modelArg.regularize_bias:
for i in range(len(dBiases)):
dBiases[i] += 2 * modelArg.lamda[i] * biases[i]
theta_grad = np.concatenate(
map(lambda x: x.flatten(), dWeights + dBiases))
return [cost, theta_grad]
def updateSGD(user_item_rating, NN, modelArg, counter, batch_size,
alpha, dWeights_old, dBiases_old):
counter.increment()
# user_item_rating = shuffle(user_item_rating)
weights = NN.weights
biases = NN.biases
dWeights = []
for weight in weights:
dWeights.append(np.zeros(shape=weight.shape))
dBiases = []
for bias in biases:
dBiases.append(np.zeros(shape=bias.shape))
##################################### Batch loop #########################
m, n = user_item_rating.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
batches.append(m)
cost = 0.0
for i in range(len(batches) - 1):
start = batches[i]
end = batches[i + 1]
batch_data = user_item_rating[start:end, :]
loss = _getLossUpdateDerivative(batch_data, weights, biases,
dWeights, dBiases, NN, modelArg)
cost += loss
if not modelArg.regularize_bias:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
else:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
        weight_decay += reduce(
lambda x, y: x + y, map(lambda z:
np.power(
biases[z], 2).sum() *
modelArg.lamda[z],
range(len(biases))))
cost += weight_decay
for i in range(len(dWeights)):
# dWeights[i] += modelArg.lamda * weights[i]
dWeights[i] += 2 * modelArg.lamda[i] * weights[i]
if modelArg.regularize_bias:
for i in range(len(dBiases)):
# dBiases[i] += modelArg.lamda * biases[i]
dBiases[i] = dBiases[i].reshape(dBiases_old[i].shape)
dBiases[i] += 2 * modelArg.lamda[i] * biases[i]
for i in range(len(weights)):
temp_wderiv = (
alpha * dWeights[i] + dWeights_old[i] * modelArg.momentum)
weights[i] -= temp_wderiv
dWeights_old[i] = temp_wderiv
for i in range(len(biases)):
temp_bderiv = (
alpha * dBiases[i] + dBiases_old[i] * modelArg.momentum)
biases[i] -= temp_bderiv
dBiases_old[i] = temp_bderiv
return dWeights_old, dBiases_old
def updateAdagrad(user_item_rating, NN, modelArg, counter, batch_size,
alpha, dWeights_old, dBiases_old):
counter.increment()
# user_item_rating = shuffle(user_item_rating)
weights = NN.weights
biases = NN.biases
dWeights = []
for weight in weights:
dWeights.append(np.zeros(shape=weight.shape))
dBiases = []
for bias in biases:
dBiases.append(np.zeros(shape=bias.shape))
##################################### Batch loop #########################
m, n = user_item_rating.shape
batches = range(0, m, batch_size)
if batches[-1] != m:
batches.append(m)
cost = 0.0
for i in range(len(batches) - 1):
start = batches[i]
end = batches[i + 1]
batch_data = user_item_rating[start:end, :]
loss = _getLossUpdateDerivative(batch_data, weights, biases,
dWeights, dBiases, NN, modelArg)
cost += loss
if not modelArg.regularize_bias:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
else:
weight_decay = reduce(
lambda x, y: x + y, map(lambda z:
np.power(
weights[z], 2).sum() *
modelArg.lamda[z],
range(len(weights))))
        weight_decay += reduce(
lambda x, y: x + y, map(lambda z:
np.power(
biases[z], 2).sum() *
modelArg.lamda[z],
range(len(biases))))
cost += weight_decay
for i in range(len(dWeights)):
# dWeights[i] += modelArg.lamda * weights[i]
dWeights[i] += 2 * modelArg.lamda[i] * weights[i]
if modelArg.regularize_bias:
for i in range(len(dBiases)):
# dBiases[i] += modelArg.lamda * biases[i]
dBiases[i] = dBiases[i].reshape(dBiases_old[i].shape)
dBiases[i] += 2 * modelArg.lamda[i] * biases[i]
    if counter.count == 1:
        # First call: only seed the squared-gradient accumulators and skip the
        # parameter update (the accumulators are still zero at this point).
        for i in range(len(weights)):
            dWeights_old[i] += np.power(dWeights[i], 2)
        for i in range(len(biases)):
            dBiases_old[i] += np.power(dBiases[i], 2)
        return dWeights_old, dBiases_old
for i in range(len(weights)):
temp_wderiv = np.divide(
dWeights[i], np.sqrt(dWeights_old[i] + 1)) * alpha
weights[i] -= temp_wderiv
dWeights_old[i] += np.power(dWeights[i], 2)
for i in range(len(biases)):
temp_bderiv = np.divide(
dBiases[i], np.sqrt(dBiases_old[i]) + 1) * alpha
biases[i] -= temp_bderiv
dBiases_old[i] += np.power(dBiases[i], 2)
return dWeights_old, dBiases_old
def trainSGD(train, test, num_iter, evaluate, weights, biases, learn_rate, modelArg, NN, counter, batch_size, driver=False):
old_rmse = float("inf")
dWeights_old = []
for weight in weights:
dWeights_old.append(np.zeros(shape=weight.shape))
dBiases_old = []
for bias in biases:
dBiases_old.append(np.zeros(shape=bias.shape))
for i in range(num_iter):
# t = shuffle(train)
t = train
dWeights_old, dBiases_old = updateSGD(t, NN, modelArg, counter,
batch_size, learn_rate,
dWeights_old, dBiases_old)
if (i % 5 == 0):
trmse, tmae = evaluate.calculateRMSEandMAE(train, test)
rmse, mae = evaluate.calculateRMSEandMAE(train, train)
sign = "+" if rmse < old_rmse else "-"
print "Fold :%d Test RMSE: %f MAE: %f \t %s" % (i, trmse, tmae, sign)
# print "Fold :%d Train RMSE: %f MAE: %f" % (i, rmse, mae)
if driver:
if rmse < old_rmse:
bestWeights = deepcopy(NN.weights)
bestBiases = deepcopy(NN.biases)
learn_rate *= 1.01
old_rmse = rmse
elif rmse > old_rmse:
NN.weights = bestWeights
NN.biases = bestBiases
print "Reducing learning rate"
learn_rate *= 0.5
if learn_rate < EPS:
break
def trainAdagrad(train, test, num_iter, evaluate, weights, biases, learn_rate, modelArg, NN, counter, batch_size, driver=False):
old_rmse = float("inf")
dWeights_old = []
for weight in weights:
dWeights_old.append(np.zeros(shape=weight.shape))
dBiases_old = []
for bias in biases:
dBiases_old.append(np.zeros(shape=bias.shape))
for i in range(num_iter):
# t = shuffle(train)
        t = train
dWeights_old, dBiases_old = updateAdagrad(t, NN, modelArg, counter,
batch_size, learn_rate,
dWeights_old, dBiases_old)
if (i % 5 == 0):
rmse, mae = evaluate.calculateRMSEandMAE(train, test)
print "Fold :%d Test RMSE: %f MAE: %f" % (i, rmse, mae)
| 19,732 | 38.152778 | 128 |
py
|
NNRec
|
NNRec-master/nn/autorec/__init__.py
| 0 | 0 | 0 |
py
|
|
NNRec
|
NNRec-master/nn/autorec/setup_matmul.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_module = Extension(
"cython_matmul",
["cython_matmul.pyx"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs=[np.get_include()]
)
setup(
name='cython helpers',
cmdclass={'build_ext': build_ext},
ext_modules=[ext_module],
include_dirs=[np.get_include()]
)
| 459 | 22 | 41 |
py
|
NNRec
|
NNRec-master/nn/autorec/modelLoader.py
|
from ae import AE
import numpy as np
from nn.blocks.networkConfigParser import NetworkConfigParser
from dataUtils.data import Data, loadTestData
from utils.metrics.evaluate import EvaluateNN
from ae_utils import Counter, ModelArgs
def loadModel(config_path):
modelArgs = NetworkConfigParser.constructModelArgs(config_path, ModelArgs)
nn = NetworkConfigParser.constructNetwork(config_path)
train_path, test_path, save_path = NetworkConfigParser.getDataInfo(
config_path)
ae = AE(nn, modelArgs)
theta = np.load(save_path + ".npy")
ae.setParameters(theta)
return ae
def loadData(config_path):
train_path, test_path, save_path = NetworkConfigParser.getDataInfo(
config_path)
nn = NetworkConfigParser.constructNetwork(config_path)
d = Data()
d.import_ratings(train_path, shape=(None, nn.layers[0].num_units))
train = d.R.copy()
test = loadTestData(d, test_path)
return train, test
def LoadDataAndMapping(config_path):
train_path, test_path, save_path = NetworkConfigParser.getDataInfo(
config_path)
nn = NetworkConfigParser.constructNetwork(config_path)
d = Data()
d.import_ratings(train_path, shape=(None, nn.layers[0].num_units))
train = d.R.copy()
test = loadTestData(d, test_path)
usermap = {v: k for k, v in d.users.items()}
itemmap = {v: k for k, v in d.items.items()}
return train, test, usermap, itemmap
# def evaluateFolds(config_path, nfolds):
# rmses = []
# maes = []
# for i in range(1, nfolds + 1):
# model = loadModel(config_path)
# train, test = loadData(config_path)
# evaluate = EvaluateNN(model)
# rmse, mae = evaluate.calculateRMSEandMAE(train, test)
# rmses.append(rmse)
# maes.append(mae)
# return rmses, maes
# if __name__ == '__main__':
# import argparse
# from utils.statUtil import getMeanCI
# parser = argparse.ArgumentParser(description='Description')
# parser.add_argument(
# '--config', '-c', help='configuration file', required=True)
# parser.add_argument(
# '--nfold', '-n', help='number of folds ', required=True)
# args = parser.parse_args()
# nfolds = int(args.nfold)
# config_path = args.config
# rmses, maes = evaluateFolds(config_path, nfolds)
# ci_rmse = getMeanCI(rmses, 0.95)
# ci_mae = getMeanCI(maes, 0.95)
# print ci_rmse
# print ci_mae
| 2,433 | 32.342466 | 78 |
py
|
NNRec
|
NNRec-master/dataUtils/data.py
|
import envoy
import progressbar
import scipy.sparse
class Data(object):
def __init__(self):
self.users = {}
self.items = {}
self.nusers = 0
self.nitems = 0
self.include_time = False
def update_user_item(self, user, item):
if user not in self.users:
self.users[user] = self.nusers
self.nusers += 1
if item not in self.items:
self.items[item] = self.nitems
self.nitems += 1
def import_ratings(self, filename, shape=None):
r = envoy.run('wc -l {}'.format(filename))
num_lines = int(r.std_out.strip().partition(' ')[0])
bar = progressbar.ProgressBar(maxval=num_lines, widgets=["Loading ratings: ",
progressbar.Bar(
'=', '[', ']'),
' ', progressbar.Percentage(),
' ', progressbar.ETA()]).start()
I, J, V = [], [], []
with open(filename) as f:
for i, line in enumerate(f):
if (i % 1000) == 0:
bar.update(i % bar.maxval)
userid, itemid, rating = line.split()
self.update_user_item(userid, itemid)
uid = self.users[userid]
iid = self.items[itemid]
I.append(uid)
J.append(iid)
V.append(float(rating))
bar.finish()
if shape is not None:
_shape = (self.nusers if shape[0] is None else shape[0],
self.nitems if shape[1] is None else shape[1])
R = scipy.sparse.coo_matrix(
(V, (I, J)), shape=_shape)
else:
R = scipy.sparse.coo_matrix(
(V, (I, J)), shape=(self.nusers, self.nitems))
self.R = R.tocsr()
def loadTestData(d, testpath):
r = envoy.run('wc -l {}'.format(testpath))
num_lines = int(r.std_out.strip().partition(' ')[0])
bar = progressbar.ProgressBar(maxval=num_lines, widgets=['Loading test ratings: ',
progressbar.Bar(
'=', '[', ']'),
' ', progressbar.Percentage(),
' ', progressbar.ETA()]).start()
users = set(d.users.keys())
items = set(d.items.keys())
cold_start_ratings = []
I, J, V = [], [], []
with open(testpath) as fp:
for i, line in enumerate(fp):
if (i % 1000) == 0:
bar.update(i % bar.maxval)
user, item, rating = map(
lambda x: x.lower(), line.strip().split("\t"))
if user in users and item in items:
I.append(d.users[user])
J.append(d.items[item])
V.append(float(rating))
else:
cold_start_ratings.append(float(rating))
bar.finish()
R = scipy.sparse.coo_matrix(
(V, (I, J)), shape=(len(d.users), len(d.items)))
return R.tocsr(), cold_start_ratings
def loadColdStartTestData(d, testpath):
users = set(d.users.keys())
items = set(d.items.keys())
cold_start_ratings = []
with open(testpath) as fp:
for i, line in enumerate(fp):
user, item, rating = map(
lambda x: x.lower(), line.strip().split("\t"))
if (user not in users) or (item not in items):
cold_start_ratings.append(float(rating))
return cold_start_ratings
def loadTrainTest(train_path, test_path, shape=None):
d = Data()
d.import_ratings(train_path, shape)
test, cold = loadTestData(d, test_path)
train = d.R.copy()
return train, test, cold
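# Hedged sketch of the plain-text ratings format the loaders above expect: one
# "user<TAB>item<TAB>rating" triple per line (import_ratings splits on
# whitespace, loadTestData splits on tabs and lower-cases the ids). The file
# name and rows here are hypothetical.
if __name__ == "__main__":
    rows = [("u1", "item42", 4.0), ("u2", "item7", 3.5)]
    with open("ratings_example.tsv", "w") as fp:
        for user, item, rating in rows:
            fp.write("%s\t%s\t%s\n" % (user, item, rating))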
| 3,977 | 36.17757 | 97 |
py
|
NNRec
|
NNRec-master/dataUtils/__init__.py
| 0 | 0 | 0 |
py
|
|
NNRec
|
NNRec-master/utils/statUtil.py
|
from scipy import stats
import math
def getConfidenceInterval(data, percent, distribution="t"):
n, min_max, mean, var, skew, kurt = stats.describe(data)
std = math.sqrt(var)
if distribution == "t":
R = stats.t.interval(
percent, len(data) - 1, loc=mean, scale=std / math.sqrt(len(data)))
else:
R = stats.norm.interval(
percent, loc=mean, scale=std / math.sqrt(len(data)))
return mean, R
def getMeanCI(data, percent, distribution="t"):
mean, errors = getConfidenceInterval(data, percent)
return mean, (errors[1] - errors[0]) / 2.0
if __name__ == '__main__':
import numpy as np
s = np.array([3, 4, 4, 4, 5, 5, 5, 5, 4, 4, 4, 6])
print getConfidenceInterval(s, 0.95)
| 749 | 29 | 79 |
py
|
NNRec
|
NNRec-master/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
NNRec
|
NNRec-master/utils/datetimeUtils.py
|
import ciso8601
import datetime
import envoy
from datetime import timedelta
def parseDateTime(datetimestring):
return ciso8601.parse_datetime(datetimestring)
def getDaysSinceX(inputdata, reference=None):
if reference is None:
reference = datetime.datetime.now()
input_time = parseDateTime(inputdata)
return (reference - input_time).total_seconds() / (86400.0)
def testStartDate(path, days=1):
r = envoy.run("tail -1 {}".format(path))
date = r.std_out.partition(" ")[0].strip().split("\t")[-1]
purchased_time = parseDateTime(date)
return purchased_time + timedelta(days=days)
def getepochs(datetimestring):
dt = parseDateTime(datetimestring)
return int(dt.strftime("%s"))
| 727 | 25 | 63 |
py
|
NNRec
|
NNRec-master/utils/metrics/evaluate.py
|
from math import fabs, sqrt
import numpy as np
import scipy.sparse
class Evaluate(object):
"""docstring for Evaluate"""
def __init__(self, predictor):
super(Evaluate, self).__init__()
self.predictor = predictor
def calculateRMSEandMAE(self, test):
pass
class EvaluateConstant(Evaluate):
def __init__(self, predictor):
super(Evaluate, self).__init__()
self.predictor = predictor
def calculateRMSEandMAE(self, test, ):
rmse = 0.0
mae = 0.0
count = 0
for user in test:
ratings = test[user]["ratings"]
for actual in ratings:
predicted = self.predictor.predict(user, 1)
rmse += (actual - predicted) ** 2
mae += fabs(actual - predicted)
count += 1
return [sqrt(rmse / count), mae / count]
class EvaluateNN(Evaluate):
"""docstring for EvaluateRBM"""
def __init__(self, predictor, scale=1.0, default=3.0):
super(EvaluateNN, self).__init__(predictor)
self.scale = scale
self.default = default
def calculateRMSEandMAE(self, train, test, cold=None):
predictions = self.predictor.predict(train, test)
if scipy.sparse.isspmatrix(train):
predictions.data = predictions.data * self.scale
err = np.fabs(predictions.data - test.data * self.scale)
total_instances = len(test.data)
else:
err = np.fabs(predictions - test * self.scale)
total_instances = test.size
cold_err = []
if cold is not None:
cold_err = map(lambda x: np.fabs(x - self.default), cold)
total_instances += len(cold)
cold_err = np.array(cold_err)
rmse = np.sqrt(
(np.power(err, 2).sum() + np.power(cold_err, 2).sum()) / (total_instances))
mae = (err.sum() + cold_err.sum()) / total_instances
return [rmse, mae]
class EvaluateRBM(EvaluateNN):
"""docstring for EvaluateRBM"""
def __init__(self, predictor, scale=1.0, default=3.0):
super(EvaluateRBM, self).__init__(predictor, scale, default)
def calculateRMSEandMAE(self, btrain, btest, test,
cold_ratings=None, default_rating=3.0):
predictions = self.predictor.predict(btrain, btest)
if scipy.sparse.isspmatrix(btrain):
predictions = predictions * self.scale
err = np.fabs(predictions - test.data)
total_instances = len(test.data)
else:
err = np.fabs(predictions - test)
total_instances = test.size
cold_err = []
if cold_ratings:
for rating in cold_ratings:
cold_err.append(np.fabs(rating - default_rating))
total_instances += len(cold_err)
cold_err = np.array(cold_err)
# print(np.power(err, 2).sum() + np.power(cold_err, 2).sum())
rmse = np.sqrt((np.power(err, 2).sum() +
np.power(cold_err, 2).sum()) / total_instances)
mae = (err.sum() + cold_err.sum()) / total_instances
return [rmse, mae]
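# Standalone numeric sketch (plain NumPy, made-up values) of the cold-start
# handling above: ratings for unseen users/items are scored against a default
# rating and pooled with the model errors before taking RMSE/MAE.
if __name__ == "__main__":
    err = np.array([0.5, 1.0, 0.2])                  # |prediction - truth| on warm ratings
    cold_err = np.fabs(np.array([4.0, 2.0]) - 3.0)   # cold ratings vs default=3.0
    n = len(err) + len(cold_err)
    rmse = np.sqrt((np.power(err, 2).sum() + np.power(cold_err, 2).sum()) / n)
    mae = (err.sum() + cold_err.sum()) / n
    print(rmse, mae)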
| 3,157 | 31.556701 | 87 |
py
|
NNRec
|
NNRec-master/utils/metrics/__init__.py
| 0 | 0 | 0 |
py
|
|
Multi2WOZ
|
Multi2WOZ-main/specialization/trainer_self.py
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from transformers.integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.optimization import Adafactor, AdamW, get_scheduler
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
EarlyStoppingCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
from transformers.utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap import auto_wrap
else:
FullyShardedDDP = None
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class TrainerSelf:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
            able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
            layers, dropout probabilities, etc.).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping metric names (strings) to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
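
    Example (a minimal, illustrative sketch; the checkpoint name, the toy dataset and the output
    directory below are placeholders rather than values required by this class)::

        import torch
        from torch.utils.data import Dataset
        from transformers import AutoModelForSequenceClassification, AutoTokenizer, TrainingArguments

        class ToyDataset(Dataset):
            def __init__(self, encodings, labels):
                self.encodings, self.labels = encodings, labels
            def __len__(self):
                return len(self.labels)
            def __getitem__(self, i):
                item = {k: torch.tensor(v[i]) for k, v in self.encodings.items()}
                item["labels"] = torch.tensor(self.labels[i])
                return item

        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
        dataset = ToyDataset(tokenizer(["good", "bad"], truncation=True, padding=True), [1, 0])

        trainer = TrainerSelf(
            model=model,
            args=TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1),
            train_dataset=dataset,
            eval_dataset=dataset,
            tokenizer=tokenizer,
        )
        trainer.train()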
"""
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
do_save_full_model: bool = True,
do_save_adapters: bool = False,
do_save_adapter_fusion: bool = False,
adapter_names: Optional[List[List[str]]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
        # Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a model that is much bigger than a single GPU
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        # and we only use deepspeed for training at the moment
        # 3. full fp16 eval - since the model needs to be cast to half precision first
# 4. Sharded DDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
# adapters used
self.do_save_full_model = do_save_full_model
self.do_save_adapters = do_save_adapters
self.do_save_adapter_fusion = do_save_adapter_fusion
if adapter_names is not None:
self.model.set_active_adapters(adapter_names)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
        Add a callback to the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will instantiate a member of that class.
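
        Example (illustrative; assumes ``trainer`` is an existing instance of this class)::

            # ``EarlyStoppingCallback`` and ``PrinterCallback`` are already imported at the top of this module.
            trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=3))
            # Passing the class instead of an instance also works; it will be instantiated for you.
            trainer.add_callback(PrinterCallback)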
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.
        If the callback is not found, returns :obj:`None` (and no error is raised).
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will pop the first member of that class found in the list of callbacks.
        Returns:
            :class:`~transformers.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, lengths=lengths, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
)
        else:
            if self.args.world_size <= 1:
                # Custom behaviour of this trainer: use a random sampler for smaller datasets and
                # fall back to a sequential sampler once the dataset has 200000 or more examples.
                if len(self.train_dataset) < 200000:
                    return RandomSampler(self.train_dataset)
                else:
                    logger.info(f"Using SequentialSampler over {len(self.train_dataset)} training examples")
                    return SequentialSampler(self.train_dataset)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
)
else:
return DistributedSampler(
self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
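
        Example (a sketch of a subclass override; ``MyTrainer`` is a hypothetical name)::

            from torch.utils.data import DataLoader

            class MyTrainer(TrainerSelf):
                def get_train_dataloader(self) -> DataLoader:
                    # e.g. pin the number of workers regardless of the training arguments
                    return DataLoader(
                        self.train_dataset,
                        batch_size=self.args.train_batch_size,
                        sampler=self._get_train_sampler(),
                        collate_fn=self.data_collator,
                        num_workers=2,
                    )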
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
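
        Example (illustrative; ``trainer`` and ``other_eval_dataset`` are placeholders)::

            eval_loader = trainer.get_eval_dataloader()                     # uses ``self.eval_dataset``
            other_loader = trainer.get_eval_dataloader(other_eval_dataset)  # overrides it for this call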
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
        and/or :obj:`create_scheduler`).
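
        Example (illustrative; ``model``, ``args`` and ``train_dataset`` are assumed to already exist)::

            from torch.optim import SGD
            from torch.optim.lr_scheduler import LambdaLR

            optimizer = SGD(model.parameters(), lr=0.01)
            scheduler = LambdaLR(optimizer, lr_lambda=lambda step: 0.95 ** step)
            trainer = TrainerSelf(
                model=model, args=args, train_dataset=train_dataset, optimizers=(optimizer, scheduler)
            )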
"""
self.create_optimizer()
self.create_scheduler(num_training_steps)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method.
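
        Example (a sketch of a subclass override; ``AdafactorTrainer`` is a hypothetical name)::

            from transformers.optimization import Adafactor  # mirrors the module-level import

            class AdafactorTrainer(TrainerSelf):
                def create_optimizer(self):
                    if self.optimizer is None:
                        self.optimizer = Adafactor(
                            self.model.parameters(),
                            lr=self.args.learning_rate,
                            scale_parameter=False,
                            relative_step=False,
                        )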
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
if hasattr(self.model, "config") and hasattr(self.model.config, "adapter_fusion_models"):
no_decay = [f"adapter_fusion_layer.{n}.value" for n in self.model.config.adapter_fusion_models]
decay_parameters = [name for name in decay_parameters if name not in no_decay]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
        Will raise an exception if the underlying dataset does not implement :obj:`__len__`.
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyway.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_dp_enabled():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
                Additional keyword arguments used to hide deprecated arguments.
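
        Example (illustrative; the checkpoint path is a placeholder)::

            trainer.train()                                             # train from scratch
            trainer.train(resume_from_checkpoint=True)                  # resume from the last checkpoint in ``args.output_dir``
            trainer.train(resume_from_checkpoint="out/checkpoint-500")  # resume from a specific checkpoint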
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
adapter_reloaded = False
if resume_from_checkpoint is not None:
if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if self.deepspeed:
# will be resumed in init_deepspeed
pass
elif isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if os.path.isdir(resume_from_checkpoint):
for file_name in os.listdir(resume_from_checkpoint):
if os.path.isdir(os.path.join(resume_from_checkpoint, file_name)):
if "," in file_name:
self.model.load_adapter_fusion(os.path.join(resume_from_checkpoint, file_name))
adapter_reloaded = True
else:
                            self.model.load_adapter(os.path.join(resume_from_checkpoint, file_name))
adapter_reloaded = True
if not (os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) or adapter_reloaded):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
print("len_train_dataloader", len(train_dataloader))
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = model.module
self.model_wrapped = model
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if step % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# apply adapter fusion weight regularization on the value matrix
if (
hasattr(self.model.config, "adapter_fusion")
and self.model.config.adapter_fusion["regularization"]
):
fusion_reg_loss = self.model.base_model.get_fusion_regularization_loss()
fusion_reg_loss.backward()
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
if self.do_save_adapters:
logger.info("\n\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\n\n")
else:
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
if self.do_save_full_model:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(model, PreTrainedModel):
self.model = model.from_pretrained(self.state.best_model_checkpoint)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.do_save_adapters:
logger.info(
f"Loading best adapter(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# attempt to re-load all adapters from checkpoint
for adapter in self.model.config.adapters.adapters:
adapter_dir = os.path.join(self.state.best_model_checkpoint, adapter)
if os.path.exists(adapter_dir):
self.model.load_adapter(adapter_dir)
if self.do_save_adapter_fusion:
logger.info(
f"Loading best adapter fusion(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# attempt to re-load all adapter fusions from checkpoint
for fusion in self.model.config.adapter_fusion_models:
fusion_dir = os.path.join(self.state.best_model_checkpoint, fusion)
if os.path.exists(fusion_dir):
self.model.load_adapter_fusion(fusion_dir)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
            # Consolidate the state dict on all processes of dp_rank 0
opt_state_dict = self.optimizer.state_dict()
# Save it and the scheduler on the main process
if self.is_world_process_zero():
torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in init_deepspeed
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for a greater or lower objective. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
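
        Example (a sketch using the optuna backend; requires ``pip install optuna`` and a ``model_init``
        passed when the trainer was created)::

            def hp_space(trial):
                return {
                    "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
                    "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 3),
                }

            best_run = trainer.hyperparameter_search(
                hp_space=hp_space, n_trials=10, direction="minimize", backend="optuna"
            )
            print(best_run.hyperparameters)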
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
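
        Example (a sketch of a subclass using a class-weighted cross-entropy loss; assumes a two-label
        sequence classification model whose outputs expose ``logits``)::

            import torch
            from torch import nn

            class WeightedLossTrainer(TrainerSelf):
                def compute_loss(self, model, inputs, return_outputs=False):
                    labels = inputs.pop("labels")
                    outputs = model(**inputs)
                    logits = outputs.logits
                    # Weight the second class twice as heavily as the first (illustrative values).
                    loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0], device=logits.device))
                    loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
                    return (loss, outputs) if return_outputs else loss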
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
elif is_sagemaker_mp_enabled():
return smp.local_rank() == 0
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
elif is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
state_dict = self.model_wrapped.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.do_save_adapters:
self.model.save_all_adapters(output_dir)
if self.do_save_adapter_fusion:
self.model.save_all_adapter_fusions(output_dir)
if self.do_save_full_model:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.do_save_adapters:
self.model.save_all_adapters(output_dir)
if self.do_save_adapter_fusion:
self.model.save_all_adapter_fusions(output_dir)
if self.do_save_full_model:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
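    # Example of the ordering above: ["checkpoint-1000", "checkpoint-500", "checkpoint-1500"] is sorted by
    # the integer captured from the name (500, 1000, 1500) rather than lexicographically, and the best
    # checkpoint (if tracked) is swapped to the end so `_rotate_checkpoints` never deletes it.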
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (the default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
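    # Typical usage (sketch; assumes `trainer` is an instance of this class constructed with an
    # `eval_dataset` and, optionally, a `compute_metrics` function):
    #
    #     metrics = trainer.evaluate()
    #     print(metrics["eval_loss"])  # keys carry the `metric_key_prefix`, "eval" by default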
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "test_bleu" if the prefix is "test" (the default).
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
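    # Typical usage (sketch; `test_dataset` is any sized dataset with the columns the model expects):
    #
    #     output = trainer.predict(test_dataset)
    #     preds = output.predictions.argmax(-1)  # e.g. class ids for a classification head
    #     print(output.metrics)                  # keys prefixed with "test_" by default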
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
            # we flag this only when --do_train wasn't passed, since only then is the deepspeed config redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
            # The actual number of eval samples can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
        Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) and convert them to numpy before
        concatenating them to `gathered`.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| 102,647 | 46.765472 | 146 | py |
| Multi2WOZ | Multi2WOZ-main/specialization/opensubtitles.py |
import json
import os
import random
import datasets
# _CITATION = """
# """
# _DESCRIPTION = """
# """
class OpensubtitlesConfig(datasets.BuilderConfig):
"""BuilderConfig for Opensubtitles."""
def __init__(
self,
language,
data_dir,
**kwargs,
):
"""BuilderConfig for Opensubtitles.
Args:
language: `string`, which language in use
data_dir: `string`, directory to load the file from
**kwargs: keyword arguments forwarded to super.
"""
super(OpensubtitlesConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.language = language
self.data_dir = data_dir
class Opensubtitles(datasets.GeneratorBasedBuilder):
"""Opensubtitles Dataset."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
OpensubtitlesConfig(
name="cn",
description="chinese",
language="cn",
data_dir="../LangOpenSubtitles/rs-x/rs_dialogue_300K.en-zh.json",
),
OpensubtitlesConfig(
name="de",
description="german",
language="de",
data_dir="../LangOpenSubtitles/rs-x/rs_dialogue_300K.en-de.json",
),
OpensubtitlesConfig(
name="ar",
description="arabic",
language="ar",
data_dir="../LangOpenSubtitles/rs-x/rs_dialogue_300K.en-ar.json",
),
OpensubtitlesConfig(
name="ru",
description="russian",
language="ru",
data_dir="../LangOpenSubtitles/rs-x/rs_dialogue_300K.en-ru.json",
),
OpensubtitlesConfig(
name="cn-cn",
description="chinese-chinese",
language="cn-cn",
data_dir="../LangOpenSubtitles/rs-mono/rs_mono_dialogue_300K.zh-zh.json",
),
OpensubtitlesConfig(
name="de-de",
description="german-german",
language="de-de",
data_dir="../LangOpenSubtitles/rs-mono/rs_mono_dialogue_300K.de-de.json",
),
OpensubtitlesConfig(
name="ar-ar",
description="arabic-arabic",
language="ar-ar",
data_dir="../LangOpenSubtitles/rs-mono/rs_mono_dialogue_300K.ar-ar.json",
),
OpensubtitlesConfig(
name="ru-ru",
description="russian-russian",
language="ru-ru",
data_dir="../LangOpenSubtitles/rs-mono/rs_mono_dialogue_300K.ru-ru.json",
),
]
def _info(self):
return datasets.DatasetInfo(
description="",
features=datasets.Features(
{
"context": datasets.Value("string"),
"response": datasets.Value("string"),
"label": datasets.Value("int8"),
}
),
supervised_keys=None,
homepage="",
citation="",
)
def _split_generators(self, dl_manager):
data_file = dl_manager.download_and_extract(self.config.data_dir)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": data_file,
},),]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, "r") as f:
json_reader = json.load(f)
for id_, dial in enumerate(json_reader):
context = dial['context']
response = dial['response']
label = dial['label']
yield id_, {"context": context, "response": response, "label": label}
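# A minimal usage sketch for this builder (it mirrors how run_rs.py loads it; the cache_dir is illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("./opensubtitles.py", name="de", cache_dir="../LangOpenSubtitles/reload")
#     print(ds["train"][0])  # {"context": ..., "response": ..., "label": e.g. 1 for a true response pair}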
| 3,729 | 30.610169 | 98 | py |
| Multi2WOZ | Multi2WOZ-main/specialization/run_mlm.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
MultiLingAdapterArguments,
TrainingArguments,
IntervalStrategy,
set_seed,
)
from transformers.trainer_callback import EarlyStoppingCallback
from trainer_self import TrainerSelf
#from transformers import Trainer
from transformers.adapters.configuration import AdapterConfig
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.5.0")
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=False,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
patience: Optional[int] = field(
default=2,
metadata={
"help": "Number of epochs for early stopping criteria."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, MultiLingAdapterArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, adapter_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
config.gradient_checkpointing=True
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
tokenizer.add_tokens(['[URL]'])
model.resize_token_embeddings(len(tokenizer))
# Setup adapters
if adapter_args.train_adapter:
task_name = data_args.dataset_name or "mlm"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters:
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter:
model.load_adapter(
adapter_args.load_adapter,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
model.add_adapter(task_name, config=adapter_config)
# optionally load a pre-trained language adapter
if adapter_args.load_lang_adapter:
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
lang_adapter_name = model.load_adapter(
adapter_args.load_lang_adapter,
config=lang_adapter_config,
load_as=adapter_args.language,
)
else:
lang_adapter_name = None
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_name, task_name])
else:
model.set_active_adapters([task_name])
else:
if adapter_args.load_adapter or adapter_args.load_lang_adapter:
raise ValueError(
"Adapters can only be loaded in adapters training mode."
"Use --train_adapter to enable adapter training"
)
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warn(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
return tokenizer(
examples["text"],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
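        # Worked example of the chunking above: with max_seq_length=128, a mapped batch whose concatenated
        # length is 1,000 tokens yields 1000 // 128 = 7 full blocks (896 tokens); the remaining 104 tokens
        # are dropped, as noted in the comment inside `group_texts`.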
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
# Data collator
# This one will take care of randomly masking the tokens.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
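    # Note on the collator above: with mlm_probability=0.15 it selects roughly 15% of the non-special tokens
    # per sequence as MLM targets; of those, 80% are replaced by [MASK], 10% by a random token and 10% are
    # left unchanged, following the standard BERT masking scheme.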
callbacks = [EarlyStoppingCallback(model_args.patience, 0.0001)]
# Initialize our Trainer
#training_args.evaluation_strategy = "epoch"
trainer = TrainerSelf(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks = callbacks,
do_save_full_model=not adapter_args.train_adapter,
do_save_adapters=adapter_args.train_adapter,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
        checkpoint = None  # This line avoids resuming from our trained model's checkpoint -> modify for your own needs
print(last_checkpoint)
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 23,874 | 42.567518 | 128 | py |
| Multi2WOZ | Multi2WOZ-main/specialization/run_rs.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence classification (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
# You can also adapt this script on your own Sequence Classification task. Pointers for this are left as comments.
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_metric
import numpy as np
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoModelForNextSentencePrediction,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorForLanguageModeling,
default_data_collator,
DataCollatorWithPadding,
HfArgumentParser,
MultiLingAdapterArguments,
TrainingArguments,
IntervalStrategy,
set_seed,
EvalPrediction,
)
from transformers.trainer_callback import EarlyStoppingCallback
from trainer_self import TrainerSelf
from transformers.adapters.configuration import AdapterConfig
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.5.0")
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=False,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
do_lower_case: bool = field(
default=False,
metadata={
"help": "Whether to do lower case for the tokenizer."
},
)
patience: Optional[int] = field(
default=2,
metadata={
"help": "Number of epochs for early stopping criteria."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
lang: Optional[str] = field(
default=None, metadata={"help": "The language to use: {cn, de, ar, ru}"}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
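# A hedged example invocation of this script (the model name and output path are placeholders; only flags
# defined by the dataclasses above and by `TrainingArguments` are used):
#
#     python run_rs.py \
#         --model_name_or_path bert-base-multilingual-cased \
#         --lang de \
#         --do_train --do_eval \
#         --per_device_train_batch_size 16 \
#         --output_dir out/rs-de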
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, MultiLingAdapterArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, adapter_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
datasets = load_dataset('./opensubtitles.py', name=data_args.lang, cache_dir="../LangOpenSubtitles/reload")
datasets = datasets.shuffle(seed=42)
datasets = datasets['train'].train_test_split(test_size=0.1) # Now we'll have train/test with column names: context, response, label
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
config.gradient_checkpointing=True
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
"do_lower_case": True if model_args.do_lower_case else False,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSequenceClassification.from_config(config)
tokenizer.add_tokens(['[URL]'])
model.resize_token_embeddings(len(tokenizer))
# Setup adapters
if adapter_args.train_adapter:
task_name = data_args.dataset_name or "nsp"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters:
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter:
model.load_adapter(
adapter_args.load_adapter,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
model.add_adapter(task_name, config=adapter_config)
# optionally load a pre-trained language adapter
if adapter_args.load_lang_adapter:
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
lang_adapter_name = model.load_adapter(
adapter_args.load_lang_adapter,
config=lang_adapter_config,
load_as=adapter_args.language,
)
else:
lang_adapter_name = None
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_name, task_name])
else:
model.set_active_adapters([task_name])
else:
if adapter_args.load_adapter or adapter_args.load_lang_adapter:
raise ValueError(
"Adapters can only be loaded in adapters training mode."
"Use --train_adapter to enable adapter training"
)
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["test"].column_names
context_column_name = "context"
response_column_name = "response"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
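# Each example is encoded as a (context, response) sentence pair: the tokenizer inserts the
# model-specific separator tokens between the two segments and truncates to max_seq_length.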
def tokenize_function(examples):
return tokenizer(examples[context_column_name], examples[response_column_name], max_length=max_seq_length, truncation=True, padding=padding)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=['context', 'response'],
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "test" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["test"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
# Data collator
# This one will take care of randomly masking the tokens.
# data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.argmax(preds, axis=1)
acc_metric = load_metric("accuracy")
acc = acc_metric.compute(predictions=preds, references=p.label_ids)
f1_metric = load_metric("f1")
f1 = f1_metric.compute(predictions=preds, references=p.label_ids, average="weighted")
return {"accuracy": acc['accuracy'], "f1": f1['f1']}
#return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
callbacks = [EarlyStoppingCallback(model_args.patience, 0.0001)]
# Initialize our Trainer
#training_args.evaluation_strategy = "epoch"
trainer = TrainerSelf(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
callbacks = callbacks,
do_save_full_model=not adapter_args.train_adapter,
do_save_adapters=adapter_args.train_adapter,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
checkpoint = None  # Force training from scratch instead of resuming from our trained model; adjust for your own needs
print(last_checkpoint)
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 22,061 | 41.426923 | 148 |
py
|
Multi2WOZ
|
Multi2WOZ-main/LangCC/langcc_extract.py
|
import os
import lzma
import re
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_lang_file', type=str, help="input language file path") #"./de.txt.xz
parser.add_argument('--save_file_name', type=str, default = "./cc_de_500K.txt", help = "save file name for language data")
parser.add_argument('--max_line', type=int, default=500000)
return parser.parse_args()
def is_match(regex, text):
pattern = re.compile(regex, re.IGNORECASE)
return pattern.search(text) is not None  # flags must be passed to re.compile, not to search()
def match_num(regex, text):
pattern = re.compile(regex, re.IGNORECASE)
return pattern.findall(text)
#return len(pattern.findall(text))
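# e.g. match_num(r"https?://\S+", "see https://example.org for details") returns ["https://example.org"]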
def store_line(filename):
if os.path.exists(filename):
mode = 'a'
else:
mode = 'w'
return mode
if __name__ == '__main__':
args = parse_args()
count = 0
extract_list = []
num = []
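# Stream the xz-compressed monolingual dump line by line: keep lines with more than 30 tokens,
# flush every 1000 kept lines to the output file, and stop once max_line lines have been collected.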
with lzma.open(args.input_lang_file, mode='rt') as file:
for i, line in enumerate(file):
if len(line.split())>30: ##ar, ru, de, en
#if len(line)>30: ## cn
extract_list.append(line)
num.append(len(extract_list))
if len(extract_list)>1000:
mode = store_line(args.save_file_name)
with open(args.save_file_name, mode) as s:
for element in extract_list:
s.write(element)
count +=1000
extract_list = []
if i%100000==0 and i!=0:
print("Load {}".format(i))
if len(num)>args.max_line:
print(i)
break
mode = store_line(args.save_file_name)
if len(extract_list)!=0:
with open(args.save_file_name, mode) as s:
for element in extract_list:
s.write(element)
| 1,868 | 32.981818 | 126 |
py
|
Multi2WOZ
|
Multi2WOZ-main/LangCC/langcc_prep.py
|
import random
import numpy as np
from sklearn.model_selection import train_test_split
import argparse
import re
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--random_seed', type = int, default = 10, help="set random seed")
parser.add_argument('--train_size', type=int, default = 200000, help="size for training set")
parser.add_argument('--test_size', type=int, default = 10000, help="size for testing set")
parser.add_argument('--input_lang_file', type=str, help="input language file path") #"./langcc/cc_de_500K.txt"
parser.add_argument('--save_train_file_name', type=str, default = "./train/train.txt", help = "file name for training data")
parser.add_argument('--save_test_file_name', type=str, default = "./test/test.txt", help = "file name for testing data")
return parser.parse_args()
def remove_puncts(text):
return re.sub(r"\.+", ".", text)
def remove_email(text):
text = re.sub(r"\[…\]", " ", text)
text = re.sub(r"\S*@\S*\s?", "", text)
return re.sub(r"\_+", " ", text)
def remove_quotes(text):
text = text.replace("'", "").replace("…", "").replace(". .", ".")
text = re.sub(r"[\x08|\x06|\x05|\x07|\xad|\u200b|\x96|\x97|█|\u200f|\u200c|\u200e|\u200d|\u061c]+", "", text)
return re.sub('[`"“„»«<>↑~”•●]', " ", text) #ar, ru, de
#return re.sub('[`"“„»«<>↑~”•●]', " ", text) #cn
def remove_url(text):
text = re.sub(r"https?://\S+|www\.\S+|\w*\.[com|org|de|ru|ar|cn]+/*\w*|\w+//\w+:*/*|https?:\w*", " [URL] ", text, flags=re.IGNORECASE)
text = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', " [URL] ", text, flags=re.IGNORECASE)
text = re.sub(' +', ' ', text)
text = text.replace("[ [URL]", " [URL]").replace("( [URL] )", " [URL]").replace("( [URL]", "[URL]").replace("[URL] )", "[URL]").replace("[URL] -", "[URL]").replace("[URL] [URL]", "[URL]")
text = text.replace("[ [URL]", " [URL]").replace("( [URL] )", " [URL]").replace("[URL] [URL]", "[URL]").replace("( [URL]", "[URL]").replace("[URL] )", "[URL]")
text = re.sub(' +', ' ', text)
text = text.replace("[URL] [URL]", " [URL]")
text = re.sub(r'[/\\]+', " ", text)
return re.sub(' +', ' ', text) #de, ar, ru, en
#return re.sub(' +', '', text) #cn
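# Roughly, remove_url("see https://example.com/page for details") -> "see [URL] for details"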
def save_file(file_name, corpus_list, max_len = 10000):
c = 0
with open(file_name, 'a') as s:
for i, element in enumerate(corpus_list):
element = remove_email(remove_puncts(remove_url(remove_quotes(element))))
element = element.replace('[ url ]', '[URL]')
element = element.strip()
if element!="[URL]" and len(element)>30 and c<max_len:
c+=1
s.write("{}\n".format(element))
if i%10000==0:
print(i)
if __name__ == '__main__':
args = parse_args()
random.seed(args.random_seed)
np.random.seed(args.random_seed)
with open(args.input_lang_file, 'r') as f:
data = f.read().split('\n')
data = data[0:500000]
print("Original data size: {}".format(len(data)))
random.shuffle(data)
train, test = train_test_split(data, test_size=0.10, random_state=args.random_seed)
train = train[0:args.train_size+10000]
test = test[0:args.test_size+10000]
print("Training data size: {}".format(len(train)))
print("Testing data size: {}".format(len(test)))
save_file(args.save_test_file_name, test, max_len=args.test_size)
save_file(args.save_train_file_name, train, max_len=args.train_size)
#cd ./XLM
#from https://github.com/facebookresearch/XLM/tree/cd281d32612d145c6742b4d3f048f80df8669c30
###en, de, ar, ru###
#cat ../langcc/cc_de_test_10K.txt | ./tools/tokenize.sh de | python ./tools/lowercase_and_remove_accent.py > ../langcc/cc_de_test_10K_final.txt
#cat ../langcc/cc_de_train_200K.txt | ./tools/tokenize.sh de | python ./tools/lowercase_and_remove_accent.py > ../langcc/cc_de_train_200K_final.txt
###zh-cn###
#cat ../langcc/cc_cn_test_10K.txt | ./tools/tokenize.sh zh | python ./tools/lowercase_and_remove_accent.py > ../langcc/cc_cn_test_10K_final.txt
#cat ../langcc/cc_cn_train_200K.txt | ./tools/tokenize.sh zh | python ./tools/lowercase_and_remove_accent.py > ../langcc/cc_cn_train_200K_final.txt
| 4,428 | 54.3625 | 245 |
py
|
Multi2WOZ
|
Multi2WOZ-main/LangOpenSubtitles/concat_files.py
|
import json
import argparse
import re, os
import random
import glob
import numpy as np
import linecache
import unicodedata
import pickle
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--file1', type=str, help='First file to process')
parser.add_argument('--file2', type=str, nargs="?")
parser.add_argument('--fileids', type=str, help='File with ids to process')
parser.add_argument('--ofile', type=str)
parser.add_argument('--lang', type=str)
return parser.parse_args()
def load_file(file):
with open(file, 'r') as f:
for index, _ in enumerate(f):
a = index
f.close()
print("Number of lines: {}".format(a))
return a
def load_dial_json(filename):
with open(filename, "r") as f:
dial_json = json.load(f)
return dial_json
def save_dial_json(json_dial, filename):
with open(filename, "w") as f:
json.dump(json_dial, f, indent=2, separators=(",", ": "), sort_keys=False)
def prep_text(text):
characters = ["{", "}"]  # literal characters to reject; control characters are caught by the unicodedata check below
if not any(j for j in text if unicodedata.category(j).startswith('C')) and not any(j in text for j in characters):
return True
else:
return False
def get_data(args, data, total_lines):
context = []
response = []
label = []
c = 0
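# For every mined dialogue keep the gold (context, response) pair with label 1, the pre-mined
# hard negative with label 0, and 1-3 extra random negatives (label 0) drawn from the
# response-language file, skipping line ids already used for this dialogue.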
for dial in data:
if prep_text(dial['context']) and prep_text(dial['response']) and prep_text(dial['false_response']):
if dial['curr_lang']==1:
file = args.file2 #tgt file
else:
file = args.file1
context.append(dial['context'])
response.append(dial['response'])
label.append(1)
context.append(dial['context'])
response.append(dial['false_response'])
label.append(0)
# sample additional negative responses
negative_sampling = random.randint(1,3)
all_id = dial['context_ids']+[dial['response_id']]+[dial['false_response_id']]
i=0
ids = []
while i < negative_sampling:
f_resp_id = random.randint(0,total_lines)
if f_resp_id not in all_id:
f_resp = linecache.getline(file, f_resp_id+1).strip()
if prep_text(f_resp) and len(f_resp)>10:
context.append(dial['context'])
response.append(f_resp)
label.append(0)
all_id.append(f_resp_id)
i+=1
c+=1
return context, response, label
def convert_to_json(context, response, label):
dialogues = []
for c, r, l in zip(context, response, label):
dials = {}
dials['context'] = c
dials['response'] = r
dials['label'] = l
dialogues.append(dials)
return dialogues
if __name__ == '__main__':
args = parse_args()
#all_files = glob.glob("./" + args.lang + "/*K.pkl") #for rs-x
all_files = glob.glob("./" + args.lang + "/*mono*K.pkl") #for rs-mono
total_lines = load_file(args.fileids)
dialogues = []
for file in all_files:
dialog = pickle.load(open(file, "rb"))
print(len(dialog), file)
context, response, label = get_data(args, dialog, total_lines)
dialogues += convert_to_json(context, response, label)
print(len(context), len(dialogues))
save_dial_json(dialogues, "./" + args.lang + '/prep/' + args.ofile)
print(len(dialogues))
bs = 400000
for i in range(0, len(dialogues), bs):
pickle.dump(dialogues[i:i+bs], open("./" + args.lang + '/prep/' + args.ofile+str(i)+"K.pkl", "wb"), protocol=2) # converts array to binary and writes to output
print(len(dialogues[i:i+bs]))
| 3,791 | 34.773585 | 167 |
py
|
Multi2WOZ
|
Multi2WOZ-main/LangOpenSubtitles/convert_mlm.py
|
import sys
import random
import argparse
from tqdm import *
import numpy as np
from sklearn.model_selection import train_test_split
import unicodedata
import json
import linecache
import pickle
# bi: 0
# mono: 1
random.seed(1)
np.random.seed(1)
def parse_args():
parser = argparse.ArgumentParser(description='Process parallel subtitle files into chat-level training files')
parser.add_argument('--file1', type=str, help='First file to process')
parser.add_argument('--file2', type=str, help="Second file to process")
parser.add_argument('--fileids', type=str, help='File with ids to process')
parser.add_argument('--fileids_lines', type=str, help='File with unique ids and start and end for each dialogue')
parser.add_argument('--ofile', type=str)
parser.add_argument('--xdm', action='store_true')
parser.add_argument('--xdm_dialogue', action='store_true')
parser.add_argument('--rs_dialogue', action='store_true')
parser.add_argument('--single', action='store_true')
parser.add_argument('--mixed', action='store_true')
parser.add_argument('--tlm', action='store_true')
parser.add_argument('--monodm', action='store_true')
parser.add_argument('--response', action='store_true')
parser.add_argument('--split', action='store_true')
parser.add_argument('--count', type=int, default=200000)
parser.add_argument('--max_length', type=int, default=15)
return parser.parse_args()
def coin_toss():
if random.random() < 0.5:
return 1
return 2
def load_dial_json(filename):
with open(filename, "r") as f:
dial_json = json.load(f)
return dial_json
def get_hard_neg_response(args, all_ids, curr_id, curr_lang, orig_resp, context_ids): # hard negative from the same imdb id
all_turns = []
start = all_ids[curr_id]['start']
end = all_ids[curr_id]['end']
#print(start, end)
if curr_lang==1:
file = args.file2 #tgt file
else:
file = args.file1
for i in range(start+1, end+2):
all_turns.append(linecache.getline(file, i).strip())
rand_index_false_resp = random.choice(list(range(0,len(all_turns))))
false_resp = all_turns[rand_index_false_resp]
while rand_index_false_resp+start in context_ids or false_resp==orig_resp:
rand_index_false_resp = random.choice(list(range(0,len(all_turns))))
false_resp = all_turns[rand_index_false_resp]
return false_resp, rand_index_false_resp+start
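# rs_dialogue builds response-selection examples: a context of k (2..max_length) consecutive
# subtitle lines in a randomly chosen language, the next line from the parallel file (in the
# other language) as the true response, and a hard negative drawn from the same movie via
# get_hard_neg_response.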
def rs_dialogue():
with open(args.fileids, 'r') as f:
for index, _ in tqdm(enumerate(f)):
a = index
f.close()
index = 0
curr_count = 0
cont_resp = []
while (curr_count != chat_count) and (index<=a-2):
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
ids = []
curr_c = 0
context_ids = []
with open(args.file1, "r") as f1, open(args.file2, "r") as f2, open(args.fileids, "r") as fid:
for index, (line_R1, line_R2, line_id) in tqdm(enumerate(zip(f1, f2, fid))): #specify start
if index>a-2:
break
infos = line_id.split('\t')[0].split('/')
curr_year, curr_imdbid = infos[1], infos[2]
curr_id = line_id.split('\t')[0].split(".")[0]
if curr_c != k: #concat src language
if curr_lang == 1:
s = s + " " + line_R1.strip()
ids.append(curr_imdbid)
context_ids.append(index)
cr = 0 #src
resp = line_R1.strip()
else:
s = s + " " + line_R2.strip()
ids.append(curr_imdbid)
context_ids.append(index)
resp = line_R2.strip()
cr = 1 #tgt
curr_c = curr_c + 1
else: #append response
dials = {}
if curr_lang != 1:
ids.append(curr_imdbid)
cr = 0 #src
resp = line_R1.strip()
dials['ids'] = curr_id
dials['curr_lang'] = curr_lang #context_lang = "tgt_lang", resp_lang = "en"
dials['context'] = s
dials['context_ids'] = context_ids
dials['response'] = line_R1.strip()
dials['response_id'] = index
dials['false_response'], dials['false_response_id'] = get_hard_neg_response(args, all_ids, curr_id, curr_lang, line_R1.strip(), context_ids)
context_ids = []
else:
#s = s + " " + line_R2.strip()
ids.append(curr_imdbid)
resp = line_R2.strip()
cr = 1 #tgt
dials['ids'] = curr_id
dials['curr_lang'] = curr_lang
dials['context'] = s
dials['context_ids'] = context_ids
dials['response'] = line_R2.strip()
dials['response_id'] = index
dials['false_response'], dials['false_response_id'] = get_hard_neg_response(args, all_ids, curr_id, curr_lang, line_R2.strip(), context_ids)
context_ids = []
#if (cr==0 and len(s)+len(resp.split()) < 256 and len(s)+len(dials['false_response'].split()) < 256) or (cr==1 and len(s.split())+len(resp) < 256 and len(s.split())+len(dials['false_response']) < 256): #->for chinese
#if (cr==0 and len(s)+len(resp) < 256 and len(s)+len(dials['false_response']) < 256) or (cr==1 and len(s)+len(resp) < 256 and len(s)+len(dials['false_response']) < 256): #->for mono chinese
if (cr==0 and len(s.split())+len(resp.split()) < 256 and len(s.split())+len(dials['false_response'].split()) < 256) or (cr==1 and len(s.split())+len(resp.split()) < 256 and len(s.split())+len(dials['false_response'].split()) < 256):
if len(set(ids))==1:
#if (cr==1 and len(resp)>10 and len(dials['false_response'])>10) or (cr==0 and len(resp.split())>10 and len(dials['false_response'].split())>10): #-> for chinese
#if (cr==1 and len(resp)>10 and len(dials['false_response'])>10) or (cr==0 and len(resp)>10 and len(dials['false_response'])>10): #-> for mono chinese
if (cr==1 and len(resp.split())>10 and len(dials['false_response'].split())>10) or (cr==0 and len(resp.split())>10 and len(dials['false_response'].split())>10): #-> for others
curr_count = curr_count + 1
cont_resp.append(dials)
ids = []
context_ids = []
if curr_count >= chat_count:
break
else:
#print(ids)
#print(s.strip())
ids = []
context_ids = []
if curr_count%10000==0 and curr_count!=0:
print(curr_count)
curr_c = 0
curr_lang = coin_toss()
s = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
bs = 20000
for i in range(0, len(cont_resp), bs):
pickle.dump(cont_resp[i:i+bs], open(args.ofile+str(i)+"K.pkl", "wb"), protocol=2) # converts array to binary and writes to output
print(len(cont_resp[i:i+bs]))
def xdm_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
ids = []
curr_c = 0
with open(args.file1, "r") as f1, open(args.file2, "r") as f2, open(args.fileids, "r") as fid:
for index, (line_R1, line_R2, line_id) in tqdm(enumerate(zip(f1, f2, fid))): #specify start
infos = line_id.split('\t')[0].split('/')
curr_year, curr_imdbid = infos[1], infos[2]
if curr_c != k: #concat src language
if curr_lang == 1:
s = s + " " + line_R1.strip()
ids.append(curr_imdbid)
cr = 0 #src
resp = line_R1.strip()
else:
s = s + " " + line_R2.strip()
ids.append(curr_imdbid)
resp = line_R2.strip()
cr = 1 #tgt
curr_c = curr_c + 1
else: #append response
if curr_lang != 1:
s = s + " " + line_R1.strip()
ids.append(curr_imdbid)
cr = 0 #src
resp = line_R1.strip()
else:
s = s + " " + line_R2.strip()
ids.append(curr_imdbid)
resp = line_R2.strip()
cr = 1 #tgt
if len(s.split()) < 256:
if len(set(ids))==1:
#if (cr==1 and len(resp)>10) or (cr==0 and len(resp.split())>10): #-> for chinese
if (cr==1 and len(resp.split())>10) or (cr==0 and len(resp.split())>10):
curr_count = curr_count + 1
#f3.write(" ".join(ids) + '\n')
f3.write(s.strip() + '\n')
#print(set(ids))
#print(s.strip())
ids = []
#print(index)
#index = random.randint(5, 9300000)
if curr_count >= chat_count:
break
else:
print(ids)
print(s.strip())
ids = []
curr_c = 0
curr_lang = coin_toss()
s = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
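# tlm_dialogue writes TLM-style sequences: k+1 aligned utterances are concatenated separately
# for each language and the two halves are joined in a random order (tgt+src or src+tgt).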
def tlm_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
t = ""
ids = []
curr_c = 0
with open(args.file1, "r") as f1, open(args.file2, "r") as f2, open(args.fileids, "r") as fid:
for index, (line_R1, line_R2, line_id) in tqdm(enumerate(zip(f1, f2, fid))): #specify start
infos = line_id.split('\t')[0].split('/')
curr_year, curr_imdbid = infos[1], infos[2]
if curr_c != k:
s = s + " " + line_R1.strip()
t = t + " " + line_R2.strip()
ids.append(curr_imdbid)
curr_c = curr_c + 1
else:
s = s + " " + line_R1.strip()
t = t + " " + line_R2.strip()
ids.append(curr_imdbid)
#if len(s.split())+len(t) < 256: #if tgt==chinese
if len(s.split())+len(t.split()) < 256:
if len(set(ids))==1:
curr_count = curr_count + 1
if curr_lang == 1: # tgt, src
s = t + " " + s
else:
s = s + " " + t
f3.write(s.strip() + '\n')
ids=[]
#print(index)
if curr_count >= chat_count:
break
else:
print(ids)
print(s.strip())
print(t.strip())
ids = []
curr_c = 0
curr_lang = coin_toss()
s = ""
t = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
def parallel_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
curr_c = 0
with open(args.file1, "r") as f1, open(args.file2, "r") as f2:
for index, (line_R1, line_R2) in tqdm(enumerate(zip(f1, f2))):
if curr_c != k:
if curr_lang == 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
curr_c = curr_c + 1
else:
if curr_lang != 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
if len(s.split()) < 512:
curr_count = curr_count + 1
f3.write(s.strip() + '\n')
if curr_count >= chat_count:
break
curr_c = 0
curr_lang = coin_toss()
s = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
def response_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
curr_c = 0
with open(args.file1, "r") as f1, open(args.file2, "r") as f2:
for index, (line_R1, line_R2) in tqdm(enumerate(zip(f1, f2))):
if curr_c != k:
if curr_lang == 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
curr_c = curr_c + 1
else:
if curr_lang != 1:
s = s + " <S> " + line_R1.strip()
else:
s = s + " <S> " + line_R2.strip()
if len(s.split()) < 512:
curr_count = curr_count + 1
f3.write(s.strip() + '\n')
if curr_count >= chat_count:
break
curr_c = 0
curr_lang = coin_toss()
s = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
def single_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
s = ""
curr_c = 0
with open(args.file1, "r") as f1:
for line_R1 in f1:
s = s + " " + line_R1.strip()
curr_c = curr_c + 1
if curr_c == k:
if len(s.split()) < 512:
curr_count = curr_count + 1
f3.write(s.strip() + '\n')
if curr_count >= chat_count:
break
curr_c = 0
s = ""
k = random.randint(2, max_length)
f1.close()
def mixed_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
s = ""
curr_c = 0
set_flag = False
with open(args.file1, "r") as f1, open(args.file2, "r") as f2:
for index, (line_R1, line_R2) in tqdm(enumerate(zip(f1, f2))):
if not set_flag:
curr_lang = coin_toss()
if curr_c != k:
if curr_lang == 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
curr_c = curr_c + 1
else:
if len(s.split()) < 512:
curr_count = curr_count + 1
f3.write(s.strip() + '\n')
if curr_count >= chat_count:
break
curr_c = 0
s = ""
k = random.randint(2, max_length)
set_flag = coin_toss()
if set_flag == 1:
set_flag = True
curr_lang = coin_toss()
else:
set_flag = False
f1.close()
f2.close()
f3.close()
def bilingual_dialogue():
f3 = open(args.ofile, 'w')
curr_count = 0
while curr_count != chat_count:
k = random.randint(2, max_length)
curr_lang = coin_toss()
s = ""
curr_c = 0
with open(args.file1, "r") as f1, open(args.file2, "r") as f2:
for index, (line_R1, line_R2) in tqdm(enumerate(zip(f1, f2))):
if curr_c != k:
if curr_lang == 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
curr_c = curr_c + 1
else:
if curr_lang == 1:
s = s + " " + line_R1.strip()
else:
s = s + " " + line_R2.strip()
if len(s.split()) < 512:
curr_count = curr_count + 1
f3.write(s.strip() + '\n')
if curr_count >= chat_count:
break
curr_c = 0
curr_lang = coin_toss()
s = ""
k = random.randint(2, max_length)
f1.close()
f2.close()
def split():
with open(args.ofile, 'r') as f:
data = f.read().split('\n')
data = data[0:500000]
print("Original data size: {}".format(len(data)))
random.shuffle(data)
train, test = train_test_split(data, test_size=0.10, random_state=0)
train = train[0:200000+10000]
test = test[0:10000+10000]
print("Training data size: {}".format(len(train)))
print("Testing data size: {}".format(len(test)))
#print(test[-1])
save_file("xdm_mlm_10K."+args.ofile.split('.')[-2]+".txt", test, max_len=10000)
save_file("xdm_mlm_200K."+args.ofile.split('.')[-2]+".txt", train, max_len=200000)
def save_file(file_name, corpus_list, max_len = 10000):
characters = ["{", "}"]  # literal characters to reject; control characters are caught by the unicodedata check below
c = 0
with open(file_name, 'w') as s:
for i, element in enumerate(corpus_list):
if not any(j for j in element if unicodedata.category(j).startswith('C')) and not any(j in element for j in characters):
element = element.strip()
c+=1
s.write("{}\n".format(element))
else:
print("False: {}".format(element))
if i%10000==0:
print(i)
if c>max_len-1:
break
if __name__ == '__main__':
args = parse_args()
chat_count = args.count
max_length = args.max_length
if args.xdm:
parallel_dialogue()
if args.mixed:
mixed_dialogue()
if args.single:
single_dialogue()
if args.tlm:
tlm_dialogue()
if args.monodm:
bilingual_dialogue()
if args.response:
response_dialogue()
if args.xdm_dialogue:
xdm_dialogue()
if args.rs_dialogue:
all_ids = load_dial_json(args.fileids_lines)
rs_dialogue()
if args.split:
split()
| 20,237 | 38.527344 | 252 |
py
|
Multi2WOZ
|
Multi2WOZ-main/LangOpenSubtitles/extract_imdb_ids.py
|
import json
import argparse
import linecache
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--src_file', type = str, help="src language file")
parser.add_argument('--tgt_file', type = str, help="tgt language file")
parser.add_argument('--ids', type = str, help="ids file")
parser.add_argument('--save_file_name', type=str, help = "save_file_name as json")
return parser.parse_args()
def save_file(dict_to_save, filepath):
with open(filepath, 'w') as file:
json.dump(dict_to_save, file)
if __name__ == '__main__':
args = parse_args()
with open(args.ids, "r") as f3:
k=0
a = {}
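# Map every imdb id to the 0-based start and end line index of its block of utterances in the ids file.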
for index, line_ids in enumerate(f3):
imdbid = line_ids.split('\t')[0].split(".")[0]
curr_c = index
prev_c = index-1
if imdbid not in a.keys(): #new ids
a[imdbid] = {}
a[imdbid]['start']=index
if index!=0:
prev_imdbid = linecache.getline(args.ids, index).split('\t')[0].split(".")[0]  # linecache is 1-indexed, so line number `index` is the previous utterance
a[prev_imdbid]['end']=index-1
if index%2000000==0 and index!=0:
print("Process index {}...".format(index))
a[imdbid]['end']=index
f3.close()
print("Total utterances: {}".format(str(index)))
print("Total imdbs: {}".format(str(sum([len(v) for k, v in a.items()]))))
save_file(a, args.save_file_name)
| 1,452 | 34.439024 | 99 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/main_cl.py
|
from tqdm import tqdm
import torch.nn as nn
import ast
import glob
import numpy as np
import copy
# utils
from utils.config import *
from utils.utils_general import *
from utils.utils_multiwoz_cl import *
from utils.utils_oos_intent import *
from utils.utils_universal_act import *
# models
from models.BERT_DST_Picklist import *
from models.dual_encoder_ranking import *
# Huggingface models
from transformers import *
import logging
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## model selection
MODELS = {"bert": (BertModel, BertTokenizer, BertConfig),
"todbert": (BertModel, BertTokenizer, BertConfig),
"gpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
"todgpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
"dialogpt": (AutoModelWithLMHead, AutoTokenizer, GPT2Config),
"albert": (AlbertModel, AlbertTokenizer, AlbertConfig),
"roberta": (RobertaModel, RobertaTokenizer, RobertaConfig),
"distilbert": (DistilBertModel, DistilBertTokenizer, DistilBertConfig),
"electra": (ElectraModel, ElectraTokenizer, ElectraConfig),
"xlmroberta": (XLMRobertaModel, XLMRobertaTokenizer, XLMRobertaConfig)}
## Fix torch random seed
if args["fix_rand_seed"]:
torch.manual_seed(args["rand_seed"])
#logging.info("Running Tgt Language: {}".format(args["tgt_lang"]))
## Reading data and create data loaders
datasets = {}
for ds_name in ast.literal_eval(args["dataset"]):
data_trn, data_dev, data_tst, data_meta = globals()["prepare_data_{}".format(ds_name)](args)
datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
unified_meta = get_unified_meta(datasets)
if "resp_cand_trn" not in unified_meta.keys(): unified_meta["resp_cand_trn"] = {}
args["unified_meta"] = unified_meta
## Create vocab and model class
args["model_type"] = args["model_type"].lower()
model_class, tokenizer_class, config_class = MODELS[args["model_type"]]
tokenizer = tokenizer_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
args["model_class"] = model_class
args["tokenizer"] = tokenizer
if args["model_name_or_path"]:
config = config_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
else:
config = config_class()
args["config"] = config
args["num_labels"] = unified_meta["num_labels"]
## Training and Testing Loop
if args["do_train"]:
result_runs = []
output_dir_origin = str(args["output_dir"])
## Setup logger
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=os.path.join(args["output_dir"], "train.log"),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
## training loop
for run in range(args["nb_runs"]):
## Setup random seed and output dir
rand_seed = SEEDS[run]
if args["fix_rand_seed"]:
torch.manual_seed(rand_seed)
args["rand_seed"] = rand_seed
args["output_dir"] = os.path.join(output_dir_origin, "run{}".format(run))
os.makedirs(args["output_dir"], exist_ok=False)
logging.info("Running Random Seed: {}".format(rand_seed))
## Loading model
model = globals()[args['my_model']](args)
if torch.cuda.is_available(): model = model.cuda()
## Create Dataloader
trn_loader = get_loader(args, "train", tokenizer, datasets, unified_meta)
dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
## Create TF Writer
tb_writer = SummaryWriter(comment=args["output_dir"].replace("/", "-"))
# Start training process with early stopping
loss_best, acc_best, cnt, train_step = 1e10, -1, 0, 0
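# loss_best/acc_best hold the best dev score so far; cnt counts consecutive evaluations without
# improvement and triggers early stopping once it exceeds args["patience"].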
try:
for epoch in range(args["epoch"]):
logging.info("Epoch:{}".format(epoch+1))
train_loss = 0
pbar = tqdm(trn_loader)
for i, d in enumerate(pbar):
model.train()
outputs = model(d)
train_loss += outputs["loss"]
train_step += 1
pbar.set_description("Training Loss: {:.4f}".format(train_loss/(i+1)))
## Dev Evaluation
if (train_step % args["eval_by_step"] == 0 and args["eval_by_step"] != -1) or \
(i == len(pbar)-1 and args["eval_by_step"] == -1):
model.eval()
dev_loss = 0
preds, labels = [], []
ppbar = tqdm(dev_loader)
for d in ppbar:
with torch.no_grad():
outputs = model(d)
#print(outputs)
dev_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
dev_loss = dev_loss / len(dev_loader)
results = model.evaluation(preds, labels)
dev_acc = results[args["earlystop"]] if args["earlystop"] != "loss" else dev_loss
## write to tensorboard
tb_writer.add_scalar("train_loss", train_loss/(i+1), train_step)
tb_writer.add_scalar("eval_loss", dev_loss, train_step)
tb_writer.add_scalar("eval_{}".format(args["earlystop"]), dev_acc, train_step)
if (dev_loss < loss_best and args["earlystop"] == "loss") or \
(dev_acc > acc_best and args["earlystop"] != "loss"):
loss_best = dev_loss
acc_best = dev_acc
cnt = 0 # reset
if args["not_save_model"]:
model_clone = globals()[args['my_model']](args)
model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
else:
output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
if args["n_gpu"] == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
else:
cnt += 1
logging.info("[Info] Early stop count: {}/{}...".format(cnt, args["patience"]))
if cnt > args["patience"]:
logging.info("Ran out of patient, early stop...")
break
logging.info("Trn loss {:.4f}, Dev loss {:.4f}, Dev {} {:.4f}".format(train_loss/(i+1),
dev_loss,
args["earlystop"],
dev_acc))
if cnt > args["patience"]:
tb_writer.close()
break
except KeyboardInterrupt:
logging.info("[Warning] Earlystop by KeyboardInterrupt")
## Load the best model
if args["not_save_model"]:
model.load_state_dict(copy.deepcopy(model_clone.state_dict()))
else:
# Start evaluating on the test set
if torch.cuda.is_available():
model.load_state_dict(torch.load(output_model_file))
else:
model.load_state_dict(torch.load(output_model_file, lambda storage, loc: storage))
## Run test set evaluation
pbar = tqdm(tst_loader)
for nb_eval in range(args["nb_evals"]):
test_loss = 0
preds, labels = [], []
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
result_runs.append(results)
logging.info("[{}] Test Results: ".format(nb_eval) + str(results))
## Average results over runs
if args["nb_runs"] > 1:
f_out = open(os.path.join(output_dir_origin, "eval_results_multi-runs.txt"), "w")
f_out.write("Average over {} runs and {} evals \n".format(args["nb_runs"], args["nb_evals"]))
for key in results.keys():
mean = np.mean([r[key] for r in result_runs])
std = np.std([r[key] for r in result_runs])
f_out.write("{}: mean {} std {} \n".format(key, mean, std))
f_out.close()
else:
## Load Model
print("[Info] Loading model from {}".format(args['my_model']))
model = globals()[args['my_model']](args)
if args["load_path"]:
print("MODEL {} LOADED".format(args["load_path"]))
if torch.cuda.is_available():
model.load_state_dict(torch.load(args["load_path"]))
else:
model.load_state_dict(torch.load(args["load_path"], lambda storage, loc: storage))
else:
print("[WARNING] No trained model is loaded...")
if torch.cuda.is_available():
model = model.cuda()
print("[Info] Start Evaluation on dev and test set...")
dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta)
tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
model.eval()
for d_eval in ["tst"]: #["dev", "tst"]:
f_w = open(os.path.join(args["output_dir"], "{}_results.txt".format(d_eval)), "w")
## Start evaluating on the test set
test_loss = 0
preds, labels = [], []
pbar = tqdm(locals()["{}_loader".format(d_eval)])
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
print("{} Results: {}".format(d_eval, str(results)))
f_w.write(str(results))
f_w.close()
| 11,553 | 42.931559 | 114 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/tod_xlmr_pretraining.py
|
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
from typing import Tuple
import gzip
import shelve
import json
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from concurrent.futures import ThreadPoolExecutor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Sampler
from utils.utils_general import *
from utils.utils_multiwoz import *
from utils.utils_camrest676 import *
from utils.utils_woz import *
from utils.utils_smd import *
from utils.utils_frames import *
from utils.utils_msre2e import *
from utils.utils_taskmaster import *
from utils.utils_metalwoz import *
from utils.utils_schema import *
import gc
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertModel,
BertForMaskedLM,
BertTokenizer,
CamembertConfig,
CamembertForMaskedLM,
CamembertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTConfig,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
PreTrainedTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForMaskedLM,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
"openai-gpt": (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"bert-seq": (BertConfig, BertModel, BertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"camembert": (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer),
"xlmr": (XLMRobertaConfig, XLMRobertaForMaskedLM, XLMRobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
if len(glob_checkpoints) <= args.save_total_limit:
return
ordering_and_checkpoint_path = []
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]:
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
inputs = inputs.to("cpu")
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, args.mlm_probability)
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
# padding position value = 0
inputs_pad_pos = (inputs == 0).cpu()
probability_matrix.masked_fill_(inputs_pad_pos, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
try:
labels[~masked_indices] = -100 # We only compute loss on masked tokens
except:
masked_indices = masked_indices.byte()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
try:
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
except:
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool().byte() & masked_indices
inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
try:
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
except:
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool().byte() & masked_indices & ~indices_replaced
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
if inputs.is_cuda:
indices_random = indices_random.to(args.device)
random_words = random_words.to(args.device)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
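# Illustrative behaviour of mask_tokens: for each position selected with probability mlm_probability,
# the label keeps the original id while the input becomes [MASK] 80% of the time, a random vocabulary
# id 10% of the time, and stays unchanged 10% of the time; all unselected positions get label -100.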
def mask_for_response_selection(batch, tokenizer, args, cand_uttr_sys_dict, others, set_max_resp_len=150):
""" Prepare (context,response) pairs for response contrastive learning (RCL). """
inputs = batch["context"]
inputs = inputs.to("cpu")
batch_size = inputs.size(0)
probability_matrix = torch.full(inputs.shape, 1.0)
usr_token_idx = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(args.usr_token))[0]
sys_token_idx = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(args.sys_token))[0]
cand_uttr_sys = list(cand_uttr_sys_dict.keys())
cand_uttr_sys_tokens = list(cand_uttr_sys_dict.values())
## Find positions of user and system tokens and split them into (context, response) pairs
last_sys_position, last_usr_position = [], []
for bsz_i, batch_sample in enumerate(inputs):
nb_sys_token = len((batch_sample == sys_token_idx).nonzero())
nb_usr_token = len((batch_sample == usr_token_idx).nonzero())
## Randomly select a turn to split
if nb_sys_token == 0 or nb_usr_token == 0:
last_sys_position.append(len(batch_sample)//2)
last_usr_position.append(len(batch_sample))
else:
if nb_sys_token > 2 and nb_usr_token > 2:
rand_pos = random.randint(1, min(nb_sys_token, nb_usr_token)-1)
else:
rand_pos = -1
temp1 = (batch_sample == sys_token_idx).nonzero()[rand_pos][0].item()
last_sys_position.append(temp1)
temp2 = (batch_sample == usr_token_idx).nonzero()[rand_pos][0].item()
if temp2 > temp1:
last_usr_position.append(temp2)
else:
if temp1 + 10 < len(batch_sample):
last_usr_position.append(temp1 + 10)
else:
last_usr_position.append(len(batch_sample))
last_usr_position = np.array(last_usr_position)
last_sys_position = np.array(last_sys_position)
max_last_sys_position = max(last_sys_position)
max_response_len = max(last_usr_position-last_sys_position) + 1
max_response_len = max_response_len if max_response_len < set_max_resp_len else set_max_resp_len
## placeholders
input_contexts = torch.zeros(batch_size, max_last_sys_position).long() #.to(args.device)
input_responses = torch.zeros(batch_size, max_response_len).long() #.to(args.device)
output_labels = torch.tensor(np.arange(batch_size)).long() #.to(args.device)
## assign response token indices by start and end position
responses = []
for bsz_i, (sys_pos, usr_pos) in enumerate(zip(last_sys_position, last_usr_position)):
input_contexts[bsz_i, :sys_pos] = inputs[bsz_i, :sys_pos]
input_responses[bsz_i, 0] = inputs[bsz_i, 0] ## CLS token
responses.append(tokenizer.decode(inputs[bsz_i, sys_pos+1:usr_pos]).replace(" ", ""))
s, e = (sys_pos, usr_pos) if usr_pos-sys_pos < max_response_len else (sys_pos, sys_pos+max_response_len-1)
input_responses[bsz_i, 1:e-s+1] = inputs[bsz_i, s:e]
## Add additional negative samples. Either randomly select from candidate pool or choose by Kmeans.
candidates_tokens = []
if args.negative_sampling_by_kmeans:
for ri, resp in enumerate(responses):
if resp in others["ToD_BERT_SYS_UTTR_KMEANS"].keys():
cur_cluster = others["ToD_BERT_SYS_UTTR_KMEANS"][resp]
candidates = others["KMEANS_to_SENTS"][cur_cluster]
nb_selected = min(args.nb_addition_negresponse_per_sample, len(candidates)-1)
start_pos = random.randint(0, len(candidates)-nb_selected-1)
sampled_neg_resps = candidates[start_pos:start_pos+nb_selected]
candidates_tokens += [cand_uttr_sys_dict[r] for r in sampled_neg_resps]
else:
start_pos = random.randint(0, len(cand_uttr_sys)-args.nb_addition_negresponse_per_sample-1)
candidates_tokens += cand_uttr_sys_tokens[start_pos:start_pos+args.nb_addition_negresponse_per_sample]
else:
for i in range(args.nb_addition_negresponse_per_sample * batch_size):
pos = random.randint(0, len(cand_uttr_sys_tokens)-1)
candidates_tokens.append(cand_uttr_sys_tokens[pos])
## Padding
input_responses_neg = torch.zeros(len(candidates_tokens), max_response_len).long()
for i in range(len(candidates_tokens)):
if len(candidates_tokens[i]) > max_response_len:
input_responses_neg[i] = candidates_tokens[i][:max_response_len]
else:
input_responses_neg[i, :len(candidates_tokens[i])] = candidates_tokens[i]
## Add those negative responses to response selection pool
input_responses = torch.cat([input_responses, input_responses_neg], 0)
return input_contexts, input_responses, output_labels
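# Shapes returned above: input_contexts is [batch, max_context_len]; input_responses stacks the batch
# responses plus the sampled negatives (batch * nb_addition_negresponse_per_sample extra rows when
# kmeans sampling is off); output_labels is arange(batch) since the i-th context's gold response sits at row i.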
def get_candidate_embeddings(uttr_sys_dict, tokenizer, model):
"""
obtain candidate representations by passing through model encoding,
return a dictionary that maps sentences to embeddings
"""
print("Start obtaining representations from model...")
ToD_BERT_SYS_UTTR_EMB = {}
uttr_sys = list(uttr_sys_dict.keys())
uttr_sys_tokens = list(uttr_sys_dict.values())
batch_size = 100
for start in tqdm(range(0, len(uttr_sys), batch_size)):
if start+batch_size > len(uttr_sys):
inputs = uttr_sys[start:]
inputs_ids = uttr_sys_tokens[start:]
else:
inputs = uttr_sys[start:start+batch_size]
inputs_ids = uttr_sys_tokens[start:start+batch_size]
inputs_ids = pad_sequence(inputs_ids, batch_first=True, padding_value=0)
if torch.cuda.is_available(): inputs_ids = inputs_ids.cuda()
with torch.no_grad():
outputs = model.roberta(input_ids=inputs_ids, attention_mask=inputs_ids>0)
sequence_output = outputs[0]
cls_rep = sequence_output[:, 0, :]
for i in range(cls_rep.size(0)):
ToD_BERT_SYS_UTTR_EMB[inputs[i].replace(" ", "")] = {
"sent":inputs[i],
"emb":cls_rep[i, :].cpu().numpy()}
return ToD_BERT_SYS_UTTR_EMB
def get_candidate_kmeans(args, uttr_sys_dict, tokenizer, model):
"""obtain kmeans clustering results"""
import faiss
print("Start computing kmeans with faiss...")
ToD_BERT_SYS_UTTR_EMB = get_candidate_embeddings(uttr_sys_dict, tokenizer, model)
#print('get_candidate_kmeans', ToD_BERT_SYS_UTTR_EMB)
ToD_BERT_SYS_UTTR_KMEANS = {}
KMEANS_to_SENTS = {i:[] for i in range(args.nb_kmeans)}
data = [v["emb"] for v in ToD_BERT_SYS_UTTR_EMB.values()]
data = np.array(data)
kmeans_1k = faiss.Kmeans(data.shape[1], args.nb_kmeans, niter=20, nredo=5, verbose=True)
kmeans_1k.train(data)
D, I = kmeans_1k.index.search(data, 1)
for i, key in enumerate(ToD_BERT_SYS_UTTR_EMB.keys()):
ToD_BERT_SYS_UTTR_KMEANS[key] = I[i][0]
KMEANS_to_SENTS[I[i][0]].append(ToD_BERT_SYS_UTTR_EMB[key]["sent"])
return ToD_BERT_SYS_UTTR_KMEANS, KMEANS_to_SENTS
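# ToD_BERT_SYS_UTTR_KMEANS maps a whitespace-stripped system utterance to its cluster id, while
# KMEANS_to_SENTS maps each cluster id back to the list of original utterances assigned to it.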
def train(args, trn_loader, dev_loader, model, tokenizer, cand_uttr_sys_dict, others):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter("runs/"+args.output_dir.replace("/","-"))
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(trn_loader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(trn_loader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
scaler = torch.cuda.amp.GradScaler()
print('n_gpu', args.n_gpu)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
#os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
print('multi-gpu training:', args.n_gpu)
model = torch.nn.DataParallel(model) #, device_ids=[0,1]
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
# print('BATCH', args.train_batch_size
# * args.gradient_accumulation_steps
# * (torch.distributed.get_world_size()))
logger.info("***** Running training *****")
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Num batches = %d", len(trn_loader))
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
tr_loss, logging_loss = 0.0, 0.0
loss_mlm, loss_rs = 0, 0
patience, best_loss = 0, 1e10
xeloss = torch.nn.CrossEntropyLoss()
model = model.module if hasattr(model, "module") else model
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
set_seed(args) # Added here for reproducibility
for _ in train_iterator:
## Calculate kmeans results in the beginning of each epoch
if args.negative_sampling_by_kmeans:
ToD_BERT_SYS_UTTR_KMEANS, KMEANS_to_SENTS = get_candidate_kmeans(args, cand_uttr_sys_dict, tokenizer, model)
trn_loader = get_loader(vars(args), "train", tokenizer, others["datasets"], others["unified_meta"], "train")
loss_arr, loss_mlm_arr, loss_rs_arr = [], [], []
epoch_iterator = tqdm(trn_loader, disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
## add response selection into pretraining
if args.add_rs_loss:
kmeans_others = {"ToD_BERT_SYS_UTTR_KMEANS":ToD_BERT_SYS_UTTR_KMEANS,
"KMEANS_to_SENTS":KMEANS_to_SENTS} if args.negative_sampling_by_kmeans else {}
## Split dialogue into (context, response) pairs
input_cont, input_resp, resp_label = mask_for_response_selection(batch,
tokenizer,
args,
cand_uttr_sys_dict,
kmeans_others)
## Mask context part for MLM loss
input_cont, labels = mask_tokens(input_cont, tokenizer, args) if args.mlm else (input_cont, input_cont)
## Allocate tensors to (gpu) devices
input_cont = input_cont.to(args.device)
input_resp = input_resp.to(args.device)
resp_label = resp_label.to(args.device)
labels = labels.to(args.device)
## Encode the context part with BERT
with torch.cuda.amp.autocast(enabled=True):
outputs = model.roberta(
input_cont,
attention_mask=input_cont>0,
)
sequence_output = outputs[0]
hid_cont = sequence_output[:, 0, :] ## CLS token
## Calculate MLM loss for the context
prediction_scores = model.lm_head(sequence_output)
loss = xeloss(prediction_scores.view(-1, model.config.vocab_size), labels.view(-1))
loss_mlm = loss.item()
del input_cont, labels, sequence_output, prediction_scores, outputs
## Encode the response part with BERT
outputs = model.roberta(
input_resp,
attention_mask=input_resp>0,
)
sequence_output = outputs[0]
hid_resp = sequence_output[:, 0, :]
## Calculate RCL loss: in-batch contrastive cross-entropy where the i-th context's gold response sits at row i of the score matrix and every other row (including the appended negatives) acts as a distractor
scores = torch.matmul(hid_cont, hid_resp.transpose(1, 0))
loss_rs = xeloss(scores, resp_label)
loss += loss_rs
loss_rs = loss_rs.item()
#print('loss_rs:', loss_rs)
del input_resp, resp_label, scores, sequence_output, hid_resp, outputs
## with only MLM loss
else:
inputs = batch["context"].clone()
if args.mlm:
inputs, labels = mask_tokens(inputs, tokenizer, args)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
outputs = model(inputs,
labels=labels, #masked_lm_labels
attention_mask=inputs>0)
else:
labels = inputs.clone()
masked_indices = (labels == 0)
labels[masked_indices] = -100
outputs = model(inputs, labels=labels)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
loss_mlm = loss.item()
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss_arr.append(loss.item())
loss_mlm_arr.append(loss_mlm)
loss_rs_arr.append(loss_rs)
#break
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
## Print loss
epoch_iterator.set_description("Loss:{:.4f} MLM:{:.4f} RS:{:.4f}".format(np.mean(loss_arr),
np.mean(loss_mlm_arr),
np.mean(loss_rs_arr)))
#break
tr_loss += loss.item()
del loss
if (step + 1) % args.gradient_accumulation_steps == 0:
print(step, args.gradient_accumulation_steps)
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.evaluate_during_training and args.n_gpu == 1:
results = evaluate(args, model, dev_loader, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
else:
results = {}
results["loss"] = best_loss - 0.1 # always saving
if results["loss"] < best_loss:
patience = 0
best_loss = results["loss"]
checkpoint_prefix = "checkpoint"
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
_rotate_checkpoints(args, checkpoint_prefix)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
else:
patience += 1
logger.info("Current patience: patience {}".format(patience))
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if patience > args.patience:
logger.info("Ran out of patience...")
break
if (args.max_steps > 0 and global_step > args.max_steps) or patience > args.patience:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
#torch.cuda.empty_cache()
return global_step, tr_loss / global_step
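# --- Illustrative sketch (not part of the original script) -----------------
# The RCL branch above scores every context against every in-batch response
# (hid_cont @ hid_resp.T) and treats the matching index as the gold class.
# A minimal, self-contained version on random features; the names and sizes
# here are hypothetical and only show the shape of the computation:
def _demo_in_batch_response_contrastive_loss(batch_size=4, hidden_dim=8):
    import torch
    hid_cont = torch.randn(batch_size, hidden_dim)               # context encodings
    hid_resp = torch.randn(batch_size, hidden_dim)               # response encodings
    scores = torch.matmul(hid_cont, hid_resp.transpose(1, 0))    # [B, B] similarity matrix
    resp_label = torch.arange(batch_size)                        # response i is gold for context i
    return torch.nn.CrossEntropyLoss()(scores, resp_label)
# ----------------------------------------------------------------------------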
def evaluate(args, model, dev_loader, tokenizer, prefix=""):
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
eval_dataloader = dev_loader
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
inputs = batch["context"].clone()
#inputs, labels = mask_tokens(inputs, tokenizer, args) if args.mlm else (inputs, inputs)
if args.mlm:
inputs, labels = mask_tokens(inputs, tokenizer, args)
else:
labels = inputs.clone()
masked_indices = (labels == 0)
labels[masked_indices] = -100
inputs = inputs.to(args.device)
labels = labels.to(args.device)
with torch.no_grad():
outputs = model(inputs,
labels=labels, #masked_lm_labels
attention_mask=inputs>0) if args.mlm else model(inputs, labels=labels)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {"perplexity": perplexity, "loss":eval_loss}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
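# --- Illustrative sketch (not part of the original script) -----------------
# evaluate() above reports perplexity as exp(mean per-batch LM loss). A tiny
# demo of that relation; the loss values below are hypothetical:
def _demo_perplexity_from_eval_losses(batch_losses=(2.10, 1.95, 2.30)):
    import torch
    eval_loss = sum(batch_losses) / len(batch_losses)
    perplexity = torch.exp(torch.tensor(eval_loss))
    return {"perplexity": perplexity.item(), "loss": eval_loss}
# ----------------------------------------------------------------------------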
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--model_type", default="bert", type=str, help="The model architecture to be fine-tuned.")
parser.add_argument(
"--model_name_or_path",
default="bert-base-uncased",
type=str,
help="The model checkpoint for weights initialization.",
)
parser.add_argument(
"--mlm", action="store_true", help="Train with masked-language modeling loss instead of language modeling."
)
parser.add_argument(
"--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
)
parser.add_argument(
"--config_name",
default="",
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument(
"--block_size",
default=-1,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=300, type=int, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=100, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=100, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--save_total_limit",
type=int,
default=1,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
## My add
parser.add_argument(
'-dpath','--data_path',
help='path to dataset folder',
required=False,
default='/export/home/dialog_datasets',
type=str)
parser.add_argument(
'-ds','--dataset',
help='which datasets to use for training.',
required=False,
default='["multiwoz", "camrest676", "woz", "smd", "frames", "msre2e", "taskmaster", "metalwoz", "schema"]',
type=str)
parser.add_argument(
'-hds','--holdout_dataset',
help='which datasets to hold out as the dev and test sets.',
required=False,
default='["multiwoz"]',
type=str)
parser.add_argument(
'-task','--task',
help='task in ["nlu", "dst", "dm", "nlg", "e2e"] to decide which dataloader to use',
default="usdl",
required=False)
parser.add_argument(
'--usr_token',
help='token used to identify user utterances',
required=False,
default="[USR]",
type=str)
parser.add_argument(
'--sys_token',
help='token used to identify system responses',
required=False,
default="[SYS]",
type=str)
parser.add_argument(
"--add_rs_loss",
action="store_true",
help="whether to add RCL loss during training")
parser.add_argument(
"--nb_addition_negresponse_per_sample",
default=0,
type=int,
help="number of negative responses per sample added to the in-batch negative samples")
parser.add_argument(
"--negative_sampling_by_kmeans",
action="store_true",
help="whether use kmeans to select negative samples or select randomly",)
parser.add_argument(
"--nb_kmeans",
default=500,
type=int,
help="number of kmeans clusters")
parser.add_argument(
"--patience",
type=int,
default=15,
help="waiting to earlystop")
## data reading related setting (can be ignored here)
parser.add_argument(
'--max_line', help='maximum number of lines to read (for quick testing)', required=False, default=None, type=int)
parser.add_argument(
'--example_type', help='type in ["turn", "dial"] for data reading', required=False, default="turn")
parser.add_argument(
"--train_data_ratio", default=1.0, type=float, help="")
parser.add_argument(
"--ratio_by_random", action="store_true", help="read data by random with a defined ratio")
parser.add_argument(
"--domain_act", action="store_true", help="use domain_act for mwoz")
parser.add_argument(
'-task_name', '--task_name', help='', required=False, default="")
parser.add_argument(
"--only_last_turn", action="store_true", help="")
parser.add_argument(
"--oracle_domain", action="store_true", help="")
parser.add_argument(
"--ontology_version", default="", type=str, help="['', '1.0']")
parser.add_argument(
"--dstlm", action="store_true", help="")
parser.add_argument(
"--max_seq_length", default=512, type=int, help="")
parser.add_argument(
"--nb_shots", default=-1, type=int, help="")
parser.add_argument(
"--domain", default="all", type=str, help="")
args = parser.parse_args()
args_dict = vars(args)
if args.model_type in ["bert", "roberta", "distilbert", "camembert"] and not args.mlm:
raise ValueError(
"BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training downloads the model & vocab
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
#config.output_hidden_states = True
config.gradient_checkpointing=True
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.block_size <= 0:
args.block_size = (
tokenizer.max_len_single_sentence
) # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model.to(args.device)
# Add new tokens to the vocabulary and embeddings of our model
tokenizer.add_tokens([args.sys_token, args.usr_token])
model.resize_token_embeddings(len(tokenizer))
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training downloads the model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
print('Start training...')
if args.do_train:
# Barrier to make sure only the first process in distributed training processes the dataset,
# and the others will use the cache
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
## Read datasets and create global set of candidate responses
datasets = {}
cand_uttr_sys = set()
for ds_name in ast.literal_eval(args.dataset):
data_trn, data_dev, data_tst, data_meta = globals()["prepare_data_{}".format(ds_name)](args_dict)
# held-out mwoz for now
if ds_name in ast.literal_eval(args.holdout_dataset):
datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
else:
datasets[ds_name] = {"train": data_trn + data_dev + data_tst, "dev":[], "test": [], "meta":data_meta}
for d in datasets[ds_name]["train"]:
cand_uttr_sys.add(d["turn_sys"])
cand_uttr_sys.update(set([sent for si, sent in enumerate(d["dialog_history"]) if si%2==0]))
unified_meta = get_unified_meta(datasets)
## process candidate responses
if args.nb_addition_negresponse_per_sample > 0:
cand_uttr_sys = list(cand_uttr_sys)
cand_uttr_sys = [s.lower() for s in cand_uttr_sys if len(s.split(" ")) <= 100] # remove too long responses
cand_uttr_sys_tokens = []
for cand in tqdm(cand_uttr_sys):
cand_ids = tokenizer.tokenize("[CLS] [SYS]") + tokenizer.tokenize(cand)
cand_ids = torch.tensor(tokenizer.convert_tokens_to_ids(cand_ids))
cand_uttr_sys_tokens.append(cand_ids)
cand_uttr_sys_dict = {a:b for a, b in zip(cand_uttr_sys, cand_uttr_sys_tokens)}
else:
cand_uttr_sys_dict = {}
## batch size
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
args_dict["batch_size"] = args.train_batch_size
args_dict["eval_batch_size"] = args.eval_batch_size
print(os.environ["CUDA_VISIBLE_DEVICES"])
print(args.train_batch_size, args.eval_batch_size, args.n_gpu, args.device)
## Create Dataloader
trn_loader = get_loader(args_dict, "train", tokenizer, datasets, unified_meta, "train")
dev_loader = get_loader(args_dict, "dev" , tokenizer, datasets, unified_meta, "dev")
## additional information for negative sampling
others = {}
if args.negative_sampling_by_kmeans:
others["datasets"] = datasets
others["unified_meta"] = unified_meta
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, trn_loader, dev_loader, model, tokenizer, cand_uttr_sys_dict, others)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, dev_loader, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
print(results)
if __name__ == "__main__":
main()
| 44,684 | 42.595122 | 141 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/main_cl-continue.py
|
from tqdm import tqdm
import torch.nn as nn
import ast
import glob
import numpy as np
import copy
# utils
from utils.config import *
from utils.utils_general import *
from utils.utils_multiwoz_cl import *
from utils.utils_oos_intent import *
from utils.utils_universal_act import *
# models
from models.BERT_DST_Picklist import *
from models.dual_encoder_ranking import *
# Huggingface models
from transformers import *
import logging
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## model selection
MODELS = {"bert": (BertModel, BertTokenizer, BertConfig),
"todbert": (BertModel, BertTokenizer, BertConfig),
"gpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
"todgpt2": (GPT2Model, GPT2Tokenizer, GPT2Config),
"dialogpt": (AutoModelWithLMHead, AutoTokenizer, GPT2Config),
"albert": (AlbertModel, AlbertTokenizer, AlbertConfig),
"roberta": (RobertaModel, RobertaTokenizer, RobertaConfig),
"distilbert": (DistilBertModel, DistilBertTokenizer, DistilBertConfig),
"electra": (ElectraModel, ElectraTokenizer, ElectraConfig),
"xlmroberta": (XLMRobertaModel, XLMRobertaTokenizer, XLMRobertaConfig)}
## Fix torch random seed
if args["fix_rand_seed"]:
torch.manual_seed(args["rand_seed"])
#logging.info("Running Tgt Language: {}".format(args["tgt_lang"]))
## Reading data and create data loaders
datasets = {}
for ds_name in ast.literal_eval(args["dataset"]):
data_trn, data_dev, data_tst, data_meta = globals()["prepare_data_{}".format(ds_name)](args)
datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
unified_meta = get_unified_meta(datasets)
if "resp_cand_trn" not in unified_meta.keys(): unified_meta["resp_cand_trn"] = {}
args["unified_meta"] = unified_meta
## Create vocab and model class
args["model_type"] = args["model_type"].lower()
model_class, tokenizer_class, config_class = MODELS[args["model_type"]]
tokenizer = tokenizer_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
args["model_class"] = model_class
args["tokenizer"] = tokenizer
if args["model_name_or_path"]:
config = config_class.from_pretrained(args["model_name_or_path"], cache_dir=args["cache_dir"])
else:
config = config_class()
args["config"] = config
args["num_labels"] = unified_meta["num_labels"]
if args["continue_ft"]:
datasets[ds_name] = {"train": data_dev, "dev":data_dev, "test": data_tst, "meta":data_meta} #Our training becomes dev set, ignore dev, and eval on test set
else:
datasets[ds_name] = {"train": data_trn, "dev":data_dev, "test": data_tst, "meta":data_meta}
print(args["continue_model_path"])
## Training and Testing Loop
if args["do_train"]:
result_runs = []
output_dir_origin = str(args["output_dir"])
## Setup logger
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=os.path.join(args["output_dir"], "train.log"),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("Num_labels: {}".format(args["num_labels"]))
## training loop
for run in range(args["nb_runs"]):
## Setup random seed and output dir
rand_seed = SEEDS[run]
if args["fix_rand_seed"]:
torch.manual_seed(rand_seed)
args["rand_seed"] = rand_seed
args["output_dir"] = os.path.join(output_dir_origin, "run{}".format(run))
os.makedirs(args["output_dir"], exist_ok=False)
logging.info("Running Random Seed: {}".format(rand_seed))
## Loading model
model = globals()[args['my_model']](args)
print(args["continue_model_path"])
if args['continue_ft']:
print("MODEL {} LOADED".format(args["continue_model_path"]))
if torch.cuda.is_available():
model.load_state_dict(torch.load(args["continue_model_path"]))
else:
model.load_state_dict(torch.load(args["continue_model_path"], lambda storage, loc: storage))
if torch.cuda.is_available(): model = model.cuda()
logging.info("Done loading model...")
## Create Dataloader
trn_loader = get_loader(args, "train", tokenizer, datasets, unified_meta) # here will be dev set
dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
logging.info("Train: {}, Dev: {}, Test: {}".format(len(trn_loader), len(dev_loader), len(tst_loader)))
logging.info("Epochs: {}".format(args["epoch"]))
logging.info("Continue model path: {}".format(args["continue_model_path"]))
## Create TF Writer
tb_writer = SummaryWriter(comment=args["output_dir"].replace("/", "-"))
# Start training process with early stopping
loss_best, acc_best, cnt, train_step = 1e10, -1, 0, 0
try:
for epoch in range(args["epoch"]):
logging.info("Epoch:{}".format(epoch+1))
train_loss = 0
pbar = tqdm(trn_loader)
for i, d in enumerate(pbar):
model.train()
outputs = model(d)
train_loss += outputs["loss"]
train_step += 1
pbar.set_description("Training Loss: {:.4f}".format(train_loss/(i+1)))
## Dev Evaluation
if (train_step % args["eval_by_step"] == 0 and args["eval_by_step"] != -1) or \
(i == len(pbar)-1 and args["eval_by_step"] == -1):
model.eval()
dev_loss = 0
preds, labels = [], []
ppbar = tqdm(dev_loader)
for d in ppbar:
with torch.no_grad():
outputs = model(d)
#print(outputs)
dev_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
dev_loss = dev_loss / len(dev_loader)
results = model.evaluation(preds, labels)
dev_acc = results[args["earlystop"]] if args["earlystop"] != "loss" else dev_loss #joint_accuracy
## write to tensorboard
tb_writer.add_scalar("train_loss", train_loss/(i+1), train_step)
tb_writer.add_scalar("eval_loss", dev_loss, train_step)
tb_writer.add_scalar("eval_{}".format(args["earlystop"]), dev_acc, train_step)
if args["continue_ft"]:
loss_best = dev_loss
acc_best = dev_acc
cnt = 0 # reset
if args["not_save_model"]:
model_clone = globals()[args['my_model']](args)
model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
else:
output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
if args["n_gpu"] == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
else:
if (dev_loss < loss_best and args["earlystop"] == "loss") or \
(dev_acc > acc_best and args["earlystop"] != "loss"):
loss_best = dev_loss
acc_best = dev_acc
cnt = 0 # reset
if args["not_save_model"]:
model_clone = globals()[args['my_model']](args)
model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
else:
output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
if args["n_gpu"] == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
else:
cnt += 1
logging.info("[Info] Early stop count: {}/{}...".format(cnt, args["patience"]))
if cnt > args["patience"]:
logging.info("Ran out of patient, early stop...")
break
logging.info("Trn loss {:.4f}, Dev loss {:.4f}, Dev {} {:.4f}".format(train_loss/(i+1),
dev_loss,
args["earlystop"],
dev_acc))
if cnt > args["patience"]:
tb_writer.close()
break
if args["continue_ft"]:
if args["not_save_model"]:
model_clone = globals()[args['my_model']](args)
model_clone.load_state_dict(copy.deepcopy(model.state_dict()))
else:
output_model_file = os.path.join(args["output_dir"], "pytorch_model.bin")
if args["n_gpu"] == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
logging.info("[Info] Model saved at epoch {} step {}".format(epoch, train_step))
except KeyboardInterrupt:
logging.info("[Warning] Earlystop by KeyboardInterrupt")
## Load the best model
model = globals()[args['my_model']](args)
if args["not_save_model"]:
model.load_state_dict(copy.deepcopy(model_clone.state_dict()))
else:
# Start evaluating on the test set
if torch.cuda.is_available():
model.load_state_dict(torch.load(output_model_file))
else:
model.load_state_dict(torch.load(output_model_file, lambda storage, loc: storage))
if torch.cuda.is_available():
model = model.cuda()
model.eval()
## Run test set evaluation
pbar = tqdm(tst_loader)
for nb_eval in range(args["nb_evals"]):
test_loss = 0
preds, labels = [], []
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
result_runs.append(results)
logging.info("[{}] Test Results: ".format(nb_eval) + str(results))
## Average results over runs
if args["nb_runs"] > 1:
f_out = open(os.path.join(output_dir_origin, "eval_results_multi-runs.txt"), "w")
f_out.write("Average over {} runs and {} evals \n".format(args["nb_runs"], args["nb_evals"]))
for key in results.keys():
mean = np.mean([r[key] for r in result_runs])
std = np.std([r[key] for r in result_runs])
f_out.write("{}: mean {} std {} \n".format(key, mean, std))
f_out.close()
else:
## Load Model
print("[Info] Loading model from {}".format(args['my_model']))
model = globals()[args['my_model']](args)
if args["load_path"]:
print("MODEL {} LOADED".format(args["load_path"]))
if torch.cuda.is_available():
model.load_state_dict(torch.load(args["load_path"]))
else:
model.load_state_dict(torch.load(args["load_path"], lambda storage, loc: storage))
else:
print("[WARNING] No trained model is loaded...")
if torch.cuda.is_available():
model = model.cuda()
print("[Info] Start Evaluation on dev and test set...")
dev_loader = get_loader(args, "dev" , tokenizer, datasets, unified_meta)
tst_loader = get_loader(args, "test" , tokenizer, datasets, unified_meta, shuffle=args["task_name"]=="rs")
model.eval()
for d_eval in ["tst"]: #["dev", "tst"]:
f_w = open(os.path.join(args["output_dir"], "{}_results.txt".format(d_eval)), "w")
## Start evaluating on the test set
test_loss = 0
preds, labels = [], []
pbar = tqdm(locals()["{}_loader".format(d_eval)])
for d in pbar:
with torch.no_grad():
outputs = model(d)
test_loss += outputs["loss"]
preds += [item for item in outputs["pred"]]
labels += [item for item in outputs["label"]]
test_loss = test_loss / len(tst_loader)
results = model.evaluation(preds, labels)
print("{} Results: {}".format(d_eval, str(results)))
f_w.write(str(results))
f_w.close()
| 14,594 | 46.386364 | 159 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/models/dual_encoder_ranking.py
|
import os.path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
from sklearn.metrics import f1_score #, average_precision_score
import numpy as np
from transformers import *
import logging
class dual_encoder_ranking(nn.Module):
def __init__(self, args): #, num_labels, device):
super(dual_encoder_ranking, self).__init__()
self.args = args
self.xeloss = nn.CrossEntropyLoss()
self.n_gpu = args["n_gpu"]
### Utterance Encoder
self.utterance_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = args["cache_dir"])
if self.args["fix_encoder"]:
for p in self.utterance_encoder.parameters():
p.requires_grad = False
## Prepare Optimizer
def get_optimizer_grouped_parameters(model):
param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
'lr': args["learning_rate"]},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
'lr': args["learning_rate"]},
]
return optimizer_grouped_parameters
if self.n_gpu == 1:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(self)
else:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(self.module)
self.optimizer = AdamW(optimizer_grouped_parameters,
lr=args["learning_rate"],)
#warmup=args["warmup_proportion"],
#t_total=t_total)
def optimize(self):
self.loss_grad.backward()
clip_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args["grad_clip"])
self.optimizer.step()
def forward(self, data):
#input_ids, input_len, labels=None, n_gpu=1, target_slot=None):
self.optimizer.zero_grad()
batch_size = data["context"].size(0)
#max_seq_len = 256
interval = 25
start_list = list(np.arange(0, batch_size, interval))
end_list = start_list[1:] + [None]
context_outputs, response_outputs = [], []
#logging.info("start: {}".format(start_list))
#logging.info("end: {}".format(end_list))
for start, end in zip(start_list, end_list):
#logging.info("{}:{}".format(start, end))
inputs_con = {"input_ids": data["context"][start:end],
"attention_mask": (data["context"][start:end] > 0).long()}
inputs_res = {"input_ids": data["response"][start:end],
"attention_mask": (data["response"][start:end] > 0).long()}
#print(inputs_con, inputs_res)
if "bert" in self.args["model_type"]:
context_output = self.utterance_encoder(**inputs_con)[1] #hidden_state, pooler_output
response_output = self.utterance_encoder(**inputs_res)[1]#hidden_state, pooler_output
elif self.args["model_type"] == "gpt2":
context_output = self.utterance_encoder(**inputs_con)[0].mean(1)
response_output = self.utterance_encoder(**inputs_res)[0].mean(1)
elif self.args["model_type"] == "dialogpt":
transformer_outputs = self.utterance_encoder.transformer(**inputs_con)
context_output = transformer_outputs[0].mean(1)
transformer_outputs = self.utterance_encoder.transformer(**inputs_res)
response_output = transformer_outputs[0].mean(1)
# print(self.utterance_encoder(**inputs_con))
# print(self.utterance_encoder(**inputs_res))
context_outputs.append(context_output.cpu())
response_outputs.append(response_output.cpu())
# evaluation for k-to-100
if (not self.training) and (batch_size < self.args["eval_batch_size"]):
response_outputs.append(self.final_response_output[:self.args["eval_batch_size"]-batch_size, :])
final_context_output = torch.cat(context_outputs, 0)
final_response_output = torch.cat(response_outputs, 0)
if torch.cuda.is_available():
final_context_output = final_context_output.cuda()
final_response_output = final_response_output.cuda()
if (not self.training):
self.final_response_output = final_response_output.cpu()
# mat
logits = torch.matmul(final_context_output, final_response_output.transpose(1, 0))
# loss
labels = torch.tensor(np.arange(batch_size))
if torch.cuda.is_available(): labels = labels.cuda()
loss = self.xeloss(logits, labels)
if self.training:
self.loss_grad = loss
self.optimize()
predictions = np.argsort(logits.detach().cpu().numpy(), axis=1) #torch.argmax(logits, -1)
outputs = {"loss":loss.item(),
"pred":predictions,
"label":np.arange(batch_size)}
return outputs
def evaluation(self, preds, labels):
assert len(preds) == len(labels)
preds = np.array(preds)
labels = np.array(labels)
def _recall_topk(preds_top10, labels, k):
preds = preds_top10[:, -k:]
acc = 0
for li, label in enumerate(labels):
if label in preds[li]: acc += 1
acc = acc / len(labels)
return acc
results = {"top-1": _recall_topk(preds, labels, 1),
"top-3": _recall_topk(preds, labels, 3),
"top-5": _recall_topk(preds, labels, 5),
"top-10": _recall_topk(preds, labels, 10)}
print(results)
return results
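# --- Illustrative sketch (not part of the original file) -------------------
# evaluation() above measures recall@k on argsort'ed similarity scores, where
# the gold response of row i has index i. A toy, self-contained version:
def _demo_recall_at_1():
    import numpy as np
    logits = np.array([[0.9, 0.1, 0.2],                  # hypothetical context-response scores
                       [0.3, 0.2, 0.8],
                       [0.1, 0.2, 0.7]])
    preds = np.argsort(logits, axis=1)                   # ascending; last column = best match
    labels = np.arange(len(logits))
    hits = [labels[i] in preds[i, -1:] for i in range(len(labels))]
    return sum(hits) / len(hits)                         # 2/3 for this toy matrix
# ----------------------------------------------------------------------------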
| 6,321 | 40.320261 | 132 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/models/BERT_DST_Picklist.py
|
import os.path
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn import CosineEmbeddingLoss
import numpy as np
from transformers import *
def _gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initialy created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
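# --- Illustrative sketch (not part of the original file) -------------------
# The docstring above quotes the tanh-based GELU approximation used by
# OpenAI GPT; a small check that it stays close to the exact erf form:
def _demo_gelu_tanh_approximation():
    import math
    import torch
    x = torch.linspace(-3.0, 3.0, steps=13)              # hypothetical sample points
    exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return (exact - approx).abs().max()                  # well below 1e-2 on this range
# ----------------------------------------------------------------------------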
class BeliefTracker(nn.Module):
def __init__(self, args):
super(BeliefTracker, self).__init__()
self.args = args
self.n_gpu = args["n_gpu"]
self.hidden_dim = args["hdd_size"]
self.rnn_num_layers = args["num_rnn_layers"]
self.zero_init_rnn = args["zero_init_rnn"]
self.num_direct = 2 if self.args["bidirect"] else 1
self.num_labels = [len(v) for k, v in args["unified_meta"]["slots"].items()]
self.num_slots = len(self.num_labels)
self.tokenizer = args["tokenizer"]
self.slots = [k for k, v in self.args["unified_meta"]["slots"].items()]
self.slot_value2id_dict = self.args["unified_meta"]["slots"]
self.slot_id2value_dict = {}
for k, v in self.slot_value2id_dict.items():
self.slot_id2value_dict[k] = {vv: kk for kk, vv in v.items()}
#print("self.num_slots", self.num_slots)
### Utterance Encoder
self.utterance_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = self.args["cache_dir"])
self.bert_output_dim = args["config"].hidden_size
#self.hidden_dropout_prob = self.utterance_encoder.config.hidden_dropout_prob
if self.args["fix_encoder"]:
print("[Info] Utterance Encoder does not requires grad...")
for p in self.utterance_encoder.parameters():
p.requires_grad = False
### slot, slot-value Encoder (not trainable)
self.sv_encoder = args["model_class"].from_pretrained(self.args["model_name_or_path"], cache_dir = self.args["cache_dir"])
print("[Info] SV Encoder does not requires grad...")
for p in self.sv_encoder.parameters():
p.requires_grad = False
#self.slot_lookup = nn.Embedding(self.num_slots, self.bert_output_dim)
self.value_lookup = nn.ModuleList([nn.Embedding(num_label, self.bert_output_dim) for num_label in self.num_labels])
### RNN Belief Tracker
#self.nbt = None
#self.linear = nn.Linear(self.hidden_dim, self.bert_output_dim)
#self.layer_norm = nn.LayerNorm(self.bert_output_dim)
### Classifier
self.nll = CrossEntropyLoss(ignore_index=-1)
### Etc.
#self.dropout = nn.Dropout(self.hidden_dropout_prob)
### My Add
self.project_W_1 = nn.ModuleList([nn.Linear(self.bert_output_dim, self.bert_output_dim) \
for _ in range(self.num_slots)])
self.project_W_2 = nn.ModuleList([nn.Linear(2*self.bert_output_dim, self.bert_output_dim) \
for _ in range(self.num_slots)])
self.project_W_3 = nn.ModuleList([nn.Linear(self.bert_output_dim, 1) \
for _ in range(self.num_slots)])
if self.args["gate_supervision_for_dst"]:
self.gate_classifier = nn.Linear(self.bert_output_dim, 2)
self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token
## Prepare Optimizer
def get_optimizer_grouped_parameters(model):
param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
'lr': args["learning_rate"]},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
'lr': args["learning_rate"]},
]
return optimizer_grouped_parameters
if self.n_gpu == 1:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(self)
else:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(self.module)
self.optimizer = AdamW(optimizer_grouped_parameters,
lr=args["learning_rate"],)
#warmup=args["warmup_proportion"])
#t_total=t_total)
self.initialize_slot_value_lookup()
def optimize(self):
self.loss_grad.backward()
clip_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args["grad_clip"])
self.optimizer.step()
def initialize_slot_value_lookup(self, max_seq_length=32):
self.sv_encoder.eval()
label_ids = []
for dslot, value_dict in self.args["unified_meta"]["slots"].items():
label_id = []
value_dict_rev = {v:k for k, v in value_dict.items()}
for i in range(len(value_dict)):
label = value_dict_rev[i]
label = " ".join([i for i in label.split(" ") if i != ""])
label_tokens = [self.start_token] + self.tokenizer.tokenize(label) + [self.sep_token]
label_token_ids = self.tokenizer.convert_tokens_to_ids(label_tokens)
label_len = len(label_token_ids)
label_padding = [0] * (max_seq_length - len(label_token_ids))
label_token_ids += label_padding
assert len(label_token_ids) == max_seq_length
label_id.append(label_token_ids)
label_id = torch.tensor(label_id).long()
label_ids.append(label_id)
for s, label_id in enumerate(label_ids):
inputs = {"input_ids":label_id, "attention_mask":(label_id > 0).long()}
if self.args["sum_token_emb_for_value"]:
hid_label = self.utterance_encoder.embeddings(input_ids=label_id).sum(1)
else:
if "bert" in self.args["model_type"]:
hid_label = self.sv_encoder(**inputs)[0]
hid_label = hid_label[:, 0, :]
elif self.args["model_type"] == "gpt2":
hid_label = self.sv_encoder(**inputs)[0]
hid_label = hid_label.mean(1)
elif self.args["model_type"] == "dialogpt":
transformer_outputs = self.sv_encoder.transformer(**inputs)[0]
hid_label = transformer_outputs.mean(1)
hid_label = hid_label.detach()
self.value_lookup[s] = nn.Embedding.from_pretrained(hid_label, freeze=True)
self.value_lookup[s].padding_idx = -1
print("Complete initialization of slot and value lookup")
def forward(self, data):#input_ids, input_len, labels, gate_label, n_gpu=1, target_slot=None):
batch_size = data["context"].size(0)
labels = data["belief_ontology"]
# Utterance encoding
inputs = {"input_ids": data["context"], "attention_mask":(data["context"] > 0).long()}
if "bert" in self.args["model_type"]:
hidden = self.utterance_encoder(**inputs)[0]
hidden_rep = hidden[:, 0, :]
elif self.args["model_type"] == "gpt2":
hidden = self.utterance_encoder(**inputs)[0]
hidden_rep = hidden.mean(1)
elif self.args["model_type"] == "dialogpt":
#outputs = self.utterance_encoder(**inputs)[2] # 0 is vocab logits, 1 is a tuple of attn head
transformer_outputs = self.utterance_encoder.transformer(
data["context"],
attention_mask=(data["context"] > 0).long()
)
hidden = transformer_outputs[0]
hidden_rep = hidden.mean(1)
# Label (slot-value) encoding
loss = 0
pred_slot = []
for slot_id in range(self.num_slots): ## note: target_slots are successive
# loss calculation
hid_label = self.value_lookup[slot_id].weight # v * d
num_slot_labels = hid_label.size(0)
_hidden = _gelu(self.project_W_1[slot_id](hidden_rep))
_hidden = torch.cat([hid_label.unsqueeze(0).repeat(batch_size, 1, 1), _hidden.unsqueeze(1).repeat(1, num_slot_labels, 1)], dim=2)
_hidden = _gelu(self.project_W_2[slot_id](_hidden))
_hidden = self.project_W_3[slot_id](_hidden)
_dist = _hidden.squeeze(2) # b * num_slot_labels
_, pred = torch.max(_dist, -1)
pred_slot.append(pred.unsqueeze(1))
#output.append(_dist)
if labels is not None:
_loss = self.nll(_dist, labels[:, slot_id])
#loss_slot.append(_loss.item())
loss += _loss
predictions = torch.cat(pred_slot, 1).detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
if self.training:
self.loss_grad = loss
self.optimize()
if self.args["error_analysis"]:
for bsz_i, (pred, label) in enumerate(zip(np.array(predictions), np.array(labels))):
assert len(pred) == len(label)
joint = 0
pred_arr, gold_arr = [], []
for i, p in enumerate(pred):
pred_str = self.slot_id2value_dict[self.slots[i]][p]
gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]
pred_arr.append(self.slots[i]+"-"+pred_str)
gold_arr.append(self.slots[i]+"-"+gold_str)
if pred_str == gold_str or pred_str in gold_str.split("|"):
joint += 1
#if joint == len(pred):
print(data["context_plain"][bsz_i])
print("Gold:", [s for s in gold_arr if s.split("-")[2] != "none"])
print("Pred:", [s for s in pred_arr if s.split("-")[2] != "none"])
print()
outputs = {"loss":loss.item(), "pred":predictions, "label":labels}
return outputs
def evaluation(self, preds, labels):
preds = np.array(preds)
labels = np.array(labels)
slot_acc, joint_acc, slot_acc_total, joint_acc_total = 0, 0, 0, 0
for pred, label in zip(preds, labels):
joint = 0
assert len(pred) == len(label)
for i, p in enumerate(pred):
pred_str = self.slot_id2value_dict[self.slots[i]][p]
gold_str = self.slot_id2value_dict[self.slots[i]][label[i]]
if pred_str == gold_str or pred_str in gold_str.split("|"):
slot_acc += 1
joint += 1
slot_acc_total += 1
if joint == len(pred):
joint_acc += 1
joint_acc_total += 1
joint_acc = joint_acc / joint_acc_total
slot_acc = slot_acc / slot_acc_total
results = {"joint_acc":joint_acc, "slot_acc":slot_acc}
print("Results 1: ", results)
return results
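# --- Illustrative sketch (not part of the original file) -------------------
# evaluation() above distinguishes slot accuracy (per-slot hits) from joint
# accuracy (all slots in a turn correct). A toy example with 2 turns x 3 slots;
# the value ids are hypothetical:
def _demo_joint_vs_slot_accuracy():
    import numpy as np
    preds = np.array([[1, 0, 2], [1, 1, 0]])             # predicted slot-value ids per turn
    golds = np.array([[1, 0, 2], [1, 2, 0]])             # gold slot-value ids per turn
    slot_acc = (preds == golds).mean()                   # 5/6
    joint_acc = (preds == golds).all(axis=1).mean()      # 1/2
    return slot_acc, joint_acc
# ----------------------------------------------------------------------------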
| 11,918 | 42.5 | 141 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_smd.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, file_name, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
with open(file_name) as f:
dials = json.load(f)
cnt_lin = 1
for dial_dict in dials:
dialog_history = []
turn_usr = ""
turn_sys = ""
for ti, turn in enumerate(dial_dict["dialogue"]):
if turn["turn"] == "driver":
turn_usr = turn["data"]["utterance"].lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = ti % 2
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if (not args["only_last_turn"]):
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
elif turn["turn"] == "assistant":
turn_sys = turn["data"]["utterance"].lower().strip()
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_smd(args):
ds_name = "SMD"
example_type = args["example_type"]
max_line = args["max_line"]
file_trn = os.path.join(args["data_path"], "kvret/kvret_train_public.json")
file_dev = os.path.join(args["data_path"], "kvret/kvret_dev_public.json")
file_tst = os.path.join(args["data_path"], "kvret/kvret_test_public.json")
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, file_trn, max_line, ds_name)
pair_dev = globals()["read_langs_{}".format(_example_type)](args, file_dev, max_line, ds_name)
pair_tst = globals()["read_langs_{}".format(_example_type)](args, file_tst, max_line, ds_name)
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
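# --- Illustrative sketch (not part of the original file) -------------------
# read_langs_turn() above emits one example per driver turn, pairing it with
# the preceding assistant utterance and the running dialog history. A toy
# walk-through on a hypothetical two-exchange dialogue:
def _demo_smd_turn_pairing():
    dialogue = [("driver", "find me a gas station"),
                ("assistant", "there is one 2 miles away"),
                ("driver", "thanks, navigate there")]
    examples, history, turn_sys = [], [], ""
    for speaker, uttr in dialogue:
        if speaker == "driver":
            examples.append({"turn_usr": uttr, "turn_sys": turn_sys,
                             "dialog_history": list(history)})
            history.extend([turn_sys, uttr])
        else:
            turn_sys = uttr
    return examples                                       # 2 examples; history grows per turn
# ----------------------------------------------------------------------------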
| 2,883 | 34.604938 | 98 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/dataloader_nlg.py
|
import torch
import torch.utils.data as data
import random
from .utils_function import to_cuda, merge
# from .config import *
class Dataset_nlg(torch.utils.data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512, max_sys_resp_len=50):
"""Reads source and target sequences from txt files."""
self.data = data_info
self.tokenizer = tokenizer
self.max_length = max_length
self.num_total_seqs = len(data_info["ID"])
self.usr_token = args["usr_token"]
self.sys_token = args["sys_token"]
self.unified_meta = unified_meta
self.args = args
self.mode = mode
if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
self.start_token = self.tokenizer.cls_token
self.sep_token = self.tokenizer.sep_token
else:
self.start_token = self.tokenizer.bos_token
self.sep_token = self.tokenizer.eos_token
self.resp_cand_trn = list(self.unified_meta["resp_cand_trn"])
random.shuffle(self.resp_cand_trn)
self.max_sys_resp_len = max_sys_resp_len
self.others = unified_meta["others"]
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
if self.args["example_type"] == "turn":
context_plain = self.get_concat_context(self.data["dialog_history"][index])
context_plain_delex = self.get_concat_context(self.data["dialog_history_delex"][index])
context = self.preprocess(context_plain)
context_delex = self.preprocess(context_plain_delex)
response_plain = "{} ".format(self.sys_token) + self.data["turn_sys"][index]
response = self.preprocess(response_plain)[:self.max_sys_resp_len]
response_plain_delex = "{} ".format(self.sys_token) + self.data["turn_sys_delex"][index]
response_delex = self.preprocess(response_plain_delex)
utterance_plain = "{} ".format(self.usr_token) + self.data["turn_usr"][index]
utterance = self.preprocess(utterance_plain)
utterance_plain_delex = "{} ".format(self.usr_token) + self.data["turn_usr_delex"][index]
utterance_delex = self.preprocess(utterance_plain_delex)
else:
raise NotImplementedError
item_info = {
"ID":self.data["ID"][index],
"turn_id":self.data["turn_id"][index],
"context":context,
"context_plain":context_plain,
"context_delex":context_delex,
"context_plain_delex":context_plain_delex,
"response":response,
"response_plain":response_plain,
"response_delex":response_delex,
"response_plain_delex":response_plain_delex,
"utterance":utterance,
"utterance_plain":utterance_plain,
"utterance_delex":utterance_delex,
"utterance_plain_delex":utterance_plain_delex}
'''
Add additional negative samples per training sample to make the selection harder;
we found that adding these slightly improves the response selection performance.
'''
if self.args["nb_neg_sample_rs"] != 0 and self.mode == "train":
if self.args["sample_negative_by_kmeans"]:
try:
cur_cluster = self.others["ToD_BERT_SYS_UTTR_KMEANS"][self.data["turn_sys"][index]]
candidates = self.others["KMEANS_to_SENTS"][cur_cluster]
nb_selected = min(self.args["nb_neg_sample_rs"], len(candidates))
try:
start_pos = random.randint(0, len(candidates)-nb_selected-1)
except:
start_pos = 0
sampled_neg_resps = candidates[start_pos:start_pos+nb_selected]
except:
start_pos = random.randint(0, len(self.resp_cand_trn)-self.args["nb_neg_sample_rs"]-1)
sampled_neg_resps = self.resp_cand_trn[start_pos:start_pos+self.args["nb_neg_sample_rs"]]
else:
start_pos = random.randint(0, len(self.resp_cand_trn)-self.args["nb_neg_sample_rs"]-1)
sampled_neg_resps = self.resp_cand_trn[start_pos:start_pos+self.args["nb_neg_sample_rs"]]
neg_resp_arr, neg_resp_idx_arr = [], []
for neg_resp in sampled_neg_resps:
neg_resp_plain = "{} ".format(self.sys_token) + neg_resp
neg_resp_idx = self.preprocess(neg_resp_plain)[:self.max_sys_resp_len]
neg_resp_idx_arr.append(neg_resp_idx)
neg_resp_arr.append(neg_resp_plain)
item_info["neg_resp_idx_arr"] = neg_resp_idx_arr
item_info["neg_resp_arr"] = neg_resp_arr
return item_info
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence):
"""Converts words to ids."""
tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
return story
def get_concat_context(self, dialog_history):
dialog_history_str = ""
for ui, uttr in enumerate(dialog_history):
if ui%2 == 0:
dialog_history_str += "{} {} ".format(self.sys_token, uttr)
else:
dialog_history_str += "{} {} ".format(self.usr_token, uttr)
dialog_history_str = dialog_history_str.strip()
return dialog_history_str
def collate_fn_nlg_turn(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# augment negative samples
if "neg_resp_idx_arr" in item_info.keys():
neg_resp_idx_arr = []
for arr in item_info['neg_resp_idx_arr']:
neg_resp_idx_arr += arr
# add sampled negatives, skipping any that match one of the gold responses
#print('item_info["response"]', item_info["response"])
#print('neg_resp_idx_arr', neg_resp_idx_arr)
for bi, arr in enumerate(item_info['neg_resp_arr']):
for ri, neg_resp in enumerate(arr):
if neg_resp not in item_info["response_plain"]:
item_info["response"] += [item_info['neg_resp_idx_arr'][bi][ri]]
# merge sequences
context, context_lengths = merge(item_info['context'])
context_delex, context_delex_lengths = merge(item_info['context_delex'])
response, response_lengths = merge(item_info["response"])
response_delex, response_delex_lengths = merge(item_info["response_delex"])
utterance, utterance_lengths = merge(item_info["utterance"])
utterance_delex, utterance_delex_lengths = merge(item_info["utterance_delex"])
#print("context", context.size())
#print("response", response.size())
item_info["context"] = to_cuda(context)
item_info["context_lengths"] = context_lengths
item_info["response"] = to_cuda(response)
item_info["response_lengths"] = response_lengths
item_info["utterance"] = to_cuda(utterance)
item_info["utterance_lengths"] = response_lengths
return item_info
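# --- Illustrative sketch (not part of the original file) -------------------
# Dataset_nlg.get_concat_context() above interleaves system/user turns with
# the special tokens passed in via args. A standalone toy version, assuming
# the default "[SYS]" / "[USR]" markers:
def _demo_concat_context(dialog_history=("hello", "i need a hotel"),
                         sys_token="[SYS]", usr_token="[USR]"):
    parts = []
    for ui, uttr in enumerate(dialog_history):
        token = sys_token if ui % 2 == 0 else usr_token   # even index = system turn
        parts.append("{} {}".format(token, uttr))
    return " ".join(parts)                                # "[SYS] hello [USR] i need a hotel"
# ----------------------------------------------------------------------------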
| 7,624 | 43.590643 | 115 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/dataloader_nlu.py
|
import torch
import torch.utils.data as data
# from .config import *
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_nlu(torch.utils.data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
"""Reads source and target sequences from txt files."""
self.data = data_info
self.tokenizer = tokenizer
self.num_total_seqs = len(data_info["ID"])
self.usr_token = args["usr_token"]
self.sys_token = args["sys_token"]
self.max_length = max_length
self.args = args
self.unified_meta = unified_meta
if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
self.start_token = self.tokenizer.cls_token
self.sep_token = self.tokenizer.sep_token
else:
self.start_token = self.tokenizer.bos_token
self.sep_token = self.tokenizer.eos_token
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
if self.args["example_type"] == "turn":
context_plain = "{} {} {} {} {}".format(self.start_token,
self.sys_token,
self.data["turn_sys"][index],
self.usr_token,
self.data["turn_usr"][index])
context = self.preprocess(context_plain)
intent_plain = self.data["intent"][index]
turn_sys_plain = "{} {}".format(self.sys_token, self.data["turn_sys"][index])
turn_sys = self.preprocess(turn_sys_plain)
try:
intent_idx = self.unified_meta["intent"][intent_plain]
except:
intent_idx = -100
try:
domain_idx = self.unified_meta["turn_domain"][self.data["turn_domain"][index]]
except:
domain_idx = -100
try:
turn_slot_one_hot = [0] * len(self.unified_meta["turn_slot"])
for ts in self.data["turn_slot"][index]:
turn_slot_one_hot[self.unified_meta["turn_slot"][ts]] = 1
except:
turn_slot_one_hot = -100
elif self.args["example_type"] == "dial":
print("Not Implemented dial for nlu yet...")
item_info = {
"ID":self.data["ID"][index],
"turn_id":self.data["turn_id"][index],
"turn_domain":self.data["turn_domain"][index],
"context":context,
"context_plain":context_plain,
"intent":intent_idx,
"intent_plain":intent_plain,
"domain_plain":self.data["turn_domain"][index],
"turn_domain": domain_idx,
"turn_sys":turn_sys,
"turn_slot":turn_slot_one_hot,
"turn_sys_plain":turn_sys_plain
}
return item_info
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence):
"""Converts words to ids."""
tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
return story
def collate_fn_nlu_turn(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge(item_info['context'])
turn_sys, _ = merge(item_info["turn_sys"])
intent = torch.tensor(item_info["intent"])
turn_domain = torch.tensor(item_info["turn_domain"])
turn_slot = torch.tensor(item_info["turn_slot"]).float()
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
item_info["intent"] = to_cuda(intent)
item_info["turn_domain"] = to_cuda(turn_domain)
item_info["turn_sys"] = to_cuda(turn_sys)
item_info["turn_slot"] = to_cuda(turn_slot)
return item_info
def collate_fn_nlu_dial(data):
# TODO
return
| 4,513 | 37.254237 | 115 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/dataloader_dm.py
|
import torch
import torch.utils.data as data
# from .config import *
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_dm(torch.utils.data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
"""Reads source and target sequences from txt files."""
self.data = data_info
self.tokenizer = tokenizer
self.num_total_seqs = len(data_info["ID"])
self.usr_token = args["usr_token"]
self.sys_token = args["sys_token"]
self.max_length = max_length
self.args = args
self.unified_meta = unified_meta
if "bert" in self.args["model_type"] or "electra" in self.args["model_type"]:
self.start_token = self.tokenizer.cls_token
self.sep_token = self.tokenizer.sep_token
else:
self.start_token = self.tokenizer.bos_token
self.sep_token = self.tokenizer.eos_token
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
if self.args["example_type"] == "turn":
dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
context_plain = self.concat_dh_sys_usr(dialog_history_str, self.data["turn_sys"][index], self.data["turn_usr"][index])
context = self.preprocess(context_plain)
act_plain = self.data["sys_act"][index]
turn_sys_plain = "{} {}".format(self.sys_token, self.data["turn_sys"][index])
turn_sys = self.preprocess(turn_sys_plain)
act_one_hot = [0] * len(self.unified_meta["sysact"])
for act in act_plain:
act_one_hot[self.unified_meta["sysact"][act]] = 1
elif self.args["example_type"] == "dial":
#TODO
print("Not Implemented dial for nlu yet...")
item_info = {
"ID":self.data["ID"][index],
"turn_id":self.data["turn_id"][index],
"context":context,
"context_plain":context_plain,
"sysact":act_one_hot,
"sysact_plain":act_plain,
"turn_sys":turn_sys}
return item_info
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence):
"""Converts words to ids."""
tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
return story
def concat_dh_sys_usr(self, dialog_history, sys, usr):
return dialog_history + " {} ".format(self.sys_token) + " {} ".format(self.sep_token) + sys + " {} ".format(self.usr_token) + usr
def get_concat_context(self, dialog_history):
dialog_history_str = ""
for ui, uttr in enumerate(dialog_history):
if ui%2 == 0:
dialog_history_str += "{} {} ".format(self.sys_token, uttr)
else:
dialog_history_str += "{} {} ".format(self.usr_token, uttr)
dialog_history_str = dialog_history_str.strip()
return dialog_history_str
def collate_fn_dm_turn(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge(item_info['context'])
turn_sys, _ = merge(item_info["turn_sys"])
sysact = torch.tensor(item_info["sysact"]).float()
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
item_info["sysact"] = to_cuda(sysact)
item_info["turn_sys"] = to_cuda(turn_sys)
return item_info
def collate_fn_dm_dial(data):
# TODO
return
| 4,048 | 37.561905 | 137 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_multiwoz_cl.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
from .multiwoz.fix_label import *
EXPERIMENT_DOMAINS = ["hotel", "train", "restaurant", "attraction", "taxi"] #, "hospital", "police"]
def read_langs_turn(args, file_name, ontology, dialog_act, max_line = None, domain_act_flag=False, update_ont_flag=False):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
SLOTS = [k for k in ontology.keys()]
max_resp_len, max_value_len = 0, 0
domain_counter = {}
response_candidates = set()
add_slot_values = set()
with open(file_name) as f:
dials = json.load(f)
cnt_lin = 1
for dial_dict in dials:
dialog_history, dialog_history_delex = [], []
# Filtering and counting domains
for domain in dial_dict["domains"]:
if domain not in EXPERIMENT_DOMAINS:
continue
if domain not in domain_counter.keys():
domain_counter[domain] = 0
domain_counter[domain] += 1
# Reading data
for ti, turn in enumerate(dial_dict["dialogue"]):
belief_dict = fix_general_label_error(turn["belief_state"], False, SLOTS, args["ontology_version"])
belief_list = [str(k)+'-'+str(v) for k, v in belief_dict.items()]
turn_slot_dict = fix_general_label_error(turn["turn_label"], True, SLOTS, args["ontology_version"])
turn_slot_list = [str(k)+'-'+str(v) for k, v in turn_slot_dict.items()]
turn_slot = list(set([k.split("-")[1] for k, v in turn_slot_dict.items()]))
slot_values, gates = [], []
for slot in SLOTS:
if slot in belief_dict.keys():
# update ontology
if args["ontology_version"] != "" and "the {}".format(belief_dict[slot]) in ontology[slot].keys():
belief_dict[slot] = "the {}".format(belief_dict[slot])
if belief_dict[slot] not in ontology[slot].keys() and update_ont_flag:
if slot+"-"+belief_dict[slot] not in add_slot_values:
print("[Info] Adding Slot: {} with value: [{}]".format(slot, belief_dict[slot]))
add_slot_values.add(slot+"-"+belief_dict[slot])
ontology[slot][belief_dict[slot]] = len(ontology[slot])
slot_values.append(belief_dict[slot])
if belief_dict[slot] == "none":
gates.append(0)
else:
gates.append(1)
else:
slot_values.append("none")
gates.append(0)
            # dialogue act (optionally excluding the domain prefix)
if turn["turn_idx"] == 0 and turn["system_transcript"] == "":
cur_sys_acts = set()
elif str(turn["turn_idx"]) not in dialog_act[dial_dict["dialogue_idx"].replace(".json", "")].keys():
cur_sys_acts = set()
elif dialog_act[dial_dict["dialogue_idx"].replace(".json", "")][str(turn["turn_idx"])] == "No Annotation":
cur_sys_acts = set()
else:
cur_sys_acts = dialog_act[dial_dict["dialogue_idx"].replace(".json", "")][str(turn["turn_idx"])]
if domain_act_flag:
cur_sys_acts = set([key.lower() for key in cur_sys_acts.keys()])
else:
cur_sys_acts = set([key.split("-")[1].lower() for key in cur_sys_acts.keys()])
data_detail = get_input_example("turn")
data_detail["slots"] = SLOTS
data_detail["ID"] = dial_dict["dialogue_idx"]
data_detail["turn_id"] = turn["turn_idx"]
data_detail["domains"] = dial_dict["domains"]
data_detail["turn_domain"] = turn["domain"]
data_detail["turn_usr"] = turn["transcript"].strip()
data_detail["turn_sys"] = turn["system_transcript"].strip()
data_detail["turn_usr_delex"] = turn["transcript_delex"].strip()
data_detail["turn_sys_delex"] = turn["system_transcript_delex"].strip()
data_detail["belief_state_vec"] = ast.literal_eval(turn["belief_state_vec"])
data_detail["db_pointer"] = ast.literal_eval(turn["db_pointer"])
data_detail["dialog_history"] = list(dialog_history)
data_detail["dialog_history_delex"] = list(dialog_history_delex)
data_detail["belief"] = belief_dict
data_detail["del_belief"] = turn_slot_dict
data_detail["slot_gate"] = gates
data_detail["slot_values"] = slot_values
data_detail["sys_act"] = cur_sys_acts
data_detail["turn_slot"] = turn_slot
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn["system_transcript"])
dialog_history.append(turn["transcript"])
dialog_history_delex.append(turn["system_transcript_delex"])
dialog_history_delex.append(turn["transcript_delex"])
response_candidates.add(str(data_detail["turn_sys"]))
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
#print("MultiWOZ domain counter: ", domain_counter)
return data, ontology, response_candidates
def read_langs_dial(args, file_name, ontology, dialog_act, max_line = None, domain_act_flag=False, update_ont_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def get_slot_information(args, ontology):
ontology_domains = dict([(k, v) for k, v in ontology.items() if k.split("-")[0] in EXPERIMENT_DOMAINS])
ontology_new = collections.OrderedDict()
for k, v in ontology_domains.items():
name = k.replace(" ","").lower() if ("book" not in k) else k.lower()
if args["ontology_version"] != "":
v = clean_original_ontology(v)
ontology_new[name] = {"none":0, "do n't care":1}
for vv in v:
if vv not in ontology_new[name].keys():
ontology_new[name][vv] = len(ontology_new[name])
return ontology_new
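# The returned mapping is an OrderedDict keyed by "domain-slotname" (spaces removed,
# lower-cased), each mapping slot values to indices and always starting with "none" and
# "do n't care", e.g. (illustrative): {"hotel-pricerange": {"none": 0, "do n't care": 1, "cheap": 2, ...}}.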
def prepare_data_multiwoz(args):
example_type = args["example_type"]
max_line = args["max_line"]
version = "2.1"
print("[Info] Using Version", version)
if args["domain"] == 'all':
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/train_dials.json'.format(version))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/dev_dials-{}.json'.format(version, args['tgt_lang']))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/test_dials-{}.json'.format(version, args['tgt_lang']))
elif args["domain"] == 'en':
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/train_dials.json'.format(version))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/dev_dials.json'.format(version))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/test_dials.json'.format(version))
elif args["domain_option"] == 'single':
print(args["domain"], args["domain_option"])
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/single/train/{}_train_dials.json'.format(version, args["domain"]))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/single/dev/{}_dev_dials.json'.format(version, args["domain"]))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/single/test/{}_test_dials.json'.format(version, args["domain"]))
else:
print(args["domain"], args["domain_option"])
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/train/{}_train_dials.json'.format(version, args["domain"]))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/dev/{}_dev_dials.json'.format(version, args["domain"]))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/test/{}_test_dials.json'.format(version, args["domain"]))
print(file_trn, file_dev, file_tst)
path_to_ontology_mapping = os.path.join(args["data_path"],
"MultiWOZ-{}/ontology-mapping{}.json".format(version, args["ontology_version"]))
if os.path.exists(path_to_ontology_mapping):
print("[Info] Load from old complete ontology from version {}...".format(args["ontology_version"]))
ontology_mapping = json.load(open(path_to_ontology_mapping, 'r'))
update_ont_flag = False
else:
print("[Info] Creating new ontology for version {}...".format(args["ontology_version"]))
ontology = json.load(open(os.path.join(args["data_path"], "MultiWOZ-{}/ontology.json".format(version)), 'r'))
ontology_mapping = get_slot_information(args, ontology)
update_ont_flag = True
dialog_act = json.load(open(os.path.join(args["data_path"], "MultiWOZ-{}/dialogue_acts.json".format(version)), 'r'))
_example_type = "dial" if "dial" in example_type else example_type
print(file_trn, file_dev, file_tst)
pair_trn, ontology_mapping, resp_cand_trn = globals()["read_langs_{}".format(_example_type)](args,
file_trn,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
pair_dev, ontology_mapping, resp_cand_dev = globals()["read_langs_{}".format(_example_type)](args,
file_dev,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
pair_tst, ontology_mapping, resp_cand_tst = globals()["read_langs_{}".format(_example_type)](args,
file_tst,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
if not os.path.exists(path_to_ontology_mapping):
print("[Info] Dumping complete ontology...")
json.dump(ontology_mapping, open(path_to_ontology_mapping, "w"), indent=4)
print("Read %s pairs train from MultiWOZ" % len(pair_trn))
print("Read %s pairs valid from MultiWOZ" % len(pair_dev))
print("Read %s pairs test from MultiWOZ" % len(pair_tst))
# print('args["task_name"]', args["task_name"])
if args["task_name"] == "dst":
meta_data = {"slots":ontology_mapping, "num_labels": len(ontology_mapping)}
elif args["task_name"] == "turn_domain":
domain_set = set([d["turn_domain"] for d in pair_trn])
domain_dict = {d:i for i, d in enumerate(domain_set)}
meta_data = {"turn_domain":domain_dict, "num_labels": len(domain_dict)}
elif args["task_name"] == "turn_slot":
turn_slot_list = []
for d in pair_trn:
turn_slot_list += d["turn_slot"]
turn_slot_list = list(set(turn_slot_list))
turn_slot_mapping = {d:i for i, d in enumerate(turn_slot_list)}
meta_data = {"turn_slot":turn_slot_mapping, "num_labels": len(turn_slot_mapping)}
elif args["task_name"] == "sysact":
act_set = set()
for pair in [pair_tst, pair_dev, pair_trn]:
for p in pair:
if type(p["sys_act"]) == list:
for sysact in p["sys_act"]:
act_set.update(sysact)
else:
act_set.update(p["sys_act"])
print("act_set", len(act_set), act_set)
sysact_lookup = {sysact:i for i, sysact in enumerate(act_set)}
meta_data = {"sysact":sysact_lookup, "num_labels":len(act_set)}
elif args["task_name"] == "rs":
print("resp_cand_trn", len(resp_cand_trn))
print("resp_cand_dev", len(resp_cand_dev))
print("resp_cand_tst", len(resp_cand_tst))
meta_data = {"num_labels":0, "resp_cand_trn": resp_cand_trn}
else:
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 13,939 | 50.821561 | 130 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_frames.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, file_name, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
with open(file_name) as f:
dials = json.load(f)
cnt_lin = 1
for dial_dict in dials:
dialog_history = []
turn_usr = ""
turn_sys = ""
for ti, turn in enumerate(dial_dict["turns"]):
if turn["author"] == "user":
turn_usr = turn["text"].lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = ti % 2
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
elif turn["author"] == "wizard":
turn_sys = turn["text"].lower().strip()
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_frames(args):
ds_name = "FRAMES"
example_type = args["example_type"]
max_line = args["max_line"]
file_trn = os.path.join(args["data_path"], "frames.json")
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, file_trn, max_line, ds_name)
pair_dev = []
pair_tst = []
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 2,548 | 30.469136 | 98 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_metalwoz.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, dial_files, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(ds_name)))
data = []
cnt_lin = 1
for dial_file in dial_files:
f_dials = open(dial_file, 'r')
dials = f_dials.readlines()
for dial in dials:
dialog_history = []
dial_dict = json.loads(dial)
# Reading data
for ti, turn in enumerate(dial_dict["turns"]):
if ti%2 == 0:
turn_sys = turn.lower().strip()
else:
turn_usr = turn.lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = ti % 2
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_metalwoz(args):
ds_name = "MetaLWOZ"
example_type = args["example_type"]
max_line = args["max_line"]
onlyfiles = [os.path.join(args["data_path"], 'metalwoz/dialogues/{}'.format(f)) for f in os.listdir(os.path.join(args["data_path"], "metalwoz/dialogues/")) if ".txt" in f]
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, onlyfiles, max_line, ds_name)
pair_dev = []
pair_tst = []
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 2,615 | 31.7 | 175 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_woz.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, file_name, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
with open(file_name) as f:
dials = json.load(f)
cnt_lin = 1
for dial_dict in dials:
dialog_history = []
# Reading data
for ti, turn in enumerate(dial_dict["dialogue"]):
assert ti == turn["turn_idx"]
turn_usr = turn["transcript"].lower().strip()
turn_sys = turn["system_transcript"].lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = turn["turn_idx"]
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_woz(args):
ds_name = "WOZ"
example_type = args["example_type"]
max_line = args["max_line"]
file_trn = os.path.join(args["data_path"], "neural-belief-tracker/data/woz/woz_train_en.json")
file_dev = os.path.join(args["data_path"], "neural-belief-tracker/data/woz/woz_validate_en.json")
file_tst = os.path.join(args["data_path"], "neural-belief-tracker/data/woz/woz_test_en.json")
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, file_trn, max_line, ds_name)
pair_dev = globals()["read_langs_{}".format(_example_type)](args, file_dev, max_line, ds_name)
pair_tst = globals()["read_langs_{}".format(_example_type)](args, file_tst, max_line, ds_name)
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 2,767 | 34.487179 | 101 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_multiwoz.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
from .multiwoz.fix_label import *
EXPERIMENT_DOMAINS = ["hotel", "train", "restaurant", "attraction", "taxi"] #, "hospital", "police"]
def read_langs_turn(args, file_name, ontology, dialog_act, max_line = None, domain_act_flag=False, update_ont_flag=False):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
SLOTS = [k for k in ontology.keys()]
max_resp_len, max_value_len = 0, 0
domain_counter = {}
response_candidates = set()
add_slot_values = set()
with open(file_name) as f:
dials = json.load(f)
cnt_lin = 1
for dial_dict in dials:
dialog_history, dialog_history_delex = [], []
# Filtering and counting domains
for domain in dial_dict["domains"]:
if domain not in EXPERIMENT_DOMAINS:
continue
if domain not in domain_counter.keys():
domain_counter[domain] = 0
domain_counter[domain] += 1
# Reading data
for ti, turn in enumerate(dial_dict["dialogue"]):
belief_dict = fix_general_label_error(turn["belief_state"], False, SLOTS, args["ontology_version"])
belief_list = [str(k)+'-'+str(v) for k, v in belief_dict.items()]
turn_slot_dict = fix_general_label_error(turn["turn_label"], True, SLOTS, args["ontology_version"])
turn_slot_list = [str(k)+'-'+str(v) for k, v in turn_slot_dict.items()]
turn_slot = list(set([k.split("-")[1] for k, v in turn_slot_dict.items()]))
slot_values, gates = [], []
for slot in SLOTS:
if slot in belief_dict.keys():
# update ontology
if args["ontology_version"] != "" and "the {}".format(belief_dict[slot]) in ontology[slot].keys():
belief_dict[slot] = "the {}".format(belief_dict[slot])
if belief_dict[slot] not in ontology[slot].keys() and update_ont_flag:
if slot+"-"+belief_dict[slot] not in add_slot_values:
print("[Info] Adding Slot: {} with value: [{}]".format(slot, belief_dict[slot]))
add_slot_values.add(slot+"-"+belief_dict[slot])
ontology[slot][belief_dict[slot]] = len(ontology[slot])
slot_values.append(belief_dict[slot])
if belief_dict[slot] == "none":
gates.append(0)
else:
gates.append(1)
else:
slot_values.append("none")
gates.append(0)
            # dialogue act (optionally excluding the domain prefix)
if turn["turn_idx"] == 0 and turn["system_transcript"] == "":
cur_sys_acts = set()
elif str(turn["turn_idx"]) not in dialog_act[dial_dict["dialogue_idx"].replace(".json", "")].keys():
cur_sys_acts = set()
elif dialog_act[dial_dict["dialogue_idx"].replace(".json", "")][str(turn["turn_idx"])] == "No Annotation":
cur_sys_acts = set()
else:
cur_sys_acts = dialog_act[dial_dict["dialogue_idx"].replace(".json", "")][str(turn["turn_idx"])]
if domain_act_flag:
cur_sys_acts = set([key.lower() for key in cur_sys_acts.keys()])
else:
cur_sys_acts = set([key.split("-")[1].lower() for key in cur_sys_acts.keys()])
data_detail = get_input_example("turn")
data_detail["slots"] = SLOTS
data_detail["ID"] = dial_dict["dialogue_idx"]
data_detail["turn_id"] = turn["turn_idx"]
data_detail["domains"] = dial_dict["domains"]
data_detail["turn_domain"] = turn["domain"]
data_detail["turn_usr"] = turn["transcript"].strip()
data_detail["turn_sys"] = turn["system_transcript"].strip()
data_detail["turn_usr_delex"] = turn["transcript_delex"].strip()
data_detail["turn_sys_delex"] = turn["system_transcript_delex"].strip()
data_detail["belief_state_vec"] = ast.literal_eval(turn["belief_state_vec"])
data_detail["db_pointer"] = ast.literal_eval(turn["db_pointer"])
data_detail["dialog_history"] = list(dialog_history)
data_detail["dialog_history_delex"] = list(dialog_history_delex)
data_detail["belief"] = belief_dict
data_detail["del_belief"] = turn_slot_dict
data_detail["slot_gate"] = gates
data_detail["slot_values"] = slot_values
data_detail["sys_act"] = cur_sys_acts
data_detail["turn_slot"] = turn_slot
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn["system_transcript"])
dialog_history.append(turn["transcript"])
dialog_history_delex.append(turn["system_transcript_delex"])
dialog_history_delex.append(turn["transcript_delex"])
response_candidates.add(str(data_detail["turn_sys"]))
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
#print("MultiWOZ domain counter: ", domain_counter)
return data, ontology, response_candidates
def read_langs_dial(args, file_name, ontology, dialog_act, max_line = None, domain_act_flag=False, update_ont_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def get_slot_information(args, ontology):
ontology_domains = dict([(k, v) for k, v in ontology.items() if k.split("-")[0] in EXPERIMENT_DOMAINS])
ontology_new = collections.OrderedDict()
for k, v in ontology_domains.items():
name = k.replace(" ","").lower() if ("book" not in k) else k.lower()
if args["ontology_version"] != "":
v = clean_original_ontology(v)
ontology_new[name] = {"none":0, "do n't care":1}
for vv in v:
if vv not in ontology_new[name].keys():
ontology_new[name][vv] = len(ontology_new[name])
return ontology_new
def prepare_data_multiwoz(args):
example_type = args["example_type"]
max_line = args["max_line"]
version = "2.1"
print("[Info] Using Version", version)
if args["domain"] == 'all':
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/train_dials.json'.format(version))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/dev_dials.json'.format(version))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/test_dials.json'.format(version))
elif args["domain_option"] == 'single':
print(args["domain"], args["domain_option"])
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/single/train/{}_train_dials.json'.format(version, args["domain"]))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/single/dev/{}_dev_dials.json'.format(version, args["domain"]))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/single/test/{}_test_dials.json'.format(version, args["domain"]))
else:
print(args["domain"], args["domain_option"])
file_trn = os.path.join(args["data_path"], 'MultiWOZ-{}/train/{}_train_dials.json'.format(version, args["domain"]))
file_dev = os.path.join(args["data_path"], 'MultiWOZ-{}/dev/{}_dev_dials.json'.format(version, args["domain"]))
file_tst = os.path.join(args["data_path"], 'MultiWOZ-{}/test/{}_test_dials.json'.format(version, args["domain"]))
print(file_trn, file_dev, file_tst)
path_to_ontology_mapping = os.path.join(args["data_path"],
"MultiWOZ-{}/ontology-mapping{}.json".format(version, args["ontology_version"]))
if os.path.exists(path_to_ontology_mapping):
print("[Info] Load from old complete ontology from version {}...".format(args["ontology_version"]))
ontology_mapping = json.load(open(path_to_ontology_mapping, 'r'))
update_ont_flag = False
else:
print("[Info] Creating new ontology for version {}...".format(args["ontology_version"]))
ontology = json.load(open(os.path.join(args["data_path"], "MultiWOZ-{}/ontology.json".format(version)), 'r'))
ontology_mapping = get_slot_information(args, ontology)
update_ont_flag = True
dialog_act = json.load(open(os.path.join(args["data_path"], "MultiWOZ-{}/dialogue_acts.json".format(version)), 'r'))
_example_type = "dial" if "dial" in example_type else example_type
pair_trn, ontology_mapping, resp_cand_trn = globals()["read_langs_{}".format(_example_type)](args,
file_trn,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
pair_dev, ontology_mapping, resp_cand_dev = globals()["read_langs_{}".format(_example_type)](args,
file_dev,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
pair_tst, ontology_mapping, resp_cand_tst = globals()["read_langs_{}".format(_example_type)](args,
file_tst,
ontology_mapping,
dialog_act,
max_line,
args["domain_act"],
update_ont_flag)
if not os.path.exists(path_to_ontology_mapping):
print("[Info] Dumping complete ontology...")
json.dump(ontology_mapping, open(path_to_ontology_mapping, "w"), indent=4)
print("Read %s pairs train from MultiWOZ" % len(pair_trn))
print("Read %s pairs valid from MultiWOZ" % len(pair_dev))
print("Read %s pairs test from MultiWOZ" % len(pair_tst))
# print('args["task_name"]', args["task_name"])
if args["task_name"] == "dst":
meta_data = {"slots":ontology_mapping, "num_labels": len(ontology_mapping)}
elif args["task_name"] == "turn_domain":
domain_set = set([d["turn_domain"] for d in pair_trn])
domain_dict = {d:i for i, d in enumerate(domain_set)}
meta_data = {"turn_domain":domain_dict, "num_labels": len(domain_dict)}
elif args["task_name"] == "turn_slot":
turn_slot_list = []
for d in pair_trn:
turn_slot_list += d["turn_slot"]
turn_slot_list = list(set(turn_slot_list))
turn_slot_mapping = {d:i for i, d in enumerate(turn_slot_list)}
meta_data = {"turn_slot":turn_slot_mapping, "num_labels": len(turn_slot_mapping)}
elif args["task_name"] == "sysact":
act_set = set()
for pair in [pair_tst, pair_dev, pair_trn]:
for p in pair:
if type(p["sys_act"]) == list:
for sysact in p["sys_act"]:
act_set.update(sysact)
else:
act_set.update(p["sys_act"])
print("act_set", len(act_set), act_set)
sysact_lookup = {sysact:i for i, sysact in enumerate(act_set)}
meta_data = {"sysact":sysact_lookup, "num_labels":len(act_set)}
elif args["task_name"] == "rs":
print("resp_cand_trn", len(resp_cand_trn))
print("resp_cand_dev", len(resp_cand_dev))
print("resp_cand_tst", len(resp_cand_tst))
meta_data = {"num_labels":0, "resp_cand_trn": resp_cand_trn}
else:
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 13,530 | 50.253788 | 130 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_taskmaster.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, dials, ds_name, max_line):
print(("Reading from {} for read_langs_turn".format(ds_name)))
data = []
turn_sys = ""
turn_usr = ""
cnt_lin = 1
for dial in dials:
dialog_history = []
for ti, turn in enumerate(dial["utterances"]):
if turn["speaker"] == "USER":
turn_usr = turn["text"].lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = ti % 2
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if (not args["only_last_turn"]):
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
elif turn["speaker"] == "ASSISTANT":
turn_sys = turn["text"].lower().strip()
else:
turn_usr += " {}".format(turn["text"])
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_taskmaster(args):
ds_name = "TaskMaster"
example_type = args["example_type"]
max_line = args["max_line"]
fr_trn_id = open(os.path.join(args["data_path"], 'Taskmaster/TM-1-2019/train-dev-test/train.csv'), 'r')
fr_dev_id = open(os.path.join(args["data_path"], 'Taskmaster/TM-1-2019/train-dev-test/dev.csv'), 'r')
fr_trn_id = fr_trn_id.readlines()
fr_dev_id = fr_dev_id.readlines()
fr_trn_id = [_id.replace("\n", "").replace(",", "") for _id in fr_trn_id]
fr_dev_id = [_id.replace("\n", "").replace(",", "") for _id in fr_dev_id]
fr_data_woz = open(os.path.join(args["data_path"], 'Taskmaster/TM-1-2019/woz-dialogs.json'), 'r')
fr_data_self = open(os.path.join(args["data_path"], 'Taskmaster/TM-1-2019/self-dialogs.json'), 'r')
dials_all = json.load(fr_data_woz) + json.load(fr_data_self)
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, dials_all, ds_name, max_line)
pair_dev = []
pair_tst = []
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 3,043 | 33.988506 | 107 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_msre2e.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, file_name, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(file_name)))
data = []
with open(file_name) as f:
dials = f.readlines()
cnt_lin = 1
dialog_history = []
turn_usr = ""
turn_sys = ""
turn_idx = 0
for dial in dials[1:]:
dial_split = dial.split("\t")
session_ID, Message_ID, Message_from, Message = dial_split[0], dial_split[1], dial_split[3], dial_split[4]
if Message_ID == "1" and turn_sys != "":
if args["only_last_turn"]:
data.append(data_detail)
turn_usr = ""
turn_sys = ""
dialog_history = []
cnt_lin += 1
turn_idx = 0
if Message_from == "user":
turn_usr = Message.lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = turn_idx
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if not args["only_last_turn"]:
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
turn_idx += 1
elif Message_from == "agent":
turn_sys = Message.lower().strip()
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_msre2e(args):
ds_name = "MSR-E2E"
example_type = args["example_type"]
max_line = args["max_line"]
file_mov = os.path.join(args["data_path"], 'e2e_dialog_challenge/data/movie_all.tsv')
file_rst = os.path.join(args["data_path"], 'e2e_dialog_challenge/data/restaurant_all.tsv')
file_tax = os.path.join(args["data_path"], 'e2e_dialog_challenge/data/taxi_all.tsv')
_example_type = "dial" if "dial" in example_type else example_type
pair_mov = globals()["read_langs_{}".format(_example_type)](args, file_mov, max_line, ds_name+"-mov")
pair_rst = globals()["read_langs_{}".format(_example_type)](args, file_rst, max_line, ds_name+"-rst")
pair_tax = globals()["read_langs_{}".format(_example_type)](args, file_tax, max_line, ds_name+"-tax")
pair_trn = pair_mov + pair_rst + pair_tax
pair_dev = []
pair_tst = []
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 3,276 | 33.135417 | 118 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_function.py
|
import torch
import numpy as np
PAD_token = 0
def to_cuda(x):
if torch.cuda.is_available(): x = x.cuda()
return x
def merge(sequences, ignore_idx=None):
'''
merge from batch * sent_len to batch * max_len
'''
    pad_token = PAD_token if ignore_idx is None else ignore_idx
lengths = [len(seq) for seq in sequences]
max_len = 1 if max(lengths)==0 else max(lengths)
padded_seqs = torch.ones(len(sequences), max_len).long() * pad_token
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
padded_seqs = padded_seqs.detach() #torch.tensor(padded_seqs)
return padded_seqs, lengths
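# Illustrative behaviour (a sketch; the token ids below are made up):
#   merge([torch.Tensor([5, 6, 7]), torch.Tensor([8])])
#   -> padded_seqs = tensor([[5, 6, 7], [8, 0, 0]]), lengths = [3, 1]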
def merge_multi_response(sequences, ignore_idx=None):
'''
merge from batch * nb_slot * slot_len to batch * nb_slot * max_slot_len
'''
    pad_token = PAD_token if ignore_idx is None else ignore_idx
lengths = []
for bsz_seq in sequences:
length = [len(v) for v in bsz_seq]
lengths.append(length)
max_len = max([max(l) for l in lengths])
padded_seqs = []
for bsz_seq in sequences:
pad_seq = []
for v in bsz_seq:
v = v + [pad_token] * (max_len-len(v))
pad_seq.append(v)
padded_seqs.append(pad_seq)
padded_seqs = torch.tensor(padded_seqs).long()
lengths = torch.tensor(lengths)
return padded_seqs, lengths
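# Illustrative behaviour (a sketch; ids are made up): for 2 examples with 3 slots each,
#   sequences = [[[7, 3], [9], [4, 4, 4]], [[1], [2, 2], [5]]]
# yields a (2, 3, 3) LongTensor padded with PAD_token and a (2, 3) tensor of value lengths.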
def merge_sent_and_word(sequences, ignore_idx=None):
'''
merge from batch * nb_sent * nb_word to batch * max_nb_sent * max_nb_word
'''
max_nb_sent = max([len(seq) for seq in sequences])
max_nb_word, lengths = [], []
for seq in sequences:
length = [len(sent) for sent in seq]
max_nb_word += length
lengths.append(length)
max_nb_word = max(max_nb_word)
    pad_token = PAD_token if ignore_idx is None else ignore_idx
padded_seqs = np.ones((len(sequences), max_nb_sent, max_nb_word)) * pad_token
for i, seq in enumerate(sequences):
for ii, sent in enumerate(seq):
padded_seqs[i, ii, :len(sent)] = np.array(sent)
padded_seqs = torch.LongTensor(padded_seqs)
padded_seqs = padded_seqs.detach()
return padded_seqs, lengths
def get_input_example(example_type):
if example_type == "turn":
data_detail = {
"ID":"",
"turn_id":0,
"domains":[],
"turn_domain":[],
"turn_usr":"",
"turn_sys":"",
"turn_usr_delex":"",
"turn_sys_delex":"",
"belief_state_vec":[],
"db_pointer":[],
"dialog_history":[],
"dialog_history_delex":[],
"belief":{},
"del_belief":{},
"slot_gate":[],
"slot_values":[],
"slots":[],
"sys_act":[],
"usr_act":[],
"intent":"",
"turn_slot":[]}
elif example_type == "dial":
data_detail = {
"ID":"",
"turn_id":[],
"domains":[],
"turn_domain":[],
"turn_usr":[],
"turn_sys":[],
"turn_usr_delex":[],
"turn_sys_delex":[],
"belief_state_vec":[],
"db_pointer":[],
"dialog_history":[],
"dialog_history_delex":[],
"belief":[],
"del_belief":[],
"slot_gate":[],
"slot_values":[],
"slots":[],
"sys_act":[],
"usr_act":[],
"intent":[],
"turn_slot":[]}
return data_detail
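# Note: the "turn" template stores a single value per field (one dialogue turn), whereas
# the "dial" template stores a list per field, one entry per turn of the dialogue.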
| 3,659 | 28.28 | 82 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/config.py
|
import os
import logging
import argparse
from tqdm import tqdm
import torch
import numpy as np
parser = argparse.ArgumentParser(description='Task-oriented Dialogue System Benchmarking')
## Training Setting
parser.add_argument(
'--do_train', action='store_true', help="do training")
parser.add_argument(
'-epoch','--epoch', help='number of epochs to train', required=False, default=300, type=int)
parser.add_argument(
'-patience','--patience', help='patience for early stopping', required=False, default=10, type=int)
parser.add_argument(
'-earlystop','--earlystop', help='metric for early stopping', required=False, default="loss", type=str)
parser.add_argument(
    '--my_model', help='my customized model', required=False, default="")
parser.add_argument(
'-dr','--dropout', help='Dropout ratio', required=False, type=float, default=0.2)
parser.add_argument(
'-lr','--learning_rate', help='Learning Rate', required=False, type=float, default=5e-5)
parser.add_argument(
'-bsz','--batch_size', help='Batch_size', required=False, type=int, default=16)
parser.add_argument(
'-ebsz','--eval_batch_size', help='Batch_size', required=False, type=int, default=16)
parser.add_argument(
'-hdd','--hdd_size', help='Hidden size', required=False, type=int, default=400)
parser.add_argument(
'-emb','--emb_size', help='Embedding size', required=False, type=int, default=400)
parser.add_argument(
'-clip','--grad_clip', help='gradient clipping', required=False, default=1, type=int)
parser.add_argument(
'-tfr','--teacher_forcing_ratio', help='teacher_forcing_ratio', type=float, required=False, default=0.5)
parser.add_argument(
'-loadEmb','--load_embedding', help='Load Pretrained Glove and Char Embeddings', required=False, default=False, type=bool)
parser.add_argument(
'-fixEmb','--fix_embedding', help='', required=False, default=False, type=bool)
parser.add_argument(
'--n_gpu', help='', required=False, default=1, type=int)
parser.add_argument(
'--eval_by_step', help='', required=False, default=-1, type=int)
parser.add_argument(
'--fix_encoder', action='store_true', help="")
parser.add_argument(
'--model_type', help='', required=False, default="bert", type=str)
parser.add_argument(
'--model_name_or_path', help='', required=False, default="bert", type=str)
parser.add_argument(
'--usr_token', help='', required=False, default="[USR]", type=str)
parser.add_argument(
'--sys_token', help='', required=False, default="[SYS]", type=str)
parser.add_argument(
    '--warmup_proportion', help='warm up training in the beginning', required=False, default=0.1, type=float)
parser.add_argument(
"--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--gradient_accumulation_steps", type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument(
"--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",)
parser.add_argument(
"--fp16_opt_level", type=str, default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",)
parser.add_argument(
"--output_mode", default="classification", type=str, help="")
parser.add_argument(
"--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.",)
parser.add_argument(
"--rand_seed", default=0, type=int, help="")
parser.add_argument(
"--fix_rand_seed", action="store_true", help="fix random seed for training",)
parser.add_argument(
"--nb_runs", default=1, type=int, help="number of runs to conduct during training")
parser.add_argument(
"--nb_evals", default=1, type=int, help="number of runs to conduct during inference")
parser.add_argument(
"--max_seq_length", default=512, type=int, help="")
parser.add_argument(
"--input_name", default="context", type=str, help="")
## Dataset or Input/Output Setting
parser.add_argument(
    '-dpath','--data_path', help='path to the dataset folder (change this to your local folder)',
required=False, default='./dialog_datasets', type=str)
parser.add_argument(
'-task','--task', help='task in ["nlu", "dst", "dm", "nlg", "usdl"] to decide which dataloader to use', required=True)
parser.add_argument(
'-task_name','--task_name', help='task in ["intent", "sysact","rs"]', required=False, default="")
parser.add_argument(
'--example_type', help='type in ["turn", "dial"]', required=False, default="turn")
parser.add_argument(
'-ds','--dataset', help='which dataset to be used.', required=False, default='["multiwoz"]', type=str)
parser.add_argument(
'-load_path','--load_path', help='path of the saved model to load from', required=False)
parser.add_argument(
'-an','--add_name', help='An added name for the save folder', required=False, default='')
parser.add_argument(
'--max_line', help='maximum line for reading data (for quick testing)', required=False, default=None, type=int)
parser.add_argument(
'--output_dir', help='', required=False, default="save/temp/", type=str)
parser.add_argument(
'--overwrite', action='store_true', help="")
parser.add_argument(
"--cache_dir", default=None, type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",)
parser.add_argument(
"--logging_steps", default=500, type=int, help="")
parser.add_argument(
"--save_steps", default=1000, type=int, help="")
parser.add_argument(
"--save_total_limit", type=int, default=1,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir",)
parser.add_argument(
"--train_data_ratio", default=1.0, type=float, help="")
parser.add_argument(
"--domain_act", action="store_true", help="",)
parser.add_argument(
"--only_last_turn", action="store_true", help="",)
parser.add_argument(
"--error_analysis", action="store_true", help="",)
parser.add_argument(
"--not_save_model", action="store_true", help="")
parser.add_argument(
"--nb_shots", default=-1, type=int, help="")
parser.add_argument(
"--continue_ft", action="store_true", help="",)
## Others (May be able to delete or not used in this repo)
parser.add_argument(
'--do_embeddings', action='store_true')
parser.add_argument(
'--create_own_vocab', action='store_true', help="")
parser.add_argument(
'-um','--unk_mask', help='mask out input token to UNK', type=bool, required=False, default=True)
parser.add_argument(
'-paral','--parallel_decode', help='', required=False, default=True, type=bool)
parser.add_argument(
'--self_supervised', help='', required=False, default="generative", type=str)
parser.add_argument(
"--oracle_domain", action="store_true", help="",)
parser.add_argument(
"--more_linear_mapping", action="store_true", help="",)
parser.add_argument(
"--gate_supervision_for_dst", action="store_true", help="",)
parser.add_argument(
"--sum_token_emb_for_value", action="store_true", help="",)
parser.add_argument(
"--nb_neg_sample_rs", default=0, type=int, help="")
parser.add_argument(
"--sample_negative_by_kmeans", action="store_true", help="",)
parser.add_argument(
"--nb_kmeans", default=1000, type=int, help="")
parser.add_argument(
"--bidirect", action="store_true", help="",)
parser.add_argument(
'--rnn_type', help='rnn type ["gru", "lstm"]', required=False, type=str, default="gru")
parser.add_argument(
'--num_rnn_layers', help='rnn layers size', required=False, type=int, default=1)
parser.add_argument(
'--zero_init_rnn',action='store_true', help="set initial hidden of rnns zero")
parser.add_argument(
"--do_zeroshot", action="store_true", help="",)
parser.add_argument(
"--oos_threshold", action="store_true", help="",)
parser.add_argument(
"--ontology_version", default="", type=str, help="1.0 is the cleaned version but not used")
parser.add_argument(
"--dstlm", action="store_true", help="",)
parser.add_argument(
"--domain", default="all", type=str, help="select one of the following domains for multiwoz: taxi, restaurant, attraction, hotel, train, all")
parser.add_argument(
"--domain_option", default="multi", type=str, help="select one of the following domain options for multiwoz: single, multi")
parser.add_argument(
"--tgt_lang", default="de", type=str, help="select one of the following languages: de, cn, ar, ru")
parser.add_argument(
"--adapter_name_or_path", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--adapter_name_or_path_2", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--adapter_name_or_path_3", default="", type=str, help="load pretrained adapter from adapter_name_or_path")
parser.add_argument(
"--continue_model_path", default="", type=str, help="load previous pretrained model from continue_model_path")
parser.add_argument(
"--save_adapter_path", default="", type=str, help="path to save fine-tuned adapter")
parser.add_argument(
'-viz','--vizualization', help='vizualization', type=int, required=False, default=0)
args = vars(parser.parse_args())
# args = parser.parse_args()
print(str(args))
# check output_dir
if os.path.exists(args["output_dir"]) and os.listdir(args["output_dir"]) and args["do_train"] and (not args["overwrite"]):
raise ValueError("Output directory ({}) already exists and is not empty.".format(args["output_dir"]))
os.makedirs(args["output_dir"], exist_ok=True)
# Dictionary Predefined
SEEDS = [10, 5, 0] # np.arange(0, 100, 5)
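# Illustrative invocation (a sketch: the entry-point script name, model name and paths are
# placeholders; only the flags themselves are defined in this file):
#   python train.py --do_train --task dst --task_name dst --dataset '["multiwoz"]' \
#       --model_type bert --model_name_or_path bert-base-multilingual-cased \
#       --data_path ./dialog_datasets --output_dir save/dst_example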
| 10,339 | 47.093023 | 150 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_schema.py
|
import json
import ast
import collections
import os
from .utils_function import get_input_example
def read_langs_turn(args, dial_files, max_line = None, ds_name=""):
print(("Reading from {} for read_langs_turn".format(ds_name)))
data = []
cnt_lin = 1
for dial_file in dial_files:
f_dials = open(dial_file, 'r')
dials = json.load(f_dials)
turn_sys = ""
turn_usr = ""
for dial_dict in dials:
dialog_history = []
for ti, turn in enumerate(dial_dict["turns"]):
if turn["speaker"] == "USER":
turn_usr = turn["utterance"].lower().strip()
data_detail = get_input_example("turn")
data_detail["ID"] = "{}-{}".format(ds_name, cnt_lin)
data_detail["turn_id"] = ti % 2
data_detail["turn_usr"] = turn_usr
data_detail["turn_sys"] = turn_sys
data_detail["dialog_history"] = list(dialog_history)
if (not args["only_last_turn"]):
data.append(data_detail)
dialog_history.append(turn_sys)
dialog_history.append(turn_usr)
elif turn["speaker"] == "SYSTEM":
turn_sys = turn["utterance"].lower().strip()
if args["only_last_turn"]:
data.append(data_detail)
cnt_lin += 1
if(max_line and cnt_lin >= max_line):
break
return data
def read_langs_dial(file_name, ontology, dialog_act, max_line = None, domain_act_flag=False):
print(("Reading from {} for read_langs_dial".format(file_name)))
raise NotImplementedError
def prepare_data_schema(args):
ds_name = "Schema"
example_type = args["example_type"]
max_line = args["max_line"]
onlyfiles_trn = [os.path.join(args["data_path"], 'dstc8-schema-guided-dialogue/train/{}'.format(f)) for f in os.listdir(os.path.join(args["data_path"], "dstc8-schema-guided-dialogue/train/")) if "dialogues" in f]
onlyfiles_dev = [os.path.join(args["data_path"], 'dstc8-schema-guided-dialogue/dev/{}'.format(f)) for f in os.listdir(os.path.join(args["data_path"],"dstc8-schema-guided-dialogue/dev/")) if "dialogues" in f]
onlyfiles_tst = [os.path.join(args["data_path"], 'dstc8-schema-guided-dialogue/test/{}'.format(f)) for f in os.listdir(os.path.join(args["data_path"], "dstc8-schema-guided-dialogue/test/")) if "dialogues" in f]
_example_type = "dial" if "dial" in example_type else example_type
pair_trn = globals()["read_langs_{}".format(_example_type)](args, onlyfiles_trn, max_line, ds_name)
pair_dev = globals()["read_langs_{}".format(_example_type)](args, onlyfiles_dev, max_line, ds_name)
pair_tst = globals()["read_langs_{}".format(_example_type)](args, onlyfiles_tst, max_line, ds_name)
print("Read {} pairs train from {}".format(len(pair_trn), ds_name))
print("Read {} pairs valid from {}".format(len(pair_dev), ds_name))
print("Read {} pairs test from {}".format(len(pair_tst), ds_name))
meta_data = {"num_labels":0}
return pair_trn, pair_dev, pair_tst, meta_data
| 3,333 | 38.690476 | 216 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/dataloader_dst.py
|
import torch
import numpy as np
import torch.utils.data as data
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
# SLOT_GATE = {"ptr":0, "dontcare":1, "none":2}
class Dataset_dst(torch.utils.data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
"""Reads source and target sequences from txt files."""
self.data = data_info
self.tokenizer = tokenizer
self.num_total_seqs = len(data_info["ID"])
self.usr_token = args["usr_token"]
self.sys_token = args["sys_token"]
self.max_length = max_length
self.args = args
self.unified_meta = unified_meta
self.slots = list(unified_meta["slots"].keys())
self.mask_token_idx = tokenizer.convert_tokens_to_ids("[MASK]")
self.sep_token_idx = tokenizer.convert_tokens_to_ids("[SEP]")
self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
if self.args["example_type"] == "turn":
dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
gate_label = self.data["slot_gate"][index]
context_plain = self.concat_dh_sys_usr(dialog_history_str,
self.data["turn_sys"][index],
self.data["turn_usr"][index])
slot_values_plain = self.data["slot_values"][index]
slot_values = self.preprocess_slot(slot_values_plain)
triggered_domains = set([domain_slot.split("-")[0] for domain_slot in self.data["belief"][index].keys()])
triggered_domains.add(self.data["turn_domain"][index])
assert len(triggered_domains) != 0
triggered_ds_mask = [1 if s.split("-")[0] in triggered_domains else 0 for s in self.slots]
triggered_ds_idx = []
triggered_ds_pos = []
context = self.preprocess(context_plain)
ontology_idx = []
for si, sv in enumerate(slot_values_plain):
try:
ontology_idx.append(self.unified_meta["slots"][self.slots[si]][sv])
except Exception as e:
print("Not In Ontology")
print(e)
print(self.slots[si], sv)
ontology_idx.append(-1)
elif self.args["example_type"] == "dial":
            raise NotImplementedError()
item_info = {
"ID":self.data["ID"][index],
"turn_id":self.data["turn_id"][index],
"del_belief":self.data["del_belief"][index],
"slot_gate":gate_label,
"context":context,
"context_plain":context_plain,
"slot_values":slot_values,
"belief":self.data["belief"][index],
"slots":self.data["slots"][index],
"belief_ontology":ontology_idx,
"triggered_ds_mask":triggered_ds_mask,
"triggered_ds_idx":triggered_ds_idx,
"triggered_ds_pos":triggered_ds_pos}
return item_info
def __len__(self):
return self.num_total_seqs
def concat_dh_sys_usr(self, dialog_history, sys, usr):
return dialog_history + " {} ".format(self.sep_token) + " {} ".format(self.sys_token) + sys + " {} ".format(self.usr_token) + usr
def preprocess(self, sequence):
"""Converts words to ids."""
#story = torch.Tensor(self.tokenizer.encode(sequence))
tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
return story
def preprocess_slot(self, sequence):
"""Converts words to ids."""
story = []
for value in sequence:
v = list(self.tokenizer.encode(value + " {}".format(self.sep_token)))
story.append(v)
return story
def get_concat_context(self, dialog_history):
dialog_history_str = ""
for ui, uttr in enumerate(dialog_history):
if ui%2 == 0:
dialog_history_str += "{} {} ".format(self.sys_token, uttr)
else:
dialog_history_str += "{} {} ".format(self.usr_token, uttr)
dialog_history_str = dialog_history_str.strip()
return dialog_history_str
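# Batch collation for turn-level DST examples: contexts are padded to the batch maximum
# with merge(), per-slot value sequences are padded to (batch, nb_slots, max_value_len)
# with merge_multi_response(), and the slot gates, ontology indices and triggered-domain
# masks are stacked into tensors before being moved to the GPU via to_cuda().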
def collate_fn_dst_turn(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge(item_info['context'])
y_seqs, y_lengths = merge_multi_response(item_info["slot_values"])
gates = torch.tensor(item_info["slot_gate"])
belief_ontology = torch.tensor(item_info["belief_ontology"])
triggered_ds_mask = torch.tensor(item_info["triggered_ds_mask"])
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
item_info["slot_gate"] = to_cuda(gates)
item_info["slot_values"] = to_cuda(y_seqs)
item_info["slot_values_len"] = y_lengths
item_info["belief_ontology"] = to_cuda(belief_ontology)
item_info["triggered_ds_mask"] = to_cuda(triggered_ds_mask)
return item_info
def collate_fn_dst_dial(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge_sent_and_word(item_info['context'])
y = [merge_multi_response(sv) for sv in item_info["slot_values"]]
y_seqs = [_y[0] for _y in y]
y_lengths = [_y[1] for _y in y]
gates, gate_lengths = merge_sent_and_word(item_info['slot_gate'], ignore_idx=-1)
belief_ontology = torch.tensor(item_info["belief_ontology"])
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
item_info["slot_gate"] = to_cuda(gates)
item_info["slot_values"] = [to_cuda(y) for y in y_seqs] # TODO
item_info["slot_values_len"] = y_lengths # TODO
return item_info
| 6,765 | 41.2875 | 137 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_general.py
|
import torch
import torch.utils.data as data
import random
import math
from .dataloader_dst import *
from .dataloader_nlg import *
from .dataloader_nlu import *
from .dataloader_dm import *
from .dataloader_usdl import *
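# get_loader() concatenates the selected datasets for the requested mode, optionally drops
# empty / first-turn system responses (for the "rs" and "dm" tasks), subsamples the training
# split according to train_data_ratio or nb_shots, and wraps the result in the task-specific
# Dataset_<task> class together with the matching collate_fn_<task>_<example_type>.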
def get_loader(args, mode, tokenizer, datasets, unified_meta, shuffle=False):
task = args["task"]
batch_size = args["batch_size"] if mode == "train" else args["eval_batch_size"]
combined_ds = []
for ds in datasets:
combined_ds += datasets[ds][mode]
# do not consider empty system responses
if (args["task_name"] == "rs") or (args["task"] == "dm"):
print("[Info] Remove turns with empty system response...")
combined_ds = [d for d in combined_ds if d["turn_sys"]!=""]
## Ignore the first system utterance for response selection task
if (args["task_name"] == "rs"):
print("[Info] Remove turn=0 system response...")
combined_ds = [d for d in combined_ds if d["turn_id"]!=0]
# control data ratio
if (args["train_data_ratio"] != 1 or args["nb_shots"] != -1) and (mode == "train"):
original_len = len(combined_ds)
if ("oos_intent" in args["dataset"]):
nb_train_sample_per_class = int(100 * args["train_data_ratio"])
class_count = {k: 0 for k in unified_meta["intent"]}
random.Random(args["rand_seed"]).shuffle(combined_ds)
pair_trn_new = []
for d in combined_ds:
if class_count[d["intent"]] < nb_train_sample_per_class:
pair_trn_new.append(d)
class_count[d["intent"]] += 1
combined_ds = pair_trn_new
else:
if args["train_data_ratio"] != 1:
random.Random(args["rand_seed"]).shuffle(combined_ds)
combined_ds = combined_ds[:int(len(combined_ds)*args["train_data_ratio"])]
else:
random.Random(args["rand_seed"]).shuffle(combined_ds)
combined_ds = combined_ds[:args["nb_shots"]]
print("[INFO] Use Training Data: from {} to {}".format(original_len, len(combined_ds)))
data_info = {k: [] for k in combined_ds[0].keys()}
for d in combined_ds:
for k in combined_ds[0].keys():
data_info[k].append(d[k])
dataset = globals()["Dataset_"+task](data_info, tokenizer, args, unified_meta, mode, args["max_seq_length"])
bool_shuffle = (mode=="train" or shuffle)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=bool_shuffle,
collate_fn=globals()["collate_fn_{}_{}".format(task, args["example_type"])])
return data_loader
def get_unified_meta(datasets):
unified_meta = {"others":None}
for ds in datasets:
for key, value in datasets[ds]["meta"].items():
if key not in unified_meta.keys():
unified_meta[key] = {}
if type(value) == list:
for v in value:
if v not in unified_meta[key].keys():
unified_meta[key][v] = len(unified_meta[key])
else:
unified_meta[key] = value
return unified_meta
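# Illustrative usage (a sketch; "datasets" is assumed to be a dict of the form
# {name: {"train": [...], "dev": [...], "test": [...], "meta": {...}}} as built by the
# prepare_data_* helpers, and "tokenizer" a pretrained HuggingFace tokenizer):
#   unified_meta = get_unified_meta(datasets)
#   trn_loader = get_loader(args, "train", tokenizer, datasets, unified_meta)
#   dev_loader = get_loader(args, "dev", tokenizer, datasets, unified_meta)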
| 3,348 | 39.349398 | 122 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/utils_oos_intent.py
|
import json
import ast
import os
import random
from .utils_function import get_input_example
def read_langs(args, dtype, _data, _oos_data):
print(("Reading [OOS Intent] for read_langs {}".format(dtype)))
data = []
intent_counter = {}
for cur_data in [_data, _oos_data]:
for d in cur_data:
sentence, label = d[0], d[1]
data_detail = get_input_example("turn")
data_detail["ID"] = "OOS-INTENT-{}-{}".format(dtype, len(data))
data_detail["turn_usr"] = sentence
data_detail["intent"] = label
data.append(data_detail)
# count number of each label
if label not in intent_counter.keys():
intent_counter[label] = 0
intent_counter[label] += 1
#print("len of OOS Intent counter: ", len(intent_counter))
return data, intent_counter
def prepare_data_oos_intent(args):
example_type = args["example_type"]
max_line = args["max_line"]
file_input = os.path.join(args["data_path"], 'oos-intent/data/data_full.json')
data = json.load(open(file_input, "r"))
pair_trn, intent_counter_trn = read_langs(args, "trn", data["train"], data["oos_train"])
pair_dev, intent_counter_dev = read_langs(args, "dev", data["val"], data["oos_val"])
pair_tst, intent_counter_tst = read_langs(args, "tst", data["test"], data["oos_test"])
print("Read %s pairs train from OOS Intent" % len(pair_trn))
print("Read %s pairs valid from OOS Intent" % len(pair_dev))
print("Read %s pairs test from OOS Intent" % len(pair_tst))
intent_class = list(intent_counter_trn.keys())
meta_data = {"intent":intent_class, "num_labels":len(intent_class)}
print("len(intent_class)", len(intent_class))
return pair_trn, pair_dev, pair_tst, meta_data
| 1,844 | 31.946429 | 92 |
py
|
Multi2WOZ
|
Multi2WOZ-main/downstream/utils/dataloader_usdl.py
|
import torch
import torch.utils.data as data
from .utils_function import to_cuda, merge, merge_multi_response, merge_sent_and_word
class Dataset_usdl(torch.utils.data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, tokenizer, args, unified_meta, mode, max_length=512):
"""Reads source and target sequences from txt files."""
self.data = data_info
self.tokenizer = tokenizer
self.num_total_seqs = len(data_info["ID"])
self.usr_token = args["usr_token"]
self.sys_token = args["sys_token"]
self.usr_token_id = self.tokenizer.convert_tokens_to_ids(args["usr_token"])
self.sys_token_id = self.tokenizer.convert_tokens_to_ids(args["sys_token"])
self.max_length = max_length
self.args = args
self.unified_meta = unified_meta
self.start_token = self.tokenizer.cls_token if "bert" in self.args["model_type"] else self.tokenizer.bos_token
self.sep_token = self.tokenizer.sep_token if "bert" in self.args["model_type"] else self.tokenizer.eos_token
self.mode = mode
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
item_info = {}
if self.args["example_type"] == "turn":
dialog_history_str = self.get_concat_context(self.data["dialog_history"][index])
context_plain = self.concat_dh_sys_usr(dialog_history_str,
self.data["turn_sys"][index],
self.data["turn_usr"][index])
context = self.preprocess(context_plain)
elif self.args["example_type"] == "dial":
context_plain = self.data["dialog_history"][index]
context = self.preprocess_slot(context_plain)
item_info["ID"] = self.data["ID"][index]
item_info["turn_id"] = self.data["turn_id"][index]
item_info["context"] = context
item_info["context_plain"] = context_plain
return item_info
def __len__(self):
return self.num_total_seqs
def concat_dh_sys_usr(self, dialog_history, sys, usr):
return dialog_history + " {} ".format(self.sys_token) + sys + " {} ".format(self.usr_token) + usr
def preprocess(self, sequence):
"""Converts words to ids."""
tokens = self.tokenizer.tokenize(self.start_token) + self.tokenizer.tokenize(sequence)[-self.max_length+1:]
story = torch.Tensor(self.tokenizer.convert_tokens_to_ids(tokens))
return story
def preprocess_slot(self, sequence):
"""Converts words to ids."""
story = []
for value in sequence:
#v = list(self.tokenizer.encode(value))# + self.tokenizer.encode("[SEP]"))
v = list(self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(value)))
story.append(v)
return story
def get_concat_context(self, dialog_history):
candidate_sys_responses = []
dialog_history_str = ""
for ui, uttr in enumerate(dialog_history):
if ui%2 == 0:
dialog_history_str += "{} {} ".format(self.sys_token, uttr)
else:
dialog_history_str += "{} {} ".format(self.usr_token, uttr)
dialog_history_str = dialog_history_str.strip()
return dialog_history_str
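# Three collate variants follow: "turn" pads flat token sequences with merge(), "dial"
# pads a (batch, nb_sentences, nb_words) hierarchy with merge_sent_and_word(), and
# "dial_flat" additionally pads a flattened view plus the [SYS]/[USR] token positions.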
def collate_fn_usdl_turn(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge(item_info['context'])
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
return item_info
def collate_fn_usdl_dial(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_seqs, src_lengths = merge_sent_and_word(item_info['context'])
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
return item_info
def collate_fn_usdl_dial_flat(data):
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x['context_flat']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# merge sequences
src_flat_seqs, src_flat_lengths = merge(item_info['context_flat'])
src_seqs, src_lengths = merge_sent_and_word(item_info['context'])
src_pos_seqs, src_pos_lengths = merge(item_info["sys_usr_id_positions"])
item_info["context"] = to_cuda(src_seqs)
item_info["context_len"] = src_lengths
item_info["context_flat"] = to_cuda(src_flat_seqs)
item_info["context_flat_len"] = src_flat_lengths
item_info["sys_usr_id_positions"] = to_cuda(src_pos_seqs)
return item_info
| 5,245 | 38.742424 | 118 |
py
|