Dataset columns (name, type, observed range):

  repo             string   (length 2 to 99)
  file             string   (length 13 to 225)
  code             string   (length 0 to 18.3M)
  file_length      int64    (0 to 18.3M)
  avg_line_length  float64  (0 to 1.36M)
  max_line_length  int64    (0 to 4.26M)
  extension_type   string   (1 distinct value)
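A minimal sketch of how rows with the columns above might be consumed; the sample row, file path, and filter threshold below are made up for illustration and are not taken from the dataset.

# Hypothetical sketch: rows represented as plain dicts keyed by the columns above.
rows = [
    {
        "repo": "nixpkgs",
        "file": "pkgs/example/setup.py",   # made-up path, illustrative only
        "code": "from setuptools import setup\n",
        "file_length": 30,
        "avg_line_length": 30.0,
        "max_line_length": 30,
        "extension_type": "py",
    },
]

# Example query: Python files whose longest line is under 100 characters.
short_lined = [
    row for row in rows
    if row["extension_type"] == "py" and row["max_line_length"] < 100
]
for row in short_lined:
    print(row["repo"], row["file"], row["file_length"])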
nixpkgs
nixpkgs-master/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/setup.py
from setuptools import setup

setup(**{
    'name': 'typeddep',
    'version': '1.3.3.7',
    'description': 'Minimal repro to test mypy and site prefixes with Nix',
    'long_description': None,
    'author': 'adisbladis',
    'author_email': '[email protected]',
    'maintainer': None,
    'maintainer_email': None,
    'url': None,
    'packages': ['typeddep'],
    'package_data': {'': ['*']},
    'install_requires': [],
    'entry_points': {},
    'python_requires': '>=3.7,<4.0',
})
494
25.052632
75
py
nixpkgs
nixpkgs-master/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/typeddep/util.py
def echo(s: str) -> str:
    return s
38
12
24
py
nixpkgs
nixpkgs-master/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/typeddep/__init__.py
0
0
0
py
nixpkgs
nixpkgs-master/pkgs/development/interpreters/python/tests/test_environments/test_python.py
""" Python interpreter and environment tests. These need to be executed with the standard library unittest. Third party test runners such as pytest cannot be used because that would interfere with the tests. """ import platform import sys import unittest import site ENV = "@env@" INTERPRETER = "@interpreter@" PYTHON_VERSION = "@pythonVersion@" IS_VIRTUALENV = @is_virtualenv@ IS_VENV = @is_venv@ IS_NIXENV = @is_nixenv@ IS_PYPY = platform.python_implementation() == "PyPy" class TestCasePython(unittest.TestCase): @unittest.skipIf(IS_PYPY, "Executable is incorrect and needs to be fixed.") def test_interpreter(self): self.assertEqual(sys.executable, INTERPRETER) @unittest.skipIf(IS_PYPY, "Prefix is incorrect and needs to be fixed.") def test_prefix(self): self.assertEqual(sys.prefix, ENV) self.assertEqual(sys.prefix, sys.exec_prefix) def test_site_prefix(self): self.assertTrue(sys.prefix in site.PREFIXES) @unittest.skipIf(IS_PYPY or sys.version_info.major==2, "Python 2 does not have base_prefix") def test_base_prefix(self): if IS_VENV or IS_NIXENV or IS_VIRTUALENV: self.assertNotEqual(sys.prefix, sys.base_prefix) else: self.assertEqual(sys.prefix, sys.base_prefix) @unittest.skipIf(sys.version_info.major==3, "sys.real_prefix is only set by virtualenv in case of Python 2.") def test_real_prefix(self): self.assertTrue(hasattr(sys, "real_prefix") == IS_VIRTUALENV) def test_python_version(self): self.assertTrue(platform.python_version().startswith(PYTHON_VERSION)) if __name__ == "__main__": unittest.main()
1,669
28.821429
113
py
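A standalone illustration of the prefix comparison that test_base_prefix above relies on: inside a venv, virtualenv, or a comparable Nix-built environment, sys.prefix differs from sys.base_prefix, while a plain interpreter reports them as equal. This is only a sketch of the check, not part of the test file.

import sys

in_virtual_env = sys.prefix != sys.base_prefix
print("sys.prefix:         ", sys.prefix)
print("sys.base_prefix:    ", sys.base_prefix)
print("virtual environment:", in_virtual_env)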
nixpkgs
nixpkgs-master/pkgs/development/libraries/ndi/update.py
#!/usr/bin/env nix-shell
#! nix-shell -i python -p "python3.withPackages (ps: with ps; [ ps.absl-py ps.requests ])"

import hashlib
import io
import json
import os.path
import tarfile

import requests
from absl import app, flags

BASE_NAME = "Install_NDI_SDK_v5_Linux"
NDI_SDK_URL = f"https://downloads.ndi.tv/SDK/NDI_SDK_Linux/{BASE_NAME}.tar.gz"
NDI_EXEC = f"{BASE_NAME}.sh"
NDI_ARCHIVE_MAGIC = b"__NDI_ARCHIVE_BEGIN__\n"

FLAG_out = flags.DEFINE_string("out", None, "Path to read/write version.json from/to.")


def find_version_json() -> str:
    if FLAG_out.value:
        return FLAG_out.value
    try_paths = ["pkgs/development/libraries/ndi/version.json", "version.json"]
    for path in try_paths:
        if os.path.exists(path):
            return path
    raise Exception(
        "Couldn't figure out where to write version.json; try specifying --out"
    )


def fetch_tarball() -> bytes:
    r = requests.get(NDI_SDK_URL)
    r.raise_for_status()
    return r.content


def read_version(tarball: bytes) -> str:
    # Find the inner script.
    outer_tarfile = tarfile.open(fileobj=io.BytesIO(tarball), mode="r:gz")
    eula_script = outer_tarfile.extractfile(NDI_EXEC).read()
    # Now find the archive embedded within the script.
    archive_start = eula_script.find(NDI_ARCHIVE_MAGIC) + len(NDI_ARCHIVE_MAGIC)
    inner_tarfile = tarfile.open(
        fileobj=io.BytesIO(eula_script[archive_start:]), mode="r:gz"
    )
    # Now find Version.txt...
    version_txt = (
        inner_tarfile.extractfile("NDI SDK for Linux/Version.txt")
        .read()
        .decode("utf-8")
    )
    _, _, version = version_txt.strip().partition(" v")
    return version


def main(argv):
    tarball = fetch_tarball()
    sha256 = hashlib.sha256(tarball).hexdigest()
    version = {
        "hash": f"sha256:{sha256}",
        "version": read_version(tarball),
    }
    out_path = find_version_json()
    with open(out_path, "w") as f:
        json.dump(version, f)
        f.write("\n")


if __name__ == "__main__":
    app.run(main)
2,046
25.24359
90
py
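A small usage sketch for read_version from the script above. It assumes the SDK tarball has already been downloaded to a local file and that the function is in scope (for example, in an interactive session where the script was loaded); the local filename is an assumption.

from pathlib import Path

tarball_bytes = Path("Install_NDI_SDK_v5_Linux.tar.gz").read_bytes()  # assumed local copy
print(read_version(tarball_bytes))  # version string parsed from the embedded Version.txt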
nixpkgs
nixpkgs-master/pkgs/development/libraries/libxcrypt/check_passthru_matches.py
import tarfile
import sys


def process_columns(line: list[str]) -> tuple[str, list[str]]:
    match line:
        case [name, h_prefix, nrbytes, flags]:
            return (h_prefix, flags.lower().split(","))
        case other:
            raise Exception("Unsupported hashes.conf line format", other)


def find_tar_file(tar: tarfile.TarFile, requested_name: str):
    """Attempts to find a single file with given name in tarball."""
    all_names = tar.getnames()
    if requested_name in all_names:
        return requested_name
    requested_suffix = f"/{requested_name}"
    candidate_names = [name for name in all_names if name.endswith(requested_suffix)]
    match candidate_names:
        case [real_name]:
            return real_name
        case other:
            raise KeyError(
                f"Could not locate a single {requested_name} in the contents of the tarball."
            )


hashes_path = "lib/hashes.conf"


def main() -> None:
    match sys.argv:
        case [_name, src, enable_hashes, "--", *enabled_crypt_scheme_ids]:
            pass
        case other:
            raise Exception(
                "Incorrect number of arguments. Usage: check_passthru_matches.py <src> <enable_hashes> -- <enabled_crypt_scheme_ids...>"
            )

    with tarfile.open(src, "r") as tar:
        real_hashes_path = find_tar_file(tar, hashes_path)
        config = tar.extractfile(real_hashes_path).read().decode("utf-8")

    formats = [
        process_columns(columns)
        for line in config.splitlines()
        if not line.startswith("#") and len(columns := line.split()) > 0
    ]
    expected_supported_formats = set(
        prefix
        for (prefix, flags) in formats
        if enable_hashes in flags or enable_hashes == "all"
    )
    passthru_supported_schemes = set(
        f"${scheme}$" for scheme in enabled_crypt_scheme_ids
    )

    assert (
        len(expected_supported_formats - passthru_supported_schemes) == 0
    ), f"libxcrypt package enables the following crypt schemes that are not listed in passthru.enabledCryptSchemeIds: {expected_supported_formats - passthru_supported_schemes}"
    assert (
        len(passthru_supported_schemes - expected_supported_formats) == 0
    ), f"libxcrypt package lists the following crypt schemes in passthru.enabledCryptSchemeIds that are not supported: {passthru_supported_schemes - expected_supported_formats}"


if __name__ == "__main__":
    main()
2,448
33.492958
177
py
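A quick illustration of process_columns from the script above on a single hashes.conf-style row. The column values are made up rather than taken from a real libxcrypt release, and the function is assumed to be in scope.

columns = ["yescrypt", "$y$", "73", "STRONG,DEFAULT"]   # made-up sample row
prefix, flags = process_columns(columns)
assert prefix == "$y$"
assert flags == ["strong", "default"]   # flags are lower-cased and split on ","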
nixpkgs
nixpkgs-master/pkgs/development/node-packages/remove-attr.py
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p

import collections.abc
import fileinput
import json
import os.path
import re
import sys


def remove(attr):
    with open(os.path.join(os.path.dirname(__file__), 'node-packages.json'), 'r+') as node_packages_json:
        packages = json.load(node_packages_json)
        idx = 0
        while idx < len(packages):
            if packages[idx] == attr or (isinstance(packages[idx], collections.abc.Mapping) and next(iter(packages[idx].keys())) == attr):
                del packages[idx]
            else:
                idx += 1
        node_packages_json.seek(0)
        for idx, package in enumerate(packages):
            if idx == 0:
                node_packages_json.write('[\n ')
            else:
                node_packages_json.write(', ')
            json.dump(package, node_packages_json)
            node_packages_json.write('\n')
        node_packages_json.write(']\n')
        node_packages_json.truncate()

    with fileinput.input(os.path.join(os.path.dirname(__file__), 'node-packages.nix'), inplace=1) as node_packages:
        safe_attr = re.escape(attr)
        in_attr = False
        for line in node_packages:
            if in_attr:
                if re.fullmatch(r' \};\n', line):
                    in_attr = False
            else:
                if re.fullmatch(rf' (?:{safe_attr}|"{safe_attr}") = nodeEnv\.buildNodePackage \{{\n', line):
                    in_attr = True
                else:
                    sys.stdout.write(line)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Remove a given package from the node-packages.nix file')
    parser.add_argument('attr', help='The package attribute to remove')
    args = parser.parse_args()
    remove(args.attr)
1,813
31.981818
138
py
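Entries in node-packages.json handled by remove() above may be either a bare attribute name or a one-key mapping; this sketch shows that matching predicate in isolation, on made-up entries.

import collections.abc

entries = ["left-pad", {"webpack": "^5"}, "eslint"]   # made-up sample entries
attr = "webpack"

matched = [
    entry for entry in entries
    if entry == attr
    or (isinstance(entry, collections.abc.Mapping) and next(iter(entry.keys())) == attr)
]
print(matched)  # -> [{'webpack': '^5'}]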
nixpkgs
nixpkgs-master/pkgs/development/compilers/llvm/update-git.py
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3 nix

import csv
import fileinput
import json
import os
import re
import subprocess
import sys
from codecs import iterdecode
from datetime import datetime
from urllib.request import urlopen, Request

DEFAULT_NIX = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'git/default.nix')


def get_latest_chromium_build():
    RELEASES_URL = 'https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/dev/versions/all/releases?filter=endtime=none&order_by=version%20desc'
    print(f'GET {RELEASES_URL}')
    with urlopen(RELEASES_URL) as resp:
        return json.load(resp)['releases'][0]


def get_file_revision(revision, file_path):
    """Fetches the requested Git revision of the given Chromium file."""
    url = f'https://raw.githubusercontent.com/chromium/chromium/{revision}/{file_path}'
    with urlopen(url) as http_response:
        return http_response.read().decode()


def get_commit(ref):
    url = f'https://api.github.com/repos/llvm/llvm-project/commits/{ref}'
    headers = {'Accept': 'application/vnd.github.v3+json'}
    request = Request(url, headers=headers)
    with urlopen(request) as http_response:
        return json.loads(http_response.read().decode())


def get_current_revision():
    """Get the current revision of llvmPackages_git."""
    with open(DEFAULT_NIX) as f:
        for line in f:
            rev = re.search(r'^ rev = "(.*)";', line)
            if rev:
                return rev.group(1)
    sys.exit(1)


def nix_prefetch_url(url, algo='sha256'):
    """Prefetches the content of the given URL."""
    print(f'nix-prefetch-url {url}')
    out = subprocess.check_output(['nix-prefetch-url', '--type', algo, '--unpack', url])
    return out.decode('utf-8').rstrip()


chromium_build = get_latest_chromium_build()
chromium_version = chromium_build['version']
print(f'chromiumDev version: {chromium_version}')
print('Getting LLVM commit...')
clang_update_script = get_file_revision(chromium_version, 'tools/clang/scripts/update.py')
clang_revision = re.search(r"^CLANG_REVISION = '(.+)'$", clang_update_script, re.MULTILINE).group(1)
clang_commit_short = re.search(r"llvmorg-[0-9]+-init-[0-9]+-g([0-9a-f]{8})", clang_revision).group(1)
release_version = re.search(r"^RELEASE_VERSION = '(.+)'$", clang_update_script, re.MULTILINE).group(1)
commit = get_commit(clang_commit_short)

if get_current_revision() == commit["sha"]:
    print('No new update available.')
    sys.exit(0)

date = datetime.fromisoformat(commit['commit']['committer']['date'].rstrip('Z')).date().isoformat()
version = f'unstable-{date}'

print('Prefetching source tarball...')
hash = nix_prefetch_url(f'https://github.com/llvm/llvm-project/archive/{commit["sha"]}.tar.gz')

print('Updating default.nix...')
with fileinput.FileInput(DEFAULT_NIX, inplace=True) as f:
    for line in f:
        if match := re.search(r'^ rev-version = "unstable-(.+)";', line):
            old_date = match.group(1)
        result = re.sub(r'^ release_version = ".+";', f' release_version = "{release_version}";', line)
        result = re.sub(r'^ rev = ".*";', f' rev = "{commit["sha"]}";', result)
        result = re.sub(r'^ rev-version = ".+";', f' rev-version = "{version}";', result)
        result = re.sub(r'^ sha256 = ".+";', f' sha256 = "{hash}";', result)
        print(result, end='')

# Commit the result:
commit_message = f"llvmPackages_git: {old_date} -> {date}"
subprocess.run(['git', 'add', DEFAULT_NIX], check=True)
subprocess.run(['git', 'commit', '--file=-'], input=commit_message.encode(), check=True)
3,611
39.58427
163
py
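The short-commit extraction used above, applied to a made-up CLANG_REVISION value of the shape the regex expects; the value itself is not taken from any real Chromium release.

import re

clang_revision = "llvmorg-17-init-12345-g1234abcd-2"  # made-up example value
clang_commit_short = re.search(r"llvmorg-[0-9]+-init-[0-9]+-g([0-9a-f]{8})", clang_revision).group(1)
print(clang_commit_short)  # -> 1234abcd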
nixpkgs
nixpkgs-master/pkgs/development/compilers/semeru-bin/generate-sources.py
#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"

import json
import re
import requests
import sys

feature_versions = (8, 11, 16, 17)
oses = ("mac", "linux")
types = ("jre", "jdk")
impls = ("openj9",)  # trailing comma makes this a tuple rather than a bare string

arch_to_nixos = {
    "x64": ("x86_64",),
    "aarch64": ("aarch64",),
    "arm": ("armv6l", "armv7l"),
}


def get_sha256(url):
    resp = requests.get(url)
    if resp.status_code != 200:
        print("error: could not fetch checksum from url {}: code {}".format(url, resp.status_code), file=sys.stderr)
        sys.exit(1)
    return resp.text.strip().split(" ")[0]


def generate_sources(releases, feature_version, out):
    latest_version = None
    for release in releases:
        if release["prerelease"]:
            continue
        if not re.search("_openj9-", release["name"]):
            continue
        for asset in release["assets"]:
            match = re.match("ibm-semeru-open-(?P<image_type>[a-z]*)_(?P<architecture>[a-z0-9]*)_(?P<os>[a-z]*)_(?:(?P<major1>[0-9]*)u(?P<security1>[0-9]*)b(?P<build1>[0-9]*)|(?P<major2>[0-9]*)\\.(?P<minor2>[0-9]*)\\.(?P<security2>[0-9]*)_(?P<build2>[0-9]*))_(?P<jvm_impl>[a-z0-9]*)-[0-9]*\\.[0-9]*\\.[0-9]\\.tar\\.gz$", asset["name"])
            if not match:
                continue
            if match["os"] not in oses:
                continue
            if match["image_type"] not in types:
                continue
            if match["jvm_impl"] not in impls:
                continue
            if match["architecture"] not in arch_to_nixos:
                continue

            version = ".".join([
                match["major1"] or match["major2"],
                match["minor2"] or "0",
                match["security1"] or match["security2"]
            ])
            build = match["build1"] or match["build2"]
            if latest_version and latest_version != (version, build):
                continue
            latest_version = (version, build)

            arch_map = (
                out
                .setdefault(match["jvm_impl"], {})
                .setdefault(match["os"], {})
                .setdefault(match["image_type"], {})
                .setdefault(feature_version, {
                    "packageType": match["image_type"],
                    "vmType": match["jvm_impl"],
                })
            )

            for nixos_arch in arch_to_nixos[match["architecture"]]:
                arch_map[nixos_arch] = {
                    "url": asset["browser_download_url"],
                    "sha256": get_sha256(asset["browser_download_url"] + ".sha256.txt"),
                    "version": version,
                    "build": build,
                }

    return out


out = {}
for feature_version in feature_versions:
    resp = requests.get(f"https://api.github.com/repos/ibmruntimes/semeru{feature_version}-binaries/releases")
    if resp.status_code != 200:
        print("error: could not fetch data for release {} (code {}) {}".format(feature_version, resp.status_code, resp.content), file=sys.stderr)
        sys.exit(1)
    generate_sources(resp.json(), f"openjdk{feature_version}", out)

with open("sources.json", "w") as f:
    json.dump(out, f, indent=2, sort_keys=True)
    f.write('\n')
3,152
35.662791
335
py
nixpkgs
nixpkgs-master/pkgs/development/compilers/adoptopenjdk-bin/generate-sources.py
#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"

import json
import re
import requests
import sys

# openjdk15 is only for bootstrapping openjdk
releases = ("openjdk8", "openjdk11", "openjdk13", "openjdk14", "openjdk15", "openjdk16", "openjdk17")
oses = ("mac", "linux", "alpine_linux")
types = ("jre", "jdk")
impls = ("hotspot", "openj9")

arch_to_nixos = {
    "x64": ("x86_64",),
    "aarch64": ("aarch64",),
    "arm": ("armv6l", "armv7l"),
    "ppc64le": ("powerpc64le",),
}


def get_sha256(url):
    resp = requests.get(url)
    if resp.status_code != 200:
        print("error: could not fetch checksum from url {}: code {}".format(url, resp.status_code), file=sys.stderr)
        sys.exit(1)
    return resp.text.strip().split(" ")[0]


def generate_sources(release, assets):
    out = {}
    for asset in assets:
        if asset["os"] not in oses: continue
        if asset["binary_type"] not in types: continue
        if asset["openjdk_impl"] not in impls: continue
        if asset["heap_size"] != "normal": continue
        if asset["architecture"] not in arch_to_nixos: continue

        # examples: 11.0.1+13, 8.0.222+10
        version, build = asset["version_data"]["semver"].split("+")

        type_map = out.setdefault(asset["os"], {})
        impl_map = type_map.setdefault(asset["binary_type"], {})
        arch_map = impl_map.setdefault(asset["openjdk_impl"], {
            "packageType": asset["binary_type"],
            "vmType": asset["openjdk_impl"],
        })

        for nixos_arch in arch_to_nixos[asset["architecture"]]:
            arch_map[nixos_arch] = {
                "url": asset["binary_link"],
                "sha256": get_sha256(asset["checksum_link"]),
                "version": version,
                "build": build,
            }

    return out


out = {}
for release in releases:
    resp = requests.get("https://api.adoptopenjdk.net/v2/latestAssets/releases/" + release)
    if resp.status_code != 200:
        print("error: could not fetch data for release {} (code {})".format(release, resp.status_code), file=sys.stderr)
        sys.exit(1)
    out[release] = generate_sources(release, resp.json())

with open("sources.json", "w") as f:
    json.dump(out, f, indent=2, sort_keys=True)
    f.write('\n')
2,313
32.536232
116
py
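The semver split performed in generate_sources above, shown on the two sample values mentioned in the script's own comment.

for semver in ("11.0.1+13", "8.0.222+10"):
    version, build = semver.split("+")
    print(version, build)  # 11.0.1 13, then 8.0.222 10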
nixpkgs
nixpkgs-master/pkgs/development/compilers/temurin-bin/generate-sources.py
#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"

import json
import re
import requests
import sys

feature_versions = (8, 11, 16, 17, 18, 19, 20)
oses = ("mac", "linux", "alpine-linux")
types = ("jre", "jdk")
impls = ("hotspot",)  # trailing comma makes this a tuple rather than a bare string

arch_to_nixos = {
    "x64": ("x86_64",),
    "aarch64": ("aarch64",),
    "arm": ("armv6l", "armv7l"),
    "ppc64le": ("powerpc64le",),
}


def generate_sources(assets, feature_version, out):
    for asset in assets:
        binary = asset["binary"]
        if binary["os"] not in oses: continue
        if binary["image_type"] not in types: continue
        if binary["jvm_impl"] not in impls: continue
        if binary["heap_size"] != "normal": continue
        if binary["architecture"] not in arch_to_nixos: continue

        version = ".".join(str(v) for v in [
            asset["version"]["major"],
            asset["version"]["minor"],
            asset["version"]["security"]
        ])
        build = str(asset["version"]["build"])

        arch_map = (
            out
            .setdefault(binary["jvm_impl"], {})
            .setdefault(binary["os"], {})
            .setdefault(binary["image_type"], {})
            .setdefault(feature_version, {
                "packageType": binary["image_type"],
                "vmType": binary["jvm_impl"],
            })
        )

        for nixos_arch in arch_to_nixos[binary["architecture"]]:
            arch_map[nixos_arch] = {
                "url": binary["package"]["link"],
                "sha256": binary["package"]["checksum"],
                "version": version,
                "build": build,
            }

    return out


out = {}
for feature_version in feature_versions:
    # Default user-agent is blocked by Azure WAF.
    headers = {'user-agent': 'nixpkgs-temurin-generate-sources/1.0.0'}
    resp = requests.get(f"https://api.adoptium.net/v3/assets/latest/{feature_version}/hotspot", headers=headers)
    if resp.status_code != 200:
        print("error: could not fetch data for release {} (code {}) {}".format(feature_version, resp.status_code, resp.content), file=sys.stderr)
        sys.exit(1)
    generate_sources(resp.json(), f"openjdk{feature_version}", out)

with open("sources.json", "w") as f:
    json.dump(out, f, indent=2, sort_keys=True)
    f.write('\n')
2,347
31.164384
145
py
nixpkgs
nixpkgs-master/maintainers/scripts/pluginupdate.py
# python library used to update plugins: # - pkgs/applications/editors/vim/plugins/update.py # - pkgs/applications/editors/kakoune/plugins/update.py # - maintainers/scripts/update-luarocks-packages # format: # $ nix run nixpkgs.python3Packages.black -c black update.py # type-check: # $ nix run nixpkgs.python3Packages.mypy -c mypy update.py # linted: # $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265 update.py import argparse import csv import functools import http import json import os import subprocess import logging import sys import time import traceback import urllib.error import urllib.parse import urllib.request import xml.etree.ElementTree as ET from datetime import datetime from functools import wraps from multiprocessing.dummy import Pool from pathlib import Path from typing import Dict, List, Optional, Tuple, Union, Any, Callable from urllib.parse import urljoin, urlparse from tempfile import NamedTemporaryFile from dataclasses import dataclass, asdict import git ATOM_ENTRY = "{http://www.w3.org/2005/Atom}entry" # " vim gets confused here ATOM_LINK = "{http://www.w3.org/2005/Atom}link" # " ATOM_UPDATED = "{http://www.w3.org/2005/Atom}updated" # " LOG_LEVELS = { logging.getLevelName(level): level for level in [ logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR ] } log = logging.getLogger() def retry(ExceptionToCheck: Any, tries: int = 4, delay: float = 3, backoff: float = 2): """Retry calling the decorated function using an exponential backoff. http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry (BSD licensed) :param ExceptionToCheck: the exception on which to retry :param tries: number of times to try (not retry) before giving up :param delay: initial delay between retries in seconds :param backoff: backoff multiplier e.g. 
value of 2 will double the delay each retry """ def deco_retry(f: Callable) -> Callable: @wraps(f) def f_retry(*args: Any, **kwargs: Any) -> Any: mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except ExceptionToCheck as e: print(f"{str(e)}, Retrying in {mdelay} seconds...") time.sleep(mdelay) mtries -= 1 mdelay *= backoff return f(*args, **kwargs) return f_retry # true decorator return deco_retry @dataclass class FetchConfig: proc: int github_token: str def make_request(url: str, token=None) -> urllib.request.Request: headers = {} if token is not None: headers["Authorization"] = f"token {token}" return urllib.request.Request(url, headers=headers) # a dictionary of plugins and their new repositories Redirects = Dict['PluginDesc', 'Repo'] class Repo: def __init__( self, uri: str, branch: str ) -> None: self.uri = uri '''Url to the repo''' self._branch = branch # Redirect is the new Repo to use self.redirect: Optional['Repo'] = None self.token = "dummy_token" @property def name(self): return self.uri.split('/')[-1] @property def branch(self): return self._branch or "HEAD" def __str__(self) -> str: return f"{self.uri}" def __repr__(self) -> str: return f"Repo({self.name}, {self.uri})" @retry(urllib.error.URLError, tries=4, delay=3, backoff=2) def has_submodules(self) -> bool: return True @retry(urllib.error.URLError, tries=4, delay=3, backoff=2) def latest_commit(self) -> Tuple[str, datetime]: log.debug("Latest commit") loaded = self._prefetch(None) updated = datetime.strptime(loaded['date'], "%Y-%m-%dT%H:%M:%S%z") return loaded['rev'], updated def _prefetch(self, ref: Optional[str]): cmd = ["nix-prefetch-git", "--quiet", "--fetch-submodules", self.uri] if ref is not None: cmd.append(ref) log.debug(cmd) data = subprocess.check_output(cmd) loaded = json.loads(data) return loaded def prefetch(self, ref: Optional[str]) -> str: print("Prefetching") loaded = self._prefetch(ref) return loaded["sha256"] def as_nix(self, plugin: "Plugin") -> str: return f'''fetchgit {{ url = "{self.uri}"; rev = "{plugin.commit}"; sha256 = "{plugin.sha256}"; }}''' class RepoGitHub(Repo): def __init__( self, owner: str, repo: str, branch: str ) -> None: self.owner = owner self.repo = repo self.token = None '''Url to the repo''' super().__init__(self.url(""), branch) log.debug("Instantiating github repo owner=%s and repo=%s", self.owner, self.repo) @property def name(self): return self.repo def url(self, path: str) -> str: res = urljoin(f"https://github.com/{self.owner}/{self.repo}/", path) return res @retry(urllib.error.URLError, tries=4, delay=3, backoff=2) def has_submodules(self) -> bool: try: req = make_request(self.url(f"blob/{self.branch}/.gitmodules"), self.token) urllib.request.urlopen(req, timeout=10).close() except urllib.error.HTTPError as e: if e.code == 404: return False else: raise return True @retry(urllib.error.URLError, tries=4, delay=3, backoff=2) def latest_commit(self) -> Tuple[str, datetime]: commit_url = self.url(f"commits/{self.branch}.atom") log.debug("Sending request to %s", commit_url) commit_req = make_request(commit_url, self.token) with urllib.request.urlopen(commit_req, timeout=10) as req: self._check_for_redirect(commit_url, req) xml = req.read() root = ET.fromstring(xml) latest_entry = root.find(ATOM_ENTRY) assert latest_entry is not None, f"No commits found in repository {self}" commit_link = latest_entry.find(ATOM_LINK) assert commit_link is not None, f"No link tag found feed entry {xml}" url = urlparse(commit_link.get("href")) updated_tag = 
latest_entry.find(ATOM_UPDATED) assert ( updated_tag is not None and updated_tag.text is not None ), f"No updated tag found feed entry {xml}" updated = datetime.strptime(updated_tag.text, "%Y-%m-%dT%H:%M:%SZ") return Path(str(url.path)).name, updated def _check_for_redirect(self, url: str, req: http.client.HTTPResponse): response_url = req.geturl() if url != response_url: new_owner, new_name = ( urllib.parse.urlsplit(response_url).path.strip("/").split("/")[:2] ) new_repo = RepoGitHub(owner=new_owner, repo=new_name, branch=self.branch) self.redirect = new_repo def prefetch(self, commit: str) -> str: if self.has_submodules(): sha256 = super().prefetch(commit) else: sha256 = self.prefetch_github(commit) return sha256 def prefetch_github(self, ref: str) -> str: cmd = ["nix-prefetch-url", "--unpack", self.url(f"archive/{ref}.tar.gz")] log.debug("Running %s", cmd) data = subprocess.check_output(cmd) return data.strip().decode("utf-8") def as_nix(self, plugin: "Plugin") -> str: if plugin.has_submodules: submodule_attr = "\n fetchSubmodules = true;" else: submodule_attr = "" return f'''fetchFromGitHub {{ owner = "{self.owner}"; repo = "{self.repo}"; rev = "{plugin.commit}"; sha256 = "{plugin.sha256}";{submodule_attr} }}''' @dataclass(frozen=True) class PluginDesc: repo: Repo branch: str alias: Optional[str] @property def name(self): if self.alias is None: return self.repo.name else: return self.alias def __lt__(self, other): return self.repo.name < other.repo.name @staticmethod def load_from_csv(config: FetchConfig, row: Dict[str, str]) -> 'PluginDesc': branch = row["branch"] repo = make_repo(row['repo'], branch.strip()) repo.token = config.github_token return PluginDesc(repo, branch.strip(), row["alias"]) @staticmethod def load_from_string(config: FetchConfig, line: str) -> 'PluginDesc': branch = "HEAD" alias = None uri = line if " as " in uri: uri, alias = uri.split(" as ") alias = alias.strip() if "@" in uri: uri, branch = uri.split("@") repo = make_repo(uri.strip(), branch.strip()) repo.token = config.github_token return PluginDesc(repo, branch.strip(), alias) @dataclass class Plugin: name: str commit: str has_submodules: bool sha256: str date: Optional[datetime] = None @property def normalized_name(self) -> str: return self.name.replace(".", "-") @property def version(self) -> str: assert self.date is not None return self.date.strftime("%Y-%m-%d") def as_json(self) -> Dict[str, str]: copy = self.__dict__.copy() del copy["date"] return copy def load_plugins_from_csv(config: FetchConfig, input_file: Path,) -> List[PluginDesc]: log.debug("Load plugins from csv %s", input_file) plugins = [] with open(input_file, newline='') as csvfile: log.debug("Writing into %s", input_file) reader = csv.DictReader(csvfile,) for line in reader: plugin = PluginDesc.load_from_csv(config, line) plugins.append(plugin) return plugins def run_nix_expr(expr): with CleanEnvironment(): cmd = ["nix", "eval", "--extra-experimental-features", "nix-command", "--impure", "--json", "--expr", expr] log.debug("Running command %s", " ".join(cmd)) out = subprocess.check_output(cmd) data = json.loads(out) return data class Editor: """The configuration of the update script.""" def __init__( self, name: str, root: Path, get_plugins: str, default_in: Optional[Path] = None, default_out: Optional[Path] = None, deprecated: Optional[Path] = None, cache_file: Optional[str] = None, ): log.debug("get_plugins:", get_plugins) self.name = name self.root = root self.get_plugins = get_plugins self.default_in = default_in or 
root.joinpath(f"{name}-plugin-names") self.default_out = default_out or root.joinpath("generated.nix") self.deprecated = deprecated or root.joinpath("deprecated.json") self.cache_file = cache_file or f"{name}-plugin-cache.json" self.nixpkgs_repo = None def add(self, args): '''CSV spec''' log.debug("called the 'add' command") fetch_config = FetchConfig(args.proc, args.github_token) editor = self for plugin_line in args.add_plugins: log.debug("using plugin_line", plugin_line) pdesc = PluginDesc.load_from_string(fetch_config, plugin_line) log.debug("loaded as pdesc", pdesc) append = [ pdesc ] editor.rewrite_input(fetch_config, args.input_file, editor.deprecated, append=append) plugin, _ = prefetch_plugin(pdesc, ) autocommit = not args.no_commit if autocommit: commit( editor.nixpkgs_repo, "{drv_name}: init at {version}".format( drv_name=editor.get_drv_name(plugin.normalized_name), version=plugin.version ), [args.outfile, args.input_file], ) # Expects arguments generated by 'update' subparser def update(self, args ): '''CSV spec''' print("the update member function should be overriden in subclasses") def get_current_plugins(self) -> List[Plugin]: """To fill the cache""" data = run_nix_expr(self.get_plugins) plugins = [] for name, attr in data.items(): p = Plugin(name, attr["rev"], attr["submodules"], attr["sha256"]) plugins.append(p) return plugins def load_plugin_spec(self, config: FetchConfig, plugin_file) -> List[PluginDesc]: '''CSV spec''' return load_plugins_from_csv(config, plugin_file) def generate_nix(self, _plugins, _outfile: str): '''Returns nothing for now, writes directly to outfile''' raise NotImplementedError() def get_update(self, input_file: str, outfile: str, config: FetchConfig): cache: Cache = Cache(self.get_current_plugins(), self.cache_file) _prefetch = functools.partial(prefetch, cache=cache) def update() -> dict: plugins = self.load_plugin_spec(config, input_file) try: pool = Pool(processes=config.proc) results = pool.map(_prefetch, plugins) finally: cache.store() plugins, redirects = check_results(results) self.generate_nix(plugins, outfile) return redirects return update @property def attr_path(self): return self.name + "Plugins" def get_drv_name(self, name: str): return self.attr_path + "." + name def rewrite_input(self, *args, **kwargs): return rewrite_input(*args, **kwargs) def create_parser(self): common = argparse.ArgumentParser( add_help=False, description=(f""" Updates nix derivations for {self.name} plugins.\n By default from {self.default_in} to {self.default_out}""" ) ) common.add_argument( "--input-names", "-i", dest="input_file", default=self.default_in, help="A list of plugins in the form owner/repo", ) common.add_argument( "--out", "-o", dest="outfile", default=self.default_out, help="Filename to save generated nix code", ) common.add_argument( "--proc", "-p", dest="proc", type=int, default=30, help="Number of concurrent processes to spawn. Setting --github-token allows higher values.", ) common.add_argument( "--github-token", "-t", type=str, default=os.getenv("GITHUB_API_TOKEN"), help="""Allows to set --proc to higher values. 
Uses GITHUB_API_TOKEN environment variables as the default value.""", ) common.add_argument( "--no-commit", "-n", action="store_true", default=False, help="Whether to autocommit changes" ) common.add_argument( "--debug", "-d", choices=LOG_LEVELS.keys(), default=logging.getLevelName(logging.WARN), help="Adjust log level" ) main = argparse.ArgumentParser( parents=[common], description=(f""" Updates nix derivations for {self.name} plugins.\n By default from {self.default_in} to {self.default_out}""" ) ) subparsers = main.add_subparsers(dest="command", required=False) padd = subparsers.add_parser( "add", parents=[], description="Add new plugin", add_help=False, ) padd.set_defaults(func=self.add) padd.add_argument( "add_plugins", default=None, nargs="+", help=f"Plugin to add to {self.attr_path} from Github in the form owner/repo", ) pupdate = subparsers.add_parser( "update", description="Update all or a subset of existing plugins", add_help=False, ) pupdate.set_defaults(func=self.update) return main def run(self,): ''' Convenience function ''' parser = self.create_parser() args = parser.parse_args() command = args.command or "update" log.setLevel(LOG_LEVELS[args.debug]) log.info("Chose to run command: %s", command) if not args.no_commit: self.nixpkgs_repo = git.Repo(self.root, search_parent_directories=True) getattr(self, command)(args) class CleanEnvironment(object): def __enter__(self) -> None: self.old_environ = os.environ.copy() local_pkgs = str(Path(__file__).parent.parent.parent) os.environ["NIX_PATH"] = f"localpkgs={local_pkgs}" self.empty_config = NamedTemporaryFile() self.empty_config.write(b"{}") self.empty_config.flush() os.environ["NIXPKGS_CONFIG"] = self.empty_config.name def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: os.environ.update(self.old_environ) self.empty_config.close() def prefetch_plugin( p: PluginDesc, cache: "Optional[Cache]" = None, ) -> Tuple[Plugin, Optional[Repo]]: repo, branch, alias = p.repo, p.branch, p.alias name = alias or p.repo.name commit = None log.info(f"Fetching last commit for plugin {name} from {repo.uri}@{branch}") commit, date = repo.latest_commit() cached_plugin = cache[commit] if cache else None if cached_plugin is not None: log.debug("Cache hit !") cached_plugin.name = name cached_plugin.date = date return cached_plugin, repo.redirect has_submodules = repo.has_submodules() log.debug(f"prefetch {name}") sha256 = repo.prefetch(commit) return ( Plugin(name, commit, has_submodules, sha256, date=date), repo.redirect, ) def print_download_error(plugin: PluginDesc, ex: Exception): print(f"{plugin}: {ex}", file=sys.stderr) ex_traceback = ex.__traceback__ tb_lines = [ line.rstrip("\n") for line in traceback.format_exception(ex.__class__, ex, ex_traceback) ] print("\n".join(tb_lines)) def check_results( results: List[Tuple[PluginDesc, Union[Exception, Plugin], Optional[Repo]]] ) -> Tuple[List[Tuple[PluginDesc, Plugin]], Redirects]: ''' ''' failures: List[Tuple[PluginDesc, Exception]] = [] plugins = [] redirects: Redirects = {} for (pdesc, result, redirect) in results: if isinstance(result, Exception): failures.append((pdesc, result)) else: new_pdesc = pdesc if redirect is not None: redirects.update({pdesc: redirect}) new_pdesc = PluginDesc(redirect, pdesc.branch, pdesc.alias) plugins.append((new_pdesc, result)) print(f"{len(results) - len(failures)} plugins were checked", end="") if len(failures) == 0: print() return plugins, redirects else: print(f", {len(failures)} plugin(s) could not be downloaded:\n") for (plugin, exception) 
in failures: print_download_error(plugin, exception) sys.exit(1) def make_repo(uri: str, branch) -> Repo: '''Instantiate a Repo with the correct specialization depending on server (gitub spec)''' # dumb check to see if it's of the form owner/repo (=> github) or https://... res = urlparse(uri) if res.netloc in [ "github.com", ""]: res = res.path.strip('/').split('/') repo = RepoGitHub(res[0], res[1], branch) else: repo = Repo(uri.strip(), branch) return repo def get_cache_path(cache_file_name: str) -> Optional[Path]: xdg_cache = os.environ.get("XDG_CACHE_HOME", None) if xdg_cache is None: home = os.environ.get("HOME", None) if home is None: return None xdg_cache = str(Path(home, ".cache")) return Path(xdg_cache, cache_file_name) class Cache: def __init__(self, initial_plugins: List[Plugin], cache_file_name: str) -> None: self.cache_file = get_cache_path(cache_file_name) downloads = {} for plugin in initial_plugins: downloads[plugin.commit] = plugin downloads.update(self.load()) self.downloads = downloads def load(self) -> Dict[str, Plugin]: if self.cache_file is None or not self.cache_file.exists(): return {} downloads: Dict[str, Plugin] = {} with open(self.cache_file) as f: data = json.load(f) for attr in data.values(): p = Plugin( attr["name"], attr["commit"], attr["has_submodules"], attr["sha256"] ) downloads[attr["commit"]] = p return downloads def store(self) -> None: if self.cache_file is None: return os.makedirs(self.cache_file.parent, exist_ok=True) with open(self.cache_file, "w+") as f: data = {} for name, attr in self.downloads.items(): data[name] = attr.as_json() json.dump(data, f, indent=4, sort_keys=True) def __getitem__(self, key: str) -> Optional[Plugin]: return self.downloads.get(key, None) def __setitem__(self, key: str, value: Plugin) -> None: self.downloads[key] = value def prefetch( pluginDesc: PluginDesc, cache: Cache ) -> Tuple[PluginDesc, Union[Exception, Plugin], Optional[Repo]]: try: plugin, redirect = prefetch_plugin(pluginDesc, cache) cache[plugin.commit] = plugin return (pluginDesc, plugin, redirect) except Exception as e: return (pluginDesc, e, None) def rewrite_input( config: FetchConfig, input_file: Path, deprecated: Path, # old pluginDesc and the new redirects: Redirects = {}, append: List[PluginDesc] = [], ): plugins = load_plugins_from_csv(config, input_file,) plugins.extend(append) if redirects: cur_date_iso = datetime.now().strftime("%Y-%m-%d") with open(deprecated, "r") as f: deprecations = json.load(f) for pdesc, new_repo in redirects.items(): new_pdesc = PluginDesc(new_repo, pdesc.branch, pdesc.alias) old_plugin, _ = prefetch_plugin(pdesc) new_plugin, _ = prefetch_plugin(new_pdesc) if old_plugin.normalized_name != new_plugin.normalized_name: deprecations[old_plugin.normalized_name] = { "new": new_plugin.normalized_name, "date": cur_date_iso, } with open(deprecated, "w") as f: json.dump(deprecations, f, indent=4, sort_keys=True) f.write("\n") with open(input_file, "w") as f: log.debug("Writing into %s", input_file) # fields = dataclasses.fields(PluginDesc) fieldnames = ['repo', 'branch', 'alias'] writer = csv.DictWriter(f, fieldnames, dialect='unix', quoting=csv.QUOTE_NONE) writer.writeheader() for plugin in sorted(plugins): writer.writerow(asdict(plugin)) def commit(repo: git.Repo, message: str, files: List[Path]) -> None: repo.index.add([str(f.resolve()) for f in files]) if repo.index.diff("HEAD"): print(f'committing to nixpkgs "{message}"') repo.index.commit(message) else: print("no changes in working tree to commit") def update_plugins(editor: 
Editor, args): """The main entry function of this module. All input arguments are grouped in the `Editor`.""" log.info("Start updating plugins") fetch_config = FetchConfig(args.proc, args.github_token) update = editor.get_update(args.input_file, args.outfile, fetch_config) redirects = update() editor.rewrite_input(fetch_config, args.input_file, editor.deprecated, redirects) autocommit = not args.no_commit if autocommit: editor.nixpkgs_repo = git.Repo(editor.root, search_parent_directories=True) commit(editor.nixpkgs_repo, f"{editor.attr_path}: update", [args.outfile]) if redirects: update() if autocommit: commit( editor.nixpkgs_repo, f"{editor.attr_path}: resolve github repository redirects", [args.outfile, args.input_file, editor.deprecated], )
24,750
31.782781
105
py
nixpkgs
nixpkgs-master/maintainers/scripts/update.py
from __future__ import annotations from typing import Dict, Generator, List, Optional, Tuple import argparse import asyncio import contextlib import json import os import re import subprocess import sys import tempfile class CalledProcessError(Exception): process: asyncio.subprocess.Process class UpdateFailedException(Exception): pass def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) async def check_subprocess(*args, **kwargs): """ Emulate check argument of subprocess.run function. """ process = await asyncio.create_subprocess_exec(*args, **kwargs) returncode = await process.wait() if returncode != 0: error = CalledProcessError() error.process = process raise error return process async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_dir: Optional[Tuple[str, str]], package: Dict, keep_going: bool): worktree: Optional[str] = None update_script_command = package['updateScript'] if temp_dir is not None: worktree, _branch = temp_dir # Ensure the worktree is clean before update. await check_subprocess('git', 'reset', '--hard', '--quiet', 'HEAD', cwd=worktree) # Update scripts can use $(dirname $0) to get their location but we want to run # their clones in the git worktree, not in the main nixpkgs repo. update_script_command = map(lambda arg: re.sub(r'^{0}'.format(re.escape(nixpkgs_root)), worktree, arg), update_script_command) eprint(f" - {package['name']}: UPDATING ...") try: update_process = await check_subprocess( 'env', f"UPDATE_NIX_NAME={package['name']}", f"UPDATE_NIX_PNAME={package['pname']}", f"UPDATE_NIX_OLD_VERSION={package['oldVersion']}", f"UPDATE_NIX_ATTR_PATH={package['attrPath']}", *update_script_command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree, ) update_info = await update_process.stdout.read() await merge_changes(merge_lock, package, update_info, temp_dir) except KeyboardInterrupt as e: eprint('Cancelling…') raise asyncio.exceptions.CancelledError() except CalledProcessError as e: eprint(f" - {package['name']}: ERROR") eprint() eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------") eprint() stderr = await e.process.stderr.read() eprint(stderr.decode('utf-8')) with open(f"{package['pname']}.log", 'wb') as logfile: logfile.write(stderr) eprint() eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------") if not keep_going: raise UpdateFailedException(f"The update script for {package['name']} failed with exit code {e.process.returncode}") @contextlib.contextmanager def make_worktree() -> Generator[Tuple[str, str], None, None]: with tempfile.TemporaryDirectory() as wt: branch_name = f'update-{os.path.basename(wt)}' target_directory = f'{wt}/nixpkgs' subprocess.run(['git', 'worktree', 'add', '-b', branch_name, target_directory]) yield (target_directory, branch_name) subprocess.run(['git', 'worktree', 'remove', '--force', target_directory]) subprocess.run(['git', 'branch', '-D', branch_name]) async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, branch: str, changes: List[Dict]) -> None: for change in changes: # Git can only handle a single index operation at a time async with merge_lock: await check_subprocess('git', 'add', *change['files'], cwd=worktree) commit_message = '{attrPath}: {oldVersion} -> {newVersion}'.format(**change) if 'commitMessage' in change: commit_message = change['commitMessage'] elif 'commitBody' in change: commit_message = commit_message + '\n\n' + change['commitBody'] await check_subprocess('git', 'commit', '--quiet', 
'-m', commit_message, cwd=worktree) await check_subprocess('git', 'cherry-pick', branch) async def check_changes(package: Dict, worktree: str, update_info: str): if 'commit' in package['supportedFeatures']: changes = json.loads(update_info) else: changes = [{}] # Try to fill in missing attributes when there is just a single change. if len(changes) == 1: # Dynamic data from updater take precedence over static data from passthru.updateScript. if 'attrPath' not in changes[0]: # update.nix is always passing attrPath changes[0]['attrPath'] = package['attrPath'] if 'oldVersion' not in changes[0]: # update.nix is always passing oldVersion changes[0]['oldVersion'] = package['oldVersion'] if 'newVersion' not in changes[0]: attr_path = changes[0]['attrPath'] obtain_new_version_process = await check_subprocess('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree) changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8')) if 'files' not in changes[0]: changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree) changed_files = (await changed_files_process.stdout.read()).splitlines() changes[0]['files'] = changed_files if len(changed_files) == 0: return [] return changes async def merge_changes(merge_lock: asyncio.Lock, package: Dict, update_info: str, temp_dir: Optional[Tuple[str, str]]) -> None: if temp_dir is not None: worktree, branch = temp_dir changes = await check_changes(package, worktree, update_info) if len(changes) > 0: await commit_changes(package['name'], merge_lock, worktree, branch, changes) else: eprint(f" - {package['name']}: DONE, no changes.") else: eprint(f" - {package['name']}: DONE.") async def updater(nixpkgs_root: str, temp_dir: Optional[Tuple[str, str]], merge_lock: asyncio.Lock, packages_to_update: asyncio.Queue[Optional[Dict]], keep_going: bool, commit: bool): while True: package = await packages_to_update.get() if package is None: # A sentinel received, we are done. return if not ('commit' in package['supportedFeatures'] or 'attrPath' in package): temp_dir = None await run_update_script(nixpkgs_root, merge_lock, temp_dir, package, keep_going) async def start_updates(max_workers: int, keep_going: bool, commit: bool, packages: List[Dict]): merge_lock = asyncio.Lock() packages_to_update: asyncio.Queue[Optional[Dict]] = asyncio.Queue() with contextlib.ExitStack() as stack: temp_dirs: List[Optional[Tuple[str, str]]] = [] # Do not create more workers than there are packages. num_workers = min(max_workers, len(packages)) nixpkgs_root_process = await check_subprocess('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE) nixpkgs_root = (await nixpkgs_root_process.stdout.read()).decode('utf-8').strip() # Set up temporary directories when using auto-commit. for i in range(num_workers): temp_dir = stack.enter_context(make_worktree()) if commit else None temp_dirs.append(temp_dir) # Fill up an update queue, for package in packages: await packages_to_update.put(package) # Add sentinels, one for each worker. # A workers will terminate when it gets sentinel from the queue. for i in range(num_workers): await packages_to_update.put(None) # Prepare updater workers for each temp_dir directory. # At most `num_workers` instances of `run_update_script` will be running at one time. 
updaters = asyncio.gather(*[updater(nixpkgs_root, temp_dir, merge_lock, packages_to_update, keep_going, commit) for temp_dir in temp_dirs]) try: # Start updater workers. await updaters except asyncio.exceptions.CancelledError: # When one worker is cancelled, cancel the others too. updaters.cancel() except UpdateFailedException as e: # When one worker fails, cancel the others, as this exception is only thrown when keep_going is false. updaters.cancel() eprint(e) sys.exit(1) def main(max_workers: int, keep_going: bool, commit: bool, packages_path: str) -> None: with open(packages_path) as f: packages = json.load(f) eprint() eprint('Going to be running update for following packages:') for package in packages: eprint(f" - {package['name']}") eprint() confirm = input('Press Enter key to continue...') if confirm == '': eprint() eprint('Running update for:') asyncio.run(start_updates(max_workers, keep_going, commit, packages)) eprint() eprint('Packages updated!') sys.exit() else: eprint('Aborting!') sys.exit(130) parser = argparse.ArgumentParser(description='Update packages') parser.add_argument('--max-workers', '-j', dest='max_workers', type=int, help='Number of updates to run concurrently', nargs='?', default=4) parser.add_argument('--keep-going', '-k', dest='keep_going', action='store_true', help='Do not stop after first failure') parser.add_argument('--commit', '-c', dest='commit', action='store_true', help='Commit the changes') parser.add_argument('packages', help='JSON file containing the list of package names and their update scripts') if __name__ == '__main__': args = parser.parse_args() try: main(args.max_workers, args.keep_going, args.commit, args.packages) except KeyboardInterrupt as e: # Let’s cancel outside of the main loop too. sys.exit(130)
10,275
40.435484
255
py
nixpkgs
nixpkgs-master/maintainers/scripts/remove-old-aliases.py
#!/usr/bin/env nix-shell #!nix-shell -i python3 -p "python3.withPackages(ps: with ps; [ ])" nix """ A program to remove old aliases or convert old aliases to throws Example usage: ./maintainers/scripts/remove-old-aliases.py --year 2018 --file ./pkgs/top-level/aliases.nix Check this file with mypy after every change! $ mypy --strict maintainers/scripts/remove-old-aliases.py """ import argparse import shutil import subprocess from datetime import date as datetimedate from datetime import datetime from pathlib import Path def process_args() -> argparse.Namespace: """process args""" arg_parser = argparse.ArgumentParser() arg_parser.add_argument( "--year", required=True, type=int, help="operate on aliases older than $year" ) arg_parser.add_argument( "--month", type=int, default=1, help="operate on aliases older than $year-$month", ) arg_parser.add_argument( "--only-throws", action="store_true", help="only operate on throws. e.g remove throws older than $date", ) arg_parser.add_argument("--file", required=True, type=Path, help="alias file") arg_parser.add_argument( "--dry-run", action="store_true", help="don't modify files, only print results" ) return arg_parser.parse_args() def get_date_lists( txt: list[str], cutoffdate: datetimedate, only_throws: bool ) -> tuple[list[str], list[str], list[str]]: """get a list of lines in which the date is older than $cutoffdate""" date_older_list: list[str] = [] date_older_throw_list: list[str] = [] date_sep_line_list: list[str] = [] for lineno, line in enumerate(txt, start=1): line = line.rstrip() my_date = None for string in line.split(): string = string.strip(":") try: # strip ':' incase there is a string like 2019-11-01: my_date = datetime.strptime(string, "%Y-%m-%d").date() except ValueError: try: my_date = datetime.strptime(string, "%Y-%m").date() except ValueError: continue if ( my_date is None or my_date > cutoffdate or "preserve, reason:" in line.lower() ): continue if "=" not in line: date_sep_line_list.append(f"{lineno} {line}") # 'if' lines could be complicated elif "if " in line and "if =" not in line: print(f"RESOLVE MANUALLY {line}") elif "throw" in line: date_older_throw_list.append(line) elif not only_throws: date_older_list.append(line) return ( date_older_list, date_sep_line_list, date_older_throw_list, ) def convert_to_throw(date_older_list: list[str]) -> list[tuple[str, str]]: """convert a list of lines to throws""" converted_list = [] for line in date_older_list.copy(): indent: str = " " * (len(line) - len(line.lstrip())) before_equal = "" after_equal = "" try: before_equal, after_equal = (x.strip() for x in line.split("=", maxsplit=2)) except ValueError as err: print(err, line, "\n") date_older_list.remove(line) continue alias = before_equal alias_unquoted = before_equal.strip('"') after_equal_list = [x.strip(";:") for x in after_equal.split()] converted = ( f"{indent}{alias} = throw \"'{alias_unquoted}' has been renamed to/replaced by" f" '{after_equal_list.pop(0)}'\";" f' # Converted to throw {datetime.today().strftime("%Y-%m-%d")}' ) converted_list.append((line, converted)) return converted_list def generate_text_to_write( txt: list[str], date_older_list: list[str], converted_to_throw: list[tuple[str, str]], date_older_throw_list: list[str], ) -> list[str]: """generate a list of text to be written to the aliasfile""" text_to_write: list[str] = [] for line in txt: text_to_append: str = "" if converted_to_throw: for tupl in converted_to_throw: if line == tupl[0]: text_to_append = f"{tupl[1]}\n" if line not in date_older_list and line 
not in date_older_throw_list: text_to_append = f"{line}\n" if text_to_append: text_to_write.append(text_to_append) return text_to_write def write_file( aliasfile: Path, text_to_write: list[str], ) -> None: """write file""" temp_aliasfile = Path(f"{aliasfile}.raliases") with open(temp_aliasfile, "w", encoding="utf-8") as far: for line in text_to_write: far.write(line) print("\nChecking the syntax of the new aliasfile") try: subprocess.run( ["nix-instantiate", "--eval", temp_aliasfile], check=True, stdout=subprocess.DEVNULL, ) except subprocess.CalledProcessError: print( "\nSyntax check failed,", "there may have been a line which only has\n" 'aliasname = "reason why";\n' "when it should have been\n" 'aliasname = throw "reason why";', ) temp_aliasfile.unlink() return shutil.move(f"{aliasfile}.raliases", aliasfile) print(f"{aliasfile} modified! please verify with 'git diff'.") def main() -> None: """main""" args = process_args() only_throws = args.only_throws aliasfile = Path(args.file).absolute() cutoffdate = (datetime.strptime(f"{args.year}-{args.month}-01", "%Y-%m-%d")).date() txt: list[str] = (aliasfile.read_text(encoding="utf-8")).splitlines() date_older_list: list[str] = [] date_sep_line_list: list[str] = [] date_older_throw_list: list[str] = [] date_older_list, date_sep_line_list, date_older_throw_list = get_date_lists( txt, cutoffdate, only_throws ) converted_to_throw: list[tuple[str, str]] = [] if date_older_list: converted_to_throw = convert_to_throw(date_older_list) print(" Will be converted to throws. ".center(100, "-")) for l_n in date_older_list: print(l_n) if date_older_throw_list: print(" Will be removed. ".center(100, "-")) for l_n in date_older_throw_list: print(l_n) if date_sep_line_list: print(" On separate line, resolve manually. ".center(100, "-")) for l_n in date_sep_line_list: print(l_n) if not args.dry_run: text_to_write = generate_text_to_write( txt, date_older_list, converted_to_throw, date_older_throw_list ) write_file(aliasfile, text_to_write) if __name__ == "__main__": main()
6,845
31.140845
91
py
nixpkgs
nixpkgs-master/maintainers/scripts/hydra-eval-failures.py
#!/usr/bin/env nix-shell #!nix-shell -i python3 -p "python3.withPackages(ps: with ps; [ requests pyquery click ])" # To use, just execute this script with --help to display help. import subprocess import json import sys import click import requests from pyquery import PyQuery as pq def map_dict (f, d): for k,v in d.items(): d[k] = f(v) maintainers_json = subprocess.check_output([ 'nix-instantiate', '-A', 'lib.maintainers', '--eval', '--strict', '--json' ]) maintainers = json.loads(maintainers_json) MAINTAINERS = map_dict(lambda v: v.get('github', None), maintainers) def get_response_text(url): return pq(requests.get(url).text) # IO EVAL_FILE = { 'nixos': 'nixos/release.nix', 'nixpkgs': 'pkgs/top-level/release.nix', } def get_maintainers(attr_name): try: nixname = attr_name.split('.') meta_json = subprocess.check_output([ 'nix-instantiate', '--eval', '--strict', '-A', '.'.join(nixname[1:]) + '.meta', EVAL_FILE[nixname[0]], '--arg', 'nixpkgs', './.', '--json']) meta = json.loads(meta_json) return meta.get('maintainers', []) except: return [] def filter_github_users(maintainers): github_only = [] for i in maintainers: if i.get('github'): github_only.append(i) return github_only def print_build(table_row): a = pq(table_row)('a')[1] print("- [ ] [{}]({})".format(a.text, a.get('href')), flush=True) job_maintainers = filter_github_users(get_maintainers(a.text)) if job_maintainers: print(" - maintainers: {}".format(" ".join(map(lambda u: '@' + u.get('github'), job_maintainers)))) # TODO: print last three persons that touched this file # TODO: pinpoint the diff that broke this build, or maybe it's transient or maybe it never worked? sys.stdout.flush() @click.command() @click.option( '--jobset', default="nixos/release-19.09", help='Hydra project like nixos/release-19.09') def cli(jobset): """ Given a Hydra project, inspect latest evaluation and print a summary of failed builds """ url = "https://hydra.nixos.org/jobset/{}".format(jobset) # get the last evaluation click.echo(click.style( 'Getting latest evaluation for {}'.format(url), fg='green')) d = get_response_text(url) evaluations = d('#tabs-evaluations').find('a[class="row-link"]') latest_eval_url = evaluations[0].get('href') # parse last evaluation page click.echo(click.style( 'Parsing evaluation {}'.format(latest_eval_url), fg='green')) d = get_response_text(latest_eval_url + '?full=1') # TODO: aborted evaluations # TODO: dependency failed without propagated builds print('\nFailures:') for tr in d('img[alt="Failed"]').parents('tr'): print_build(tr) print('\nDependency failures:') for tr in d('img[alt="Dependency failed"]').parents('tr'): print_build(tr) if __name__ == "__main__": try: cli() except Exception as e: import pdb;pdb.post_mortem()
3,168
27.044248
108
py
nixpkgs
nixpkgs-master/maintainers/scripts/doc/escape-code-markup.py
#! /usr/bin/env nix-shell #! nix-shell -I nixpkgs=channel:nixos-unstable -i python3 -p python3 -p python3.pkgs.lxml """ Pandoc will strip any markup within code elements so let’s escape them so that they can be handled manually. """ import lxml.etree as ET import re import sys def replace_element_by_text(el: ET.Element, text: str) -> None: """ Author: bernulf Source: https://stackoverflow.com/a/10520552/160386 SPDX-License-Identifier: CC-BY-SA-3.0 """ text = text + (el.tail or "") parent = el.getparent() if parent is not None: previous = el.getprevious() if previous is not None: previous.tail = (previous.tail or "") + text else: parent.text = (parent.text or "") + text parent.remove(el) DOCBOOK_NS = "http://docbook.org/ns/docbook" # List of elements that pandoc’s DocBook reader strips markup from. # https://github.com/jgm/pandoc/blob/master/src/Text/Pandoc/Readers/DocBook.hs code_elements = [ # CodeBlock "literallayout", "screen", "programlisting", # Code (inline) "classname", "code", "filename", "envar", "literal", "computeroutput", "prompt", "parameter", "option", "markup", "wordasword", "command", "varname", "function", "type", "symbol", "constant", "userinput", "systemitem", ] XMLNS_REGEX = re.compile(r'\s+xmlns(?::[^=]+)?="[^"]*"') ROOT_ELEMENT_REGEX = re.compile(r'^\s*<[^>]+>') def remove_xmlns(match: re.Match) -> str: """ Removes xmlns attributes. Expects a match containing an opening tag. """ return XMLNS_REGEX.sub('', match.group(0)) if __name__ == '__main__': assert len(sys.argv) >= 3, "usage: escape-code-markup.py <input> <output>" tree = ET.parse(sys.argv[1]) name_predicate = " or ".join([f"local-name()='{el}'" for el in code_elements]) for markup in tree.xpath(f"//*[({name_predicate}) and namespace-uri()='{DOCBOOK_NS}']/*"): text = ET.tostring(markup, encoding=str) # tostring adds xmlns attributes to the element we want to stringify # as if it was supposed to be usable standalone. # We are just converting it to CDATA so we do not care. # Let’s strip the namespace declarations to keep the code clean. # # Note that this removes even namespaces that were potentially # in the original file. Though, that should be very rare – # most of the time, we will stringify empty DocBook elements # like <xref> or <co> or, at worst, <link> with xlink:href attribute. # # Also note that the regex expects the root element to be first # thing in the string. But that should be fine, the tostring method # does not produce XML declaration or doctype by default. text = ROOT_ELEMENT_REGEX.sub(remove_xmlns, text) replace_element_by_text(markup, text) tree.write(sys.argv[2])
2,973
29.346939
94
py
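A small self-contained sketch of the transformation escape-code-markup.py performs on markup nested inside a code element; the DocBook fragment is invented for illustration, and the splice below mirrors the no-previous-sibling branch of replace_element_by_text.

import lxml.etree as ET

doc = ET.fromstring('<para><literal>run <xref linkend="opt-foo"/></literal></para>')
inner = doc.find('literal/xref')
text = ET.tostring(inner, encoding=str)       # '<xref linkend="opt-foo"/>'
# splice the stringified element back in place of the element itself
parent = inner.getparent()
parent.text = (parent.text or "") + text + (inner.tail or "")
parent.remove(inner)
print(ET.tostring(doc, encoding=str))
# the xref now survives pandoc as escaped text inside the literal element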
nixpkgs
nixpkgs-master/maintainers/scripts/doc/replace-xrefs-by-empty-links.py
#! /usr/bin/env nix-shell #! nix-shell -I nixpkgs=channel:nixos-unstable -i python3 -p python3 -p python3.pkgs.lxml """ Pandoc will try to resolve xrefs and replace them with regular links. let’s replace them with links with empty labels which MyST and our pandoc filters recognize as cross-references. """ import lxml.etree as ET import sys XLINK_NS = "http://www.w3.org/1999/xlink" ns = { "db": "http://docbook.org/ns/docbook", } if __name__ == '__main__': assert len(sys.argv) >= 3, "usage: replace-xrefs-by-empty-links.py <input> <output>" tree = ET.parse(sys.argv[1]) for xref in tree.findall(".//db:xref", ns): text = ET.tostring(xref, encoding=str) parent = xref.getparent() link = parent.makeelement('link') target_name = xref.get("linkend") link.set(f"{{{XLINK_NS}}}href", f"#{target_name}") parent.replace(xref, link) tree.write(sys.argv[2])
928
27.151515
89
py
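A standalone sketch of the xref-to-empty-link rewrite above, applied to a one-paragraph document made up for the purpose; copying the tail is done explicitly here just to keep the trailing text in the sketch.

import lxml.etree as ET

XLINK_NS = "http://www.w3.org/1999/xlink"
DB_NS = "http://docbook.org/ns/docbook"

doc = ET.fromstring(f'<para xmlns="{DB_NS}">See <xref linkend="sec-installation"/>.</para>')
xref = doc.find(f"{{{DB_NS}}}xref")
link = doc.makeelement("link")                       # unqualified, as in the script above
link.set(f"{{{XLINK_NS}}}href", "#" + xref.get("linkend"))
link.tail = xref.tail                                # keep the trailing "." in this sketch
doc.replace(xref, link)
print(ET.tostring(doc, encoding=str))                # xref is now an empty xlink:href link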
nixpkgs
nixpkgs-master/nixos/modules/services/misc/taskserver/helper-tool.py
import grp import json import pwd import os import re import string import subprocess import sys from contextlib import contextmanager from shutil import rmtree from tempfile import NamedTemporaryFile import click IS_AUTO_CONFIG = @isAutoConfig@ # NOQA CERTTOOL_COMMAND = "@certtool@" CERT_BITS = "@certBits@" CLIENT_EXPIRATION = "@clientExpiration@" CRL_EXPIRATION = "@crlExpiration@" TASKD_COMMAND = "@taskd@" TASKD_DATA_DIR = "@dataDir@" TASKD_USER = "@user@" TASKD_GROUP = "@group@" FQDN = "@fqdn@" CA_KEY = os.path.join(TASKD_DATA_DIR, "keys", "ca.key") CA_CERT = os.path.join(TASKD_DATA_DIR, "keys", "ca.cert") CRL_FILE = os.path.join(TASKD_DATA_DIR, "keys", "server.crl") RE_CONFIGUSER = re.compile(r'^\s*user\s*=(.*)$') RE_USERKEY = re.compile(r'New user key: (.+)$', re.MULTILINE) def lazyprop(fun): """ Decorator which only evaluates the specified function when accessed. """ name = '_lazy_' + fun.__name__ @property def _lazy(self): val = getattr(self, name, None) if val is None: val = fun(self) setattr(self, name, val) return val return _lazy class TaskdError(OSError): pass def run_as_taskd_user(): uid = pwd.getpwnam(TASKD_USER).pw_uid gid = grp.getgrnam(TASKD_GROUP).gr_gid os.setgid(gid) os.setuid(uid) def taskd_cmd(cmd, *args, **kwargs): """ Invoke taskd with the specified command with the privileges of the 'taskd' user and 'taskd' group. If 'capture_stdout' is passed as a keyword argument with the value True, the return value are the contents the command printed to stdout. """ capture_stdout = kwargs.pop("capture_stdout", False) fun = subprocess.check_output if capture_stdout else subprocess.check_call return fun( [TASKD_COMMAND, cmd, "--data", TASKD_DATA_DIR] + list(args), preexec_fn=run_as_taskd_user, **kwargs ) def certtool_cmd(*args, **kwargs): """ Invoke certtool from GNUTLS and return the output of the command. The provided arguments are added to the certtool command and keyword arguments are added to subprocess.check_output(). Note that this will suppress all output of certtool and it will only be printed whenever there is an unsuccessful return code. """ return subprocess.check_output( [CERTTOOL_COMMAND] + list(args), preexec_fn=lambda: os.umask(0o077), stderr=subprocess.STDOUT, **kwargs ) def label(msg): if sys.stdout.isatty() or sys.stderr.isatty(): sys.stderr.write(msg + "\n") def mkpath(*args): return os.path.join(TASKD_DATA_DIR, "orgs", *args) def mark_imperative(*path): """ Mark the specified path as being imperatively managed by creating an empty file called ".imperative", so that it doesn't interfere with the declarative configuration. """ open(os.path.join(mkpath(*path), ".imperative"), 'a').close() def is_imperative(*path): """ Check whether the given path is marked as imperative, see mark_imperative() for more information. """ full_path = [] for component in path: full_path.append(component) if os.path.exists(os.path.join(mkpath(*full_path), ".imperative")): return True return False def fetch_username(org, key): for line in open(mkpath(org, "users", key, "config"), "r"): match = RE_CONFIGUSER.match(line) if match is None: continue return match.group(1).strip() return None @contextmanager def create_template(contents): """ Generate a temporary file with the specified contents as a list of strings and yield its path as the context. 
""" template = NamedTemporaryFile(mode="w", prefix="certtool-template") template.writelines(map(lambda l: l + "\n", contents)) template.flush() yield template.name template.close() def generate_key(org, user): if not IS_AUTO_CONFIG: msg = "Automatic PKI handling is disabled, you need to " \ "manually issue a client certificate for user {}.\n" sys.stderr.write(msg.format(user)) return basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user) if os.path.exists(basedir): raise OSError("Keyfile directory for {} already exists.".format(user)) privkey = os.path.join(basedir, "private.key") pubcert = os.path.join(basedir, "public.cert") try: os.makedirs(basedir, mode=0o700) certtool_cmd("-p", "--bits", CERT_BITS, "--outfile", privkey) template_data = [ "organization = {0}".format(org), "cn = {}".format(FQDN), "expiration_days = {}".format(CLIENT_EXPIRATION), "tls_www_client", "encryption_key", "signing_key" ] with create_template(template_data) as template: certtool_cmd( "-c", "--load-privkey", privkey, "--load-ca-privkey", CA_KEY, "--load-ca-certificate", CA_CERT, "--template", template, "--outfile", pubcert ) except: rmtree(basedir) raise def revoke_key(org, user): basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user) if not os.path.exists(basedir): raise OSError("Keyfile directory for {} doesn't exist.".format(user)) pubcert = os.path.join(basedir, "public.cert") expiration = "expiration_days = {}".format(CRL_EXPIRATION) with create_template([expiration]) as template: oldcrl = NamedTemporaryFile(mode="wb", prefix="old-crl") oldcrl.write(open(CRL_FILE, "rb").read()) oldcrl.flush() certtool_cmd( "--generate-crl", "--load-crl", oldcrl.name, "--load-ca-privkey", CA_KEY, "--load-ca-certificate", CA_CERT, "--load-certificate", pubcert, "--template", template, "--outfile", CRL_FILE ) oldcrl.close() rmtree(basedir) def is_key_line(line, match): return line.startswith("---") and line.lstrip("- ").startswith(match) def getkey(*args): path = os.path.join(TASKD_DATA_DIR, "keys", *args) buf = [] for line in open(path, "r"): if len(buf) == 0: if is_key_line(line, "BEGIN"): buf.append(line) continue buf.append(line) if is_key_line(line, "END"): return ''.join(buf) raise IOError("Unable to get key from {}.".format(path)) def mktaskkey(cfg, path, keydata): heredoc = 'cat > "{}" <<EOF\n{}EOF'.format(path, keydata) cmd = 'task config taskd.{} -- "{}"'.format(cfg, path) return heredoc + "\n" + cmd class User(object): def __init__(self, org, name, key): self.__org = org self.name = name self.key = key def export(self): credentials = '/'.join([self.__org, self.name, self.key]) allow_unquoted = string.ascii_letters + string.digits + "/-_." 
if not all((c in allow_unquoted) for c in credentials): credentials = "'" + credentials.replace("'", r"'\''") + "'" script = [] if IS_AUTO_CONFIG: pubcert = getkey(self.__org, self.name, "public.cert") privkey = getkey(self.__org, self.name, "private.key") cacert = getkey("ca.cert") keydir = "${TASKDATA:-$HOME/.task}/keys" script += [ "umask 0077", 'mkdir -p "{}"'.format(keydir), mktaskkey("certificate", os.path.join(keydir, "public.cert"), pubcert), mktaskkey("key", os.path.join(keydir, "private.key"), privkey), mktaskkey("ca", os.path.join(keydir, "ca.cert"), cacert) ] script.append( "task config taskd.credentials -- {}".format(credentials) ) return "\n".join(script) + "\n" class Group(object): def __init__(self, org, name): self.__org = org self.name = name class Organisation(object): def __init__(self, name, ignore_imperative): self.name = name self.ignore_imperative = ignore_imperative def add_user(self, name): """ Create a new user along with a certificate and key. Returns a 'User' object or None if the user already exists. """ if self.ignore_imperative and is_imperative(self.name): return None if name not in self.users.keys(): output = taskd_cmd("add", "user", self.name, name, capture_stdout=True, encoding='utf-8') key = RE_USERKEY.search(output) if key is None: msg = "Unable to find key while creating user {}." raise TaskdError(msg.format(name)) generate_key(self.name, name) newuser = User(self.name, name, key.group(1)) self._lazy_users[name] = newuser return newuser return None def del_user(self, name): """ Delete a user and revoke its keys. """ if name in self.users.keys(): user = self.get_user(name) if self.ignore_imperative and \ is_imperative(self.name, "users", user.key): return # Work around https://bug.tasktools.org/browse/TD-40: rmtree(mkpath(self.name, "users", user.key)) revoke_key(self.name, name) del self._lazy_users[name] def add_group(self, name): """ Create a new group. Returns a 'Group' object or None if the group already exists. """ if self.ignore_imperative and is_imperative(self.name): return None if name not in self.groups.keys(): taskd_cmd("add", "group", self.name, name) newgroup = Group(self.name, name) self._lazy_groups[name] = newgroup return newgroup return None def del_group(self, name): """ Delete a group. """ if name in self.users.keys(): if self.ignore_imperative and \ is_imperative(self.name, "groups", name): return taskd_cmd("remove", "group", self.name, name) del self._lazy_groups[name] def get_user(self, name): return self.users.get(name) @lazyprop def users(self): result = {} for key in os.listdir(mkpath(self.name, "users")): user = fetch_username(self.name, key) if user is not None: result[user] = User(self.name, user, key) return result def get_group(self, name): return self.groups.get(name) @lazyprop def groups(self): result = {} for group in os.listdir(mkpath(self.name, "groups")): result[group] = Group(self.name, group) return result class Manager(object): def __init__(self, ignore_imperative=False): """ Instantiates an organisations manager. If ignore_imperative is True, all actions that modify data are checked whether they're created imperatively and if so, they will result in no operation. """ self.ignore_imperative = ignore_imperative def add_org(self, name): """ Create a new organisation. Returns an 'Organisation' object or None if the organisation already exists. 
""" if name not in self.orgs.keys(): taskd_cmd("add", "org", name) neworg = Organisation(name, self.ignore_imperative) self._lazy_orgs[name] = neworg return neworg return None def del_org(self, name): """ Delete and revoke keys of an organisation with all its users and groups. """ org = self.get_org(name) if org is not None: if self.ignore_imperative and is_imperative(name): return for user in list(org.users.keys()): org.del_user(user) for group in list(org.groups.keys()): org.del_group(group) taskd_cmd("remove", "org", name) del self._lazy_orgs[name] def get_org(self, name): return self.orgs.get(name) @lazyprop def orgs(self): result = {} for org in os.listdir(mkpath()): result[org] = Organisation(org, self.ignore_imperative) return result class OrganisationType(click.ParamType): name = 'organisation' def convert(self, value, param, ctx): org = Manager().get_org(value) if org is None: self.fail("Organisation {} does not exist.".format(value)) return org ORGANISATION = OrganisationType() @click.group() @click.pass_context def cli(ctx): """ Manage Taskserver users and certificates """ if not IS_AUTO_CONFIG: return for path in (CA_KEY, CA_CERT, CRL_FILE): if not os.path.exists(path): msg = "CA setup not done or incomplete, missing file {}." ctx.fail(msg.format(path)) @cli.group("org") def org_cli(): """ Manage organisations """ pass @cli.group("user") def user_cli(): """ Manage users """ pass @cli.group("group") def group_cli(): """ Manage groups """ pass @user_cli.command("list") @click.argument("organisation", type=ORGANISATION) def list_users(organisation): """ List all users belonging to the specified organisation. """ label("The following users exists for {}:".format(organisation.name)) for user in organisation.users.values(): sys.stdout.write(user.name + "\n") @group_cli.command("list") @click.argument("organisation", type=ORGANISATION) def list_groups(organisation): """ List all users belonging to the specified organisation. """ label("The following users exists for {}:".format(organisation.name)) for group in organisation.groups.values(): sys.stdout.write(group.name + "\n") @org_cli.command("list") def list_orgs(): """ List available organisations """ label("The following organisations exist:") for org in Manager().orgs: sys.stdout.write(org.name + "\n") @user_cli.command("getkey") @click.argument("organisation", type=ORGANISATION) @click.argument("user") def get_uuid(organisation, user): """ Get the UUID of the specified user belonging to the specified organisation. """ userobj = organisation.get_user(user) if userobj is None: msg = "User {} doesn't exist in organisation {}." sys.exit(msg.format(userobj.name, organisation.name)) label("User {} has the following UUID:".format(userobj.name)) sys.stdout.write(user.key + "\n") @user_cli.command("export") @click.argument("organisation", type=ORGANISATION) @click.argument("user") def export_user(organisation, user): """ Export user of the specified organisation as a series of shell commands that can be used on the client side to easily import the certificates. Note that the private key will be exported as well, so use this with care! """ userobj = organisation.get_user(user) if userobj is None: msg = "User {} doesn't exist in organisation {}." sys.exit(msg.format(user, organisation.name)) sys.stdout.write(userobj.export()) @org_cli.command("add") @click.argument("name") def add_org(name): """ Create an organisation with the specified name. """ if os.path.exists(mkpath(name)): msg = "Organisation with name {} already exists." 
        sys.exit(msg.format(name))

    taskd_cmd("add", "org", name)
    mark_imperative(name)


@org_cli.command("remove")
@click.argument("name")
def del_org(name):
    """
    Delete the organisation with the specified name.

    All of the users and groups will be deleted as well and client
    certificates will be revoked.
    """
    Manager().del_org(name)
    msg = ("Organisation {} deleted. Be sure to restart the Taskserver"
           " using 'systemctl restart taskserver.service' in order for"
           " the certificate revocation to apply.")
    click.echo(msg.format(name), err=True)


@user_cli.command("add")
@click.argument("organisation", type=ORGANISATION)
@click.argument("user")
def add_user(organisation, user):
    """
    Create a user for the given organisation along with a client certificate
    and print the key of the new user.

    The client certificate along with its public key can be shown via the
    'user export' subcommand.
    """
    userobj = organisation.add_user(user)
    if userobj is None:
        msg = "User {} already exists in organisation {}."
        sys.exit(msg.format(user, organisation.name))
    else:
        mark_imperative(organisation.name, "users", userobj.key)


@user_cli.command("remove")
@click.argument("organisation", type=ORGANISATION)
@click.argument("user")
def del_user(organisation, user):
    """
    Delete a user from the given organisation.

    This will also revoke the client certificate of the given user.
    """
    organisation.del_user(user)
    msg = ("User {} deleted. Be sure to restart the Taskserver using"
           " 'systemctl restart taskserver.service' in order for the"
           " certificate revocation to apply.")
    click.echo(msg.format(user), err=True)


@group_cli.command("add")
@click.argument("organisation", type=ORGANISATION)
@click.argument("group")
def add_group(organisation, group):
    """
    Create a group for the given organisation.
    """
    groupobj = organisation.add_group(group)
    if groupobj is None:
        msg = "Group {} already exists in organisation {}."
        sys.exit(msg.format(group, organisation.name))
    else:
        mark_imperative(organisation.name, "groups", groupobj.name)


@group_cli.command("remove")
@click.argument("organisation", type=ORGANISATION)
@click.argument("group")
def del_group(organisation, group):
    """
    Delete a group from the given organisation.
    """
    organisation.del_group(group)
    click.echo("Group {} deleted.".format(group), err=True)


def add_or_delete(old, new, add_fun, del_fun):
    """
    Given an 'old' and 'new' list, figure out the intersections and invoke
    'add_fun' against every element that is not in the 'old' list and
    'del_fun' against every element that is not in the 'new' list.

    Returns a tuple where the first element is the set of elements that were
    added and the second element is the set of elements that were deleted.
    """
    old_set = set(old)
    new_set = set(new)

    to_delete = old_set - new_set
    to_add = new_set - old_set

    for elem in to_delete:
        del_fun(elem)

    for elem in to_add:
        add_fun(elem)

    return to_add, to_delete


@cli.command("process-json")
@click.argument('json-file', type=click.File('rb'))
def process_json(json_file):
    """
    Create and delete users, groups and organisations based on a JSON file.

    The structure of this file is exactly the same as the
    'services.taskserver.organisations' option of the NixOS module and is
    used for declaratively adding and deleting users.

    Hence this subcommand is not recommended outside of the scope of the
    NixOS module.
""" data = json.load(json_file) mgr = Manager(ignore_imperative=True) add_or_delete(mgr.orgs.keys(), data.keys(), mgr.add_org, mgr.del_org) for org in mgr.orgs.values(): if is_imperative(org.name): continue add_or_delete(org.users.keys(), data[org.name]['users'], org.add_user, org.del_user) add_or_delete(org.groups.keys(), data[org.name]['groups'], org.add_group, org.del_group) if __name__ == '__main__': cli()
19,986
28.008708
79
py
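A quick illustration of the add_or_delete() reconciliation helper defined in helper-tool.py above, run with plain lists instead of taskd organisations; it assumes the function is already in scope.

added, deleted = [], []
to_add, to_delete = add_or_delete(
    old=["alice", "bob"],
    new=["bob", "carol"],
    add_fun=added.append,
    del_fun=deleted.append,
)
assert to_add == {"carol"} and to_delete == {"alice"}
assert added == ["carol"] and deleted == ["alice"]
# Only the difference is touched, which is how process-json reconciles the
# declarative NixOS configuration with the on-disk taskd state.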
nixpkgs
nixpkgs-master/nixos/modules/services/x11/display-managers/set-session.py
#!/usr/bin/env python import gi, argparse, os, logging, sys gi.require_version("AccountsService", "1.0") from gi.repository import AccountsService, GLib from ordered_set import OrderedSet def get_session_file(session): system_data_dirs = GLib.get_system_data_dirs() session_dirs = OrderedSet( os.path.join(data_dir, session) for data_dir in system_data_dirs for session in {"wayland-sessions", "xsessions"} ) session_files = OrderedSet( os.path.join(dir, session + ".desktop") for dir in session_dirs if os.path.exists(os.path.join(dir, session + ".desktop")) ) # Deal with duplicate wayland-sessions and xsessions. # Needed for the situation in gnome-session, where there's # a xsession named the same as a wayland session. if any(map(is_session_wayland, session_files)): session_files = OrderedSet( session for session in session_files if is_session_wayland(session) ) else: session_files = OrderedSet( session for session in session_files if is_session_xsession(session) ) if len(session_files) == 0: logging.warning("No session files are found.") sys.exit(0) else: return session_files[0] def is_session_xsession(session_file): return "/xsessions/" in session_file def is_session_wayland(session_file): return "/wayland-sessions/" in session_file def main(): parser = argparse.ArgumentParser( description="Set session type for all normal users." ) parser.add_argument("session", help="Name of session to set.") args = parser.parse_args() session = getattr(args, "session") session_file = get_session_file(session) user_manager = AccountsService.UserManager.get_default() users = user_manager.list_users() for user in users: if user.is_system_account(): continue else: if is_session_wayland(session_file): logging.debug( f"Setting session name: {session}, as we found the existing wayland-session: {session_file}" ) user.set_session(session) user.set_session_type("wayland") elif is_session_xsession(session_file): logging.debug( f"Setting session name: {session}, as we found the existing xsession: {session_file}" ) user.set_x_session(session) user.set_session(session) user.set_session_type("x11") else: logging.error(f"Couldn't figure out session type for {session_file}") sys.exit(1) if __name__ == "__main__": main()
2,758
29.655556
112
py
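The session-type precedence implemented by get_session_file above, restated on invented paths and without AccountsService: if any candidate lives under wayland-sessions, only those are considered, otherwise the xsessions entries are used.

candidates = [
    "/run/current-system/sw/share/wayland-sessions/gnome.desktop",
    "/run/current-system/sw/share/xsessions/gnome.desktop",
]
wayland = [p for p in candidates if "/wayland-sessions/" in p]
xsession = [p for p in candidates if "/xsessions/" in p]
chosen = (wayland or xsession)[0]          # wayland entries win when present
assert "/wayland-sessions/" in chosen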
nixpkgs
nixpkgs-master/nixos/modules/virtualisation/includes-to-excludes.py
# Convert a list of strings to a regex that matches everything but those strings # ... and it had to be a POSIX regex; no negative lookahead :( # This is a workaround for erofs supporting only exclude regex, not an include list import sys import re from collections import defaultdict # We can configure this script to match in different ways if we need to. # The regex got too long for the argument list, so we had to truncate the # hashes and use MATCH_STRING_PREFIX. That's less accurate, and might pick up some # garbage like .lock files, but only if the sandbox doesn't hide those. Even # then it should be harmless. # Produce the negation of ^a$ MATCH_EXACTLY = ".+" # Produce the negation of ^a MATCH_STRING_PREFIX = "//X" # //X should be epsilon regex instead. Not supported?? # Produce the negation of ^a/? MATCH_SUBPATHS = "[^/].*$" # match_end = MATCH_SUBPATHS match_end = MATCH_STRING_PREFIX # match_end = MATCH_EXACTLY def chars_to_inverted_class(letters): assert len(letters) > 0 letters = list(letters) s = "[^" if "]" in letters: s += "]" letters.remove("]") final = "" if "-" in letters: final = "-" letters.remove("-") s += "".join(letters) s += final s += "]" return s # There's probably at least one bug in here, but it seems to works well enough # for filtering store paths. def strings_to_inverted_regex(strings): s = "(" # Match anything that starts with the wrong character chars = defaultdict(list) for item in strings: if item != "": chars[item[0]].append(item[1:]) if len(chars) == 0: s += match_end else: s += chars_to_inverted_class(chars) # Now match anything that starts with the right char, but then goes wrong for char, sub in chars.items(): s += "|(" + re.escape(char) + strings_to_inverted_regex(sub) + ")" s += ")" return s if __name__ == "__main__": stdin_lines = [] for line in sys.stdin: if line.strip() != "": stdin_lines.append(line.strip()) print("^" + strings_to_inverted_regex(stdin_lines)) # Test: # (echo foo; echo fo/; echo foo/; echo foo/ba/r; echo b; echo az; echo az/; echo az/a; echo ab; echo ab/a; echo ab/; echo abc; echo abcde; echo abb; echo ac; echo b) | grep -vE "$((echo ab; echo az; echo foo;) | python includes-to-excludes.py | tee /dev/stderr )" # should print ab, az, foo and their subpaths
2,465
27.344828
263
py
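A pure-Python variant of the grep test in the script's trailing comment, assuming strings_to_inverted_regex is in scope (for example after running the file with python -i).

import re

pattern = "^" + strings_to_inverted_regex(["ab", "az", "foo"])
paths = ["ab", "abc", "az/x", "foo/bar", "b", "ac", "zzz"]
excluded = [p for p in paths if re.match(pattern, p)]
assert excluded == ["b", "ac", "zzz"]
# Anything whose prefix is not one of the given strings matches the inverted
# regex and is excluded; "abc" slips through, which is the documented
# imprecision of MATCH_STRING_PREFIX.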
nixpkgs
nixpkgs-master/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
#! @python3@/bin/python3 -B import argparse import shutil import os import sys import errno import subprocess import glob import tempfile import errno import warnings import ctypes libc = ctypes.CDLL("libc.so.6") import re import datetime import glob import os.path from typing import NamedTuple, List, Optional from packaging import version class SystemIdentifier(NamedTuple): profile: Optional[str] generation: int specialisation: Optional[str] def copy_if_not_exists(source: str, dest: str) -> None: if not os.path.exists(dest): shutil.copyfile(source, dest) def generation_dir(profile: Optional[str], generation: int) -> str: if profile: return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation) else: return "/nix/var/nix/profiles/system-%d-link" % (generation) def system_dir(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: d = generation_dir(profile, generation) if specialisation: return os.path.join(d, "specialisation", specialisation) else: return d BOOT_ENTRY = """title {title} version Generation {generation} {description} linux {kernel} initrd {initrd} options {kernel_params} """ def generation_conf_filename(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: pieces = [ "nixos", profile or None, "generation", str(generation), f"specialisation-{specialisation}" if specialisation else None, ] return "-".join(p for p in pieces if p) + ".conf" def write_loader_conf(profile: Optional[str], generation: int, specialisation: Optional[str]) -> None: with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f: if "@timeout@" != "": f.write("timeout @timeout@\n") f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation)) if not @editor@: f.write("editor 0\n"); f.write("console-mode @consoleMode@\n"); os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf") def profile_path(profile: Optional[str], generation: int, specialisation: Optional[str], name: str) -> str: return os.path.realpath("%s/%s" % (system_dir(profile, generation, specialisation), name)) def copy_from_profile(profile: Optional[str], generation: int, specialisation: Optional[str], name: str, dry_run: bool = False) -> str: store_file_path = profile_path(profile, generation, specialisation, name) suffix = os.path.basename(store_file_path) store_dir = os.path.basename(os.path.dirname(store_file_path)) efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix) if not dry_run: copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path)) return efi_file_path def describe_generation(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: try: with open(profile_path(profile, generation, specialisation, "nixos-version")) as f: nixos_version = f.read() except IOError: nixos_version = "Unknown" kernel_dir = os.path.dirname(profile_path(profile, generation, specialisation, "kernel")) module_dir = glob.glob("%s/lib/modules/*" % kernel_dir)[0] kernel_version = os.path.basename(module_dir) build_time = int(os.path.getctime(system_dir(profile, generation, specialisation))) build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F') description = "@distroName@ {}, Linux Kernel {}, Built on {}".format( nixos_version, kernel_version, build_date ) return description def write_entry(profile: Optional[str], generation: int, specialisation: Optional[str], machine_id: str, current: bool) -> None: kernel = copy_from_profile(profile, generation, 
specialisation, "kernel") initrd = copy_from_profile(profile, generation, specialisation, "initrd") title = "@distroName@{profile}{specialisation}".format( profile=" [" + profile + "]" if profile else "", specialisation=" (%s)" % specialisation if specialisation else "") try: append_initrd_secrets = profile_path(profile, generation, specialisation, "append-initrd-secrets") subprocess.check_call([append_initrd_secrets, "@efiSysMountPoint@%s" % (initrd)]) except FileNotFoundError: pass except subprocess.CalledProcessError: if current: print("failed to create initrd secrets!", file=sys.stderr) sys.exit(1) else: print("warning: failed to create initrd secrets " f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr) print("note: this is normal after having removed " "or renamed a file in `boot.initrd.secrets`", file=sys.stderr) entry_file = "@efiSysMountPoint@/loader/entries/%s" % ( generation_conf_filename(profile, generation, specialisation)) tmp_path = "%s.tmp" % (entry_file) kernel_params = "init=%s " % profile_path(profile, generation, specialisation, "init") with open(profile_path(profile, generation, specialisation, "kernel-params")) as params_file: kernel_params = kernel_params + params_file.read() with open(tmp_path, 'w') as f: f.write(BOOT_ENTRY.format(title=title, generation=generation, kernel=kernel, initrd=initrd, kernel_params=kernel_params, description=describe_generation(profile, generation, specialisation))) if machine_id is not None: f.write("machine-id %s\n" % machine_id) os.rename(tmp_path, entry_file) def mkdir_p(path: str) -> None: try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST or not os.path.isdir(path): raise def get_generations(profile: Optional[str] = None) -> List[SystemIdentifier]: gen_list = subprocess.check_output([ "@nix@/bin/nix-env", "--list-generations", "-p", "/nix/var/nix/profiles/%s" % ("system-profiles/" + profile if profile else "system"), "--option", "build-users-group", ""], universal_newlines=True) gen_lines = gen_list.split('\n') gen_lines.pop() configurationLimit = @configurationLimit@ configurations = [ SystemIdentifier( profile=profile, generation=int(line.split()[0]), specialisation=None ) for line in gen_lines ] return configurations[-configurationLimit:] def get_specialisations(profile: Optional[str], generation: int, _: Optional[str]) -> List[SystemIdentifier]: specialisations_dir = os.path.join( system_dir(profile, generation, None), "specialisation") if not os.path.exists(specialisations_dir): return [] return [SystemIdentifier(profile, generation, spec) for spec in os.listdir(specialisations_dir)] def remove_old_entries(gens: List[SystemIdentifier]) -> None: rex_profile = re.compile("^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$") rex_generation = re.compile("^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$") known_paths = [] for gen in gens: known_paths.append(copy_from_profile(*gen, "kernel", True)) known_paths.append(copy_from_profile(*gen, "initrd", True)) for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"): if rex_profile.match(path): prof = rex_profile.sub(r"\1", path) else: prof = None try: gen_number = int(rex_generation.sub(r"\1", path)) except ValueError: continue if not (prof, gen_number, None) in gens: os.unlink(path) for path in glob.iglob("@efiSysMountPoint@/efi/nixos/*"): if not path in known_paths and not os.path.isdir(path): os.unlink(path) def get_profiles() -> List[str]: 
if os.path.isdir("/nix/var/nix/profiles/system-profiles/"): return [x for x in os.listdir("/nix/var/nix/profiles/system-profiles/") if not x.endswith("-link")] else: return [] def main() -> None: parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files') parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot') args = parser.parse_args() try: with open("/etc/machine-id") as machine_file: machine_id = machine_file.readlines()[0] except IOError as e: if e.errno != errno.ENOENT: raise # Since systemd version 232 a machine ID is required and it might not # be there on newly installed systems, so let's generate one so that # bootctl can find it and we can also pass it to write_entry() later. cmd = ["@systemd@/bin/systemd-machine-id-setup", "--print"] machine_id = subprocess.run( cmd, text=True, check=True, stdout=subprocess.PIPE ).stdout.rstrip() if os.getenv("NIXOS_INSTALL_GRUB") == "1": warnings.warn("NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER", DeprecationWarning) os.environ["NIXOS_INSTALL_BOOTLOADER"] = "1" # flags to pass to bootctl install/update bootctl_flags = [] if "@canTouchEfiVariables@" != "1": bootctl_flags.append("--no-variables") if "@graceful@" == "1": bootctl_flags.append("--graceful") if os.getenv("NIXOS_INSTALL_BOOTLOADER") == "1": # bootctl uses fopen() with modes "wxe" and fails if the file exists. if os.path.exists("@efiSysMountPoint@/loader/loader.conf"): os.unlink("@efiSysMountPoint@/loader/loader.conf") subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["install"]) else: # Update bootloader to latest if needed available_out = subprocess.check_output(["@systemd@/bin/bootctl", "--version"], universal_newlines=True).split()[2] installed_out = subprocess.check_output(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@", "status"], universal_newlines=True) # See status_binaries() in systemd bootctl.c for code which generates this installed_match = re.search(r"^\W+File:.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$", installed_out, re.IGNORECASE | re.MULTILINE) available_match = re.search(r"^\((.*)\)$", available_out) if installed_match is None: raise Exception("could not find any previously installed systemd-boot") if available_match is None: raise Exception("could not determine systemd-boot version") installed_version = version.parse(installed_match.group(1)) available_version = version.parse(available_match.group(1)) # systemd 252 has a regression that leaves some machines unbootable, so we skip that update. 
# The fix is in 252.2 # See https://github.com/systemd/systemd/issues/25363 and https://github.com/NixOS/nixpkgs/pull/201558#issuecomment-1348603263 if installed_version < available_version: if version.parse('252') <= available_version < version.parse('252.2'): print("skipping systemd-boot update to %s because of known regression" % available_version) else: print("updating systemd-boot from %s to %s" % (installed_version, available_version)) subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["update"]) mkdir_p("@efiSysMountPoint@/efi/nixos") mkdir_p("@efiSysMountPoint@/loader/entries") gens = get_generations() for profile in get_profiles(): gens += get_generations(profile) remove_old_entries(gens) for gen in gens: try: is_default = os.path.dirname(profile_path(*gen, "init")) == args.default_config write_entry(*gen, machine_id, current=is_default) for specialisation in get_specialisations(*gen): write_entry(*specialisation, machine_id, current=is_default) if is_default: write_loader_conf(*gen) except OSError as e: # See https://github.com/NixOS/nixpkgs/issues/114552 if e.errno == errno.EINVAL: profile = f"profile '{gen.profile}'" if gen.profile else "default profile" print("ignoring {} in the list of boot entries because of the following error:\n{}".format(profile, e), file=sys.stderr) else: raise e for root, _, files in os.walk('@efiSysMountPoint@/efi/nixos/.extra-files', topdown=False): relative_root = root.removeprefix("@efiSysMountPoint@/efi/nixos/.extra-files").removeprefix("/") actual_root = os.path.join("@efiSysMountPoint@", relative_root) for file in files: actual_file = os.path.join(actual_root, file) if os.path.exists(actual_file): os.unlink(actual_file) os.unlink(os.path.join(root, file)) if not len(os.listdir(actual_root)): os.rmdir(actual_root) os.rmdir(root) mkdir_p("@efiSysMountPoint@/efi/nixos/.extra-files") subprocess.check_call("@copyExtraFiles@") # Since fat32 provides little recovery facilities after a crash, # it can leave the system in an unbootable state, when a crash/outage # happens shortly after an update. To decrease the likelihood of this # event sync the efi filesystem after each update. rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY)) if rc != 0: print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr) if __name__ == '__main__': main()
14,083
40.181287
142
py
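A few concrete outputs of generation_conf_filename above, assuming the function is in scope; the naming scheme matters because remove_old_entries later matches these file names with regexes.

assert generation_conf_filename(None, 42, None) == "nixos-generation-42.conf"
assert generation_conf_filename("test", 42, None) == "nixos-test-generation-42.conf"
assert generation_conf_filename(None, 42, "gaming") == "nixos-generation-42-specialisation-gaming.conf"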
nixpkgs
nixpkgs-master/nixos/tests/spark/spark_sample.py
from pyspark.sql import Row, SparkSession from pyspark.sql import functions as F from pyspark.sql.functions import udf from pyspark.sql.types import * from pyspark.sql.functions import explode def explode_col(weight): return int(weight//10) * [10.0] + ([] if weight%10==0 else [weight%10]) spark = SparkSession.builder.getOrCreate() dataSchema = [ StructField("feature_1", FloatType()), StructField("feature_2", FloatType()), StructField("bias_weight", FloatType()) ] data = [ Row(0.1, 0.2, 10.32), Row(0.32, 1.43, 12.8), Row(1.28, 1.12, 0.23) ] df = spark.createDataFrame(spark.sparkContext.parallelize(data), StructType(dataSchema)) normalizing_constant = 100 sum_bias_weight = df.select(F.sum('bias_weight')).collect()[0][0] normalizing_factor = normalizing_constant / sum_bias_weight df = df.withColumn('normalized_bias_weight', df.bias_weight * normalizing_factor) df = df.drop('bias_weight') df = df.withColumnRenamed('normalized_bias_weight', 'bias_weight') my_udf = udf(lambda x: explode_col(x), ArrayType(FloatType())) df1 = df.withColumn('explode_val', my_udf(df.bias_weight)) df1 = df1.withColumn("explode_val_1", explode(df1.explode_val)).drop("explode_val") df1 = df1.drop('bias_weight').withColumnRenamed('explode_val_1', 'bias_weight') df1.show() assert(df1.count() == 12)
1,325
31.341463
88
py
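A standalone check of the explode_col helper wrapped in the UDF above; the function body is copied verbatim so the snippet runs without a Spark session.

def explode_col(weight):
    return int(weight//10) * [10.0] + ([] if weight%10==0 else [weight%10])

assert explode_col(23.5) == [10.0, 10.0, 3.5]   # split into 10.0 chunks plus remainder
assert explode_col(20.0) == [10.0, 10.0]        # no remainder appended when it is zero
assert explode_col(0.23) == [0.23]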
nixpkgs
nixpkgs-master/nixos/tests/google-oslogin/server.py
#!/usr/bin/env python3 import json import sys import time import os import hashlib import base64 from http.server import BaseHTTPRequestHandler, HTTPServer from urllib.parse import urlparse, parse_qs from typing import Dict SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY'] MOCKUSER="mockuser_nixos_org" MOCKADMIN="mockadmin_nixos_org" def w(msg: bytes): sys.stderr.write(f"{msg}\n") sys.stderr.flush() def gen_fingerprint(pubkey: str): decoded_key = base64.b64decode(pubkey.encode("ascii").split()[1]) return hashlib.sha256(decoded_key).hexdigest() def gen_email(username: str): """username seems to be a 21 characters long number string, so mimic that in a reproducible way""" return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21] def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict: snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey) # seems to be a 21 characters long numberstring, so mimic that in a reproducible way email = gen_email(username) return { "loginProfiles": [ { "name": email, "posixAccounts": [ { "primary": True, "username": username, "uid": uid, "gid": gid, "homeDirectory": home_directory, "operatingSystemType": "LINUX" } ], "sshPublicKeys": { snakeoil_pubkey_fingerprint: { "key": snakeoil_pubkey, "expirationTimeUsec": str((time.time() + 600) * 1000000), # 10 minutes in the future "fingerprint": snakeoil_pubkey_fingerprint } } } ] } class ReqHandler(BaseHTTPRequestHandler): def _send_json_ok(self, data: dict): self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() out = json.dumps(data).encode() w(out) self.wfile.write(out) def _send_json_success(self, success=True): self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() out = json.dumps({"success": success}).encode() w(out) self.wfile.write(out) def _send_404(self): self.send_response(404) self.end_headers() def do_GET(self): p = str(self.path) pu = urlparse(p) params = parse_qs(pu.query) # users endpoint if pu.path == "/computeMetadata/v1/oslogin/users": # mockuser and mockadmin are allowed to login, both use the same snakeoil public key if params.get('username') == [MOCKUSER] or params.get('uid') == ["1009719690"]: username = MOCKUSER uid = "1009719690" elif params.get('username') == [MOCKADMIN] or params.get('uid') == ["1009719691"]: username = MOCKADMIN uid = "1009719691" else: self._send_404() return self._send_json_ok(gen_mockuser(username=username, uid=uid, gid=uid, home_directory=f"/home/{username}", snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY)) return # we need to provide something at the groups endpoint. # the nss module does segfault if we don't. elif pu.path == "/computeMetadata/v1/oslogin/groups": self._send_json_ok({ "posixGroups": [ {"name" : "demo", "gid" : 4294967295} ], }) return # authorize endpoint elif pu.path == "/computeMetadata/v1/oslogin/authorize": # is user allowed to login? if params.get("policy") == ["login"]: # mockuser and mockadmin are allowed to login if params.get('email') == [gen_email(MOCKUSER)] or params.get('email') == [gen_email(MOCKADMIN)]: self._send_json_success() return self._send_json_success(False) return # is user allowed to become root? 
elif params.get("policy") == ["adminLogin"]: # only mockadmin is allowed to become admin self._send_json_success((params['email'] == [gen_email(MOCKADMIN)])) return # send 404 for other policies else: self._send_404() return else: sys.stderr.write(f"Unhandled path: {p}\n") sys.stderr.flush() self.send_response(404) self.end_headers() self.wfile.write(b'') if __name__ == '__main__': s = HTTPServer(('0.0.0.0', 80), ReqHandler) s.serve_forever()
4,983
33.136986
154
py
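A hedged client-side sketch of how a test could query this mock metadata server; the base URL is an assumption (wherever server.py happens to listen), while the JSON layout follows gen_mockuser above.

import json
import urllib.request

base = "http://localhost:80"   # assumption: server.py reachable on the local host
url = base + "/computeMetadata/v1/oslogin/users?username=mockuser_nixos_org"
with urllib.request.urlopen(url) as response:
    profile = json.load(response)
posix = profile["loginProfiles"][0]["posixAccounts"][0]
assert posix["username"] == "mockuser_nixos_org"
assert posix["uid"] == "1009719690"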
nixpkgs
nixpkgs-master/nixos/tests/pam/test_chfn.py
expected_lines = { "account required pam_unix.so", "account sufficient @@pam_krb5@@/lib/security/pam_krb5.so", "auth [default=die success=done] @@pam_ccreds@@/lib/security/pam_ccreds.so action=validate use_first_pass", "auth [default=ignore success=1 service_err=reset] @@pam_krb5@@/lib/security/pam_krb5.so use_first_pass", "auth required pam_deny.so", "auth sufficient @@pam_ccreds@@/lib/security/pam_ccreds.so action=store use_first_pass", "auth sufficient pam_rootok.so", "auth sufficient pam_unix.so likeauth try_first_pass", "password sufficient @@pam_krb5@@/lib/security/pam_krb5.so use_first_pass", "password sufficient pam_unix.so nullok yescrypt", "session optional @@pam_krb5@@/lib/security/pam_krb5.so", "session required pam_env.so conffile=/etc/pam/environment readenv=0", "session required pam_unix.so", } actual_lines = set(machine.succeed("cat /etc/pam.d/chfn").splitlines()) missing_lines = expected_lines - actual_lines extra_lines = actual_lines - expected_lines non_functional_lines = set([line for line in extra_lines if (line == "" or line.startswith("#"))]) unexpected_functional_lines = extra_lines - non_functional_lines with subtest("All expected lines are in the file"): assert not missing_lines, f"Missing lines: {missing_lines}" with subtest("All remaining lines are empty or comments"): assert not unexpected_functional_lines, f"Unexpected lines: {unexpected_functional_lines}"
1,474
51.678571
111
py
nixpkgs
nixpkgs-master/nixos/lib/test-script-prepend.py
# This file contains type hints that can be prepended to Nix test scripts so they can be type # checked. from test_driver.driver import Driver from test_driver.vlan import VLan from test_driver.machine import Machine from test_driver.logger import Logger from typing import Callable, Iterator, ContextManager, Optional, List, Dict, Any, Union from typing_extensions import Protocol from pathlib import Path class RetryProtocol(Protocol): def __call__(self, fn: Callable, timeout: int = 900) -> None: raise Exception("This is just type information for the Nix test driver") class PollingConditionProtocol(Protocol): def __call__( self, fun_: Optional[Callable] = None, *, seconds_interval: float = 2.0, description: Optional[str] = None, ) -> Union[Callable[[Callable], ContextManager], ContextManager]: raise Exception("This is just type information for the Nix test driver") start_all: Callable[[], None] subtest: Callable[[str], ContextManager[None]] retry: RetryProtocol test_script: Callable[[], None] machines: List[Machine] vlans: List[VLan] driver: Driver log: Logger create_machine: Callable[[Dict[str, Any]], Machine] run_tests: Callable[[], None] join_all: Callable[[], None] serial_stdout_off: Callable[[], None] serial_stdout_on: Callable[[], None] polling_condition: PollingConditionProtocol
1,380
31.116279
93
py
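A rough sketch of the kind of Nix test script these type hints are prepended to; the target unit and command are invented, and machines[0] stands in for whichever node a real test defines.

start_all()
node = machines[0]
node.wait_for_unit("multi-user.target")
with subtest("service answers"):
    node.wait_for_open_port(80)
    node.succeed("curl --fail http://localhost")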
nixpkgs
nixpkgs-master/nixos/lib/make-options-doc/mergeJSON.py
import collections import json import os import sys from typing import Any, Dict, List JSON = Dict[str, Any] class Key: def __init__(self, path: List[str]): self.path = path def __hash__(self): result = 0 for id in self.path: result ^= hash(id) return result def __eq__(self, other): return type(self) is type(other) and self.path == other.path Option = collections.namedtuple('Option', ['name', 'value']) # pivot a dict of options keyed by their display name to a dict keyed by their path def pivot(options: Dict[str, JSON]) -> Dict[Key, Option]: result: Dict[Key, Option] = dict() for (name, opt) in options.items(): result[Key(opt['loc'])] = Option(name, opt) return result # pivot back to indexed-by-full-name # like the docbook build we'll just fail if multiple options with differing locs # render to the same option name. def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]: result: Dict[str, Dict] = dict() for (key, opt) in options.items(): if opt.name in result: raise RuntimeError( 'multiple options with colliding ids found', opt.name, result[opt.name]['loc'], opt.value['loc'], ) result[opt.name] = opt.value return result warningsAreErrors = False optOffset = 0 for arg in sys.argv[1:]: if arg == "--warnings-are-errors": optOffset += 1 warningsAreErrors = True options = pivot(json.load(open(sys.argv[1 + optOffset], 'r'))) overrides = pivot(json.load(open(sys.argv[2 + optOffset], 'r'))) # fix up declaration paths in lazy options, since we don't eval them from a full nixpkgs dir for (k, v) in options.items(): # The _module options are not declared in nixos/modules if v.value['loc'][0] != "_module": v.value['declarations'] = list(map(lambda s: f'nixos/modules/{s}' if isinstance(s, str) else s, v.value['declarations'])) # merge both descriptions for (k, v) in overrides.items(): cur = options.setdefault(k, v).value for (ok, ov) in v.value.items(): if ok == 'declarations': decls = cur[ok] for d in ov: if d not in decls: decls += [d] elif ok == "type": # ignore types of placeholder options if ov != "_unspecified" or cur[ok] == "_unspecified": cur[ok] = ov elif ov is not None or cur.get(ok, None) is None: cur[ok] = ov severity = "error" if warningsAreErrors else "warning" # check that every option has a description hasWarnings = False hasErrors = False for (k, v) in options.items(): if v.value.get('description', None) is None: hasWarnings = True print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr) v.value['description'] = "This option has no description." if v.value.get('type', "unspecified") == "unspecified": hasWarnings = True print( f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " + "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr) if hasErrors: sys.exit(1) if hasWarnings and warningsAreErrors: print( "\x1b[1;31m" + "Treating warnings as errors. Set documentation.nixos.options.warningsAreErrors " + "to false to ignore these warnings." + "\x1b[0m", file=sys.stderr) sys.exit(1) json.dump(unpivot(options), fp=sys.stdout)
3,606
33.352381
129
py
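A pivot/unpivot round trip on a toy option set, assuming both helpers from mergeJSON.py above are in scope; the option name and location are invented.

opts = {
    "services.foo.enable": {"loc": ["services", "foo", "enable"], "description": "x"},
}
by_path = pivot(opts)
# options become addressable by their path, which is what the merge loop needs
assert by_path[Key(["services", "foo", "enable"])].name == "services.foo.enable"
assert unpivot(by_path) == opts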
nixpkgs
nixpkgs-master/nixos/lib/test-driver/setup.py
from setuptools import setup, find_packages setup( name="nixos-test-driver", version='1.1', packages=find_packages(), package_data={"test_driver": ["py.typed"]}, entry_points={ "console_scripts": [ "nixos-test-driver=test_driver:main", "generate-driver-symbols=test_driver:generate_driver_symbols" ] }, )
338
21.6
67
py
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/machine.py
from contextlib import _GeneratorContextManager, nullcontext from pathlib import Path from queue import Queue from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import base64 import io import os import queue import re import select import shlex import shutil import socket import subprocess import sys import tempfile import threading import time from test_driver.logger import rootlog CHAR_TO_KEY = { "A": "shift-a", "N": "shift-n", "-": "0x0C", "_": "shift-0x0C", "B": "shift-b", "O": "shift-o", "=": "0x0D", "+": "shift-0x0D", "C": "shift-c", "P": "shift-p", "[": "0x1A", "{": "shift-0x1A", "D": "shift-d", "Q": "shift-q", "]": "0x1B", "}": "shift-0x1B", "E": "shift-e", "R": "shift-r", ";": "0x27", ":": "shift-0x27", "F": "shift-f", "S": "shift-s", "'": "0x28", '"': "shift-0x28", "G": "shift-g", "T": "shift-t", "`": "0x29", "~": "shift-0x29", "H": "shift-h", "U": "shift-u", "\\": "0x2B", "|": "shift-0x2B", "I": "shift-i", "V": "shift-v", ",": "0x33", "<": "shift-0x33", "J": "shift-j", "W": "shift-w", ".": "0x34", ">": "shift-0x34", "K": "shift-k", "X": "shift-x", "/": "0x35", "?": "shift-0x35", "L": "shift-l", "Y": "shift-y", " ": "spc", "M": "shift-m", "Z": "shift-z", "\n": "ret", "!": "shift-0x02", "@": "shift-0x03", "#": "shift-0x04", "$": "shift-0x05", "%": "shift-0x06", "^": "shift-0x07", "&": "shift-0x08", "*": "shift-0x09", "(": "shift-0x0A", ")": "shift-0x0B", } def make_command(args: list) -> str: return " ".join(map(shlex.quote, (map(str, args)))) def _perform_ocr_on_screenshot( screenshot_path: str, model_ids: Iterable[int] ) -> List[str]: if shutil.which("tesseract") is None: raise Exception("OCR requested but enableOCR is false") magick_args = ( "-filter Catrom -density 72 -resample 300 " + "-contrast -normalize -despeckle -type grayscale " + "-sharpen 1 -posterize 3 -negate -gamma 100 " + "-blur 1x65535" ) tess_args = "-c debug_file=/dev/null --psm 11" cmd = f"convert {magick_args} '{screenshot_path}' 'tiff:{screenshot_path}.tiff'" ret = subprocess.run(cmd, shell=True, capture_output=True) if ret.returncode != 0: raise Exception(f"TIFF conversion failed with exit code {ret.returncode}") model_results = [] for model_id in model_ids: cmd = f"tesseract '{screenshot_path}.tiff' - {tess_args} --oem '{model_id}'" ret = subprocess.run(cmd, shell=True, capture_output=True) if ret.returncode != 0: raise Exception(f"OCR failed with exit code {ret.returncode}") model_results.append(ret.stdout.decode("utf-8")) return model_results def retry(fn: Callable, timeout: int = 900) -> None: """Call the given function repeatedly, with 1 second intervals, until it returns True or a timeout is reached. """ for _ in range(timeout): if fn(False): return time.sleep(1) if not fn(True): raise Exception(f"action timed out after {timeout} seconds") class StartCommand: """The Base Start Command knows how to append the necessary runtime qemu options as determined by a particular test driver run. Any such start command is expected to happily receive and append additional qemu args. 
""" _cmd: str def cmd( self, monitor_socket_path: Path, shell_socket_path: Path, allow_reboot: bool = False, ) -> str: display_opts = "" display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"]) if not display_available: display_opts += " -nographic" # qemu options qemu_opts = ( " -device virtio-serial" # Note: virtconsole will map to /dev/hvc0 in Linux guests " -device virtconsole,chardev=shell" " -device virtio-rng-pci" " -serial stdio" ) if not allow_reboot: qemu_opts += " -no-reboot" # TODO: qemu script already catpures this env variable, legacy? qemu_opts += " " + os.environ.get("QEMU_OPTS", "") return ( f"{self._cmd}" f" -monitor unix:{monitor_socket_path}" f" -chardev socket,id=shell,path={shell_socket_path}" f"{qemu_opts}" f"{display_opts}" ) @staticmethod def build_environment( state_dir: Path, shared_dir: Path, ) -> dict: # We make a copy to not update the current environment env = dict(os.environ) env.update( { "TMPDIR": str(state_dir), "SHARED_DIR": str(shared_dir), "USE_TMPDIR": "1", } ) return env def run( self, state_dir: Path, shared_dir: Path, monitor_socket_path: Path, shell_socket_path: Path, allow_reboot: bool, ) -> subprocess.Popen: return subprocess.Popen( self.cmd(monitor_socket_path, shell_socket_path, allow_reboot), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=state_dir, env=self.build_environment(state_dir, shared_dir), ) class NixStartScript(StartCommand): """A start script from nixos/modules/virtualiation/qemu-vm.nix that also satisfies the requirement of the BaseStartCommand. These Nix commands have the particular characteristic that the machine name can be extracted out of them via a regex match. (Admittedly a _very_ implicit contract, evtl. TODO fix) """ def __init__(self, script: str): self._cmd = script @property def machine_name(self) -> str: match = re.search("run-(.+)-vm$", self._cmd) name = "machine" if match: name = match.group(1) return name class LegacyStartCommand(StartCommand): """Used in some places to create an ad-hoc machine instead of using nix test instrumentation + module system for that purpose. Legacy. 
""" def __init__( self, netBackendArgs: Optional[str] = None, netFrontendArgs: Optional[str] = None, hda: Optional[Tuple[Path, str]] = None, cdrom: Optional[str] = None, usb: Optional[str] = None, bios: Optional[str] = None, qemuBinary: Optional[str] = None, qemuFlags: Optional[str] = None, ): if qemuBinary is not None: self._cmd = qemuBinary else: self._cmd = "qemu-kvm" self._cmd += " -m 384" # networking net_backend = "-netdev user,id=net0" net_frontend = "-device virtio-net-pci,netdev=net0" if netBackendArgs is not None: net_backend += "," + netBackendArgs if netFrontendArgs is not None: net_frontend += "," + netFrontendArgs self._cmd += f" {net_backend} {net_frontend}" # hda hda_cmd = "" if hda is not None: hda_path = hda[0].resolve() hda_interface = hda[1] if hda_interface == "scsi": hda_cmd += ( f" -drive id=hda,file={hda_path},werror=report,if=none" " -device scsi-hd,drive=hda" ) else: hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report" self._cmd += hda_cmd # cdrom if cdrom is not None: self._cmd += f" -cdrom {cdrom}" # usb usb_cmd = "" if usb is not None: # https://github.com/qemu/qemu/blob/master/docs/usb2.txt usb_cmd += ( " -device usb-ehci" f" -drive id=usbdisk,file={usb},if=none,readonly" " -device usb-storage,drive=usbdisk " ) self._cmd += usb_cmd # bios if bios is not None: self._cmd += f" -bios {bios}" # qemu flags if qemuFlags is not None: self._cmd += f" {qemuFlags}" class Machine: """A handle to the machine with this name, that also knows how to manage the machine lifecycle with the help of a start script / command.""" name: str out_dir: Path tmp_dir: Path shared_dir: Path state_dir: Path monitor_path: Path shell_path: Path start_command: StartCommand keep_vm_state: bool process: Optional[subprocess.Popen] pid: Optional[int] monitor: Optional[socket.socket] shell: Optional[socket.socket] serial_thread: Optional[threading.Thread] booted: bool connected: bool # Store last serial console lines for use # of wait_for_console_text last_lines: Queue = Queue() callbacks: List[Callable] def __repr__(self) -> str: return f"<Machine '{self.name}'>" def __init__( self, out_dir: Path, tmp_dir: Path, start_command: StartCommand, name: str = "machine", keep_vm_state: bool = False, callbacks: Optional[List[Callable]] = None, ) -> None: self.out_dir = out_dir self.tmp_dir = tmp_dir self.keep_vm_state = keep_vm_state self.name = name self.start_command = start_command self.callbacks = callbacks if callbacks is not None else [] # set up directories self.shared_dir = self.tmp_dir / "shared-xchg" self.shared_dir.mkdir(mode=0o700, exist_ok=True) self.state_dir = self.tmp_dir / f"vm-state-{self.name}" self.monitor_path = self.state_dir / "monitor" self.shell_path = self.state_dir / "shell" if (not self.keep_vm_state) and self.state_dir.exists(): self.cleanup_statedir() self.state_dir.mkdir(mode=0o700, exist_ok=True) self.process = None self.pid = None self.monitor = None self.shell = None self.serial_thread = None self.booted = False self.connected = False @staticmethod def create_startcommand(args: Dict[str, str]) -> StartCommand: rootlog.warning( "Using legacy create_startcommand(), " "please use proper nix test vm instrumentation, instead " "to generate the appropriate nixos test vm qemu startup script" ) hda = None if args.get("hda"): hda_arg: str = args.get("hda", "") hda_arg_path: Path = Path(hda_arg) hda = (hda_arg_path, args.get("hdaInterface", "")) return LegacyStartCommand( netBackendArgs=args.get("netBackendArgs"), netFrontendArgs=args.get("netFrontendArgs"), 
hda=hda, cdrom=args.get("cdrom"), usb=args.get("usb"), bios=args.get("bios"), qemuBinary=args.get("qemuBinary"), qemuFlags=args.get("qemuFlags"), ) def is_up(self) -> bool: return self.booted and self.connected def log(self, msg: str) -> None: rootlog.log(msg, {"machine": self.name}) def log_serial(self, msg: str) -> None: rootlog.log_serial(msg, self.name) def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager: my_attrs = {"machine": self.name} my_attrs.update(attrs) return rootlog.nested(msg, my_attrs) def wait_for_monitor_prompt(self) -> str: assert self.monitor is not None answer = "" while True: undecoded_answer = self.monitor.recv(1024) if not undecoded_answer: break answer += undecoded_answer.decode() if answer.endswith("(qemu) "): break return answer def send_monitor_command(self, command: str) -> str: self.run_callbacks() message = f"{command}\n".encode() assert self.monitor is not None self.monitor.send(message) return self.wait_for_monitor_prompt() def wait_for_unit( self, unit: str, user: Optional[str] = None, timeout: int = 900 ) -> None: """Wait for a systemd unit to get into "active" state. Throws exceptions on "failed" and "inactive" states as well as after timing out. """ def check_active(_: Any) -> bool: info = self.get_unit_info(unit, user) state = info["ActiveState"] if state == "failed": raise Exception(f'unit "{unit}" reached state "{state}"') if state == "inactive": status, jobs = self.systemctl("list-jobs --full 2>&1", user) if "No jobs" in jobs: info = self.get_unit_info(unit, user) if info["ActiveState"] == state: raise Exception( f'unit "{unit}" is inactive and there are no pending jobs' ) return state == "active" with self.nested( f"waiting for unit {unit}" + (f" with user {user}" if user is not None else "") ): retry(check_active, timeout) def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]: status, lines = self.systemctl(f'--no-pager show "{unit}"', user) if status != 0: raise Exception( f'retrieving systemctl info for unit "{unit}"' + ("" if user is None else f' under user "{user}"') + f" failed with exit code {status}" ) line_pattern = re.compile(r"^([^=]+)=(.*)$") def tuple_from_line(line: str) -> Tuple[str, str]: match = line_pattern.match(line) assert match is not None return match[1], match[2] return dict( tuple_from_line(line) for line in lines.split("\n") if line_pattern.match(line) ) def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]: if user is not None: q = q.replace("'", "\\'") return self.execute( f"su -l {user} --shell /bin/sh -c " "$'XDG_RUNTIME_DIR=/run/user/`id -u` " f"systemctl --user {q}'" ) return self.execute(f"systemctl {q}") def require_unit_state(self, unit: str, require_state: str = "active") -> None: with self.nested( f"checking if unit '{unit}' has reached state '{require_state}'" ): info = self.get_unit_info(unit) state = info["ActiveState"] if state != require_state: raise Exception( f"Expected unit '{unit}' to to be in state " f"'{require_state}' but it is in state '{state}'" ) def _next_newline_closed_block_from_shell(self) -> str: assert self.shell output_buffer = [] while True: # This receives up to 4096 bytes from the socket chunk = self.shell.recv(4096) if not chunk: # Probably a broken pipe, return the output we have break decoded = chunk.decode() output_buffer += [decoded] if decoded[-1] == "\n": break return "".join(output_buffer) def execute( self, command: str, check_return: bool = True, check_output: bool = True, timeout: Optional[int] = 
900,
    ) -> Tuple[int, str]:
        self.run_callbacks()
        self.connect()

        # Always run command with shell opts
        command = f"set -euo pipefail; {command}"

        timeout_str = ""
        if timeout is not None:
            timeout_str = f"timeout {timeout}"

        # While sh is bash on NixOS, this is not the case for every distro.
        # We explicitly call bash here to allow the driver to boot other distros as well.
        out_command = (
            f"{timeout_str} bash -c {shlex.quote(command)} | (base64 --wrap 0; echo)\n"
        )

        assert self.shell
        self.shell.send(out_command.encode())

        if not check_output:
            return (-2, "")

        # Get the output
        output = base64.b64decode(self._next_newline_closed_block_from_shell())

        if not check_return:
            return (-1, output.decode())

        # Get the return code
        self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
        rc = int(self._next_newline_closed_block_from_shell().strip())

        return (rc, output.decode(errors="replace"))

    def shell_interact(self, address: Optional[str] = None) -> None:
        """Allows you to interact with the guest shell for debugging purposes.

        @address string passed to socat that will be connected to the guest shell.
        Check the `Running Tests interactively` chapter of the NixOS manual
        for an example.
        """
        self.connect()

        if address is None:
            address = "READLINE,prompt=$ "
            self.log("Terminal is ready (there is no initial prompt):")

        assert self.shell
        try:
            subprocess.run(
                ["socat", address, f"FD:{self.shell.fileno()}"],
                pass_fds=[self.shell.fileno()],
            )
        # allow users to cancel this command without breaking the test
        except KeyboardInterrupt:
            pass

    def console_interact(self) -> None:
        """Allows you to interact with QEMU's stdin.

        The shell can be exited with Ctrl+D. Note that Ctrl+C is not allowed to be used.
        QEMU's stdout is read line-wise.

        Should only be used during test development, not in the production test."""
        self.log("Terminal is ready (there is no prompt):")

        assert self.process
        assert self.process.stdin

        while True:
            try:
                char = sys.stdin.buffer.read(1)
            except KeyboardInterrupt:
                break
            if char == b"":  # ctrl+d
                self.log("Closing connection to the console")
                break
            self.send_console(char.decode())

    def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
        """Execute each command and check that it succeeds."""
        output = ""
        for command in commands:
            with self.nested(f"must succeed: {command}"):
                (status, out) = self.execute(command, timeout=timeout)
                if status != 0:
                    self.log(f"output: {out}")
                    raise Exception(f"command `{command}` failed (exit code {status})")
                output += out
        return output

    def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
        """Execute each command and check that it fails."""
        output = ""
        for command in commands:
            with self.nested(f"must fail: {command}"):
                (status, out) = self.execute(command, timeout=timeout)
                if status == 0:
                    raise Exception(f"command `{command}` unexpectedly succeeded")
                output += out
        return output

    def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
        """Wait until a command returns success and return its output.
        Throws an exception on timeout.
        """
        output = ""

        def check_success(_: Any) -> bool:
            nonlocal output
            status, output = self.execute(command, timeout=timeout)
            return status == 0

        with self.nested(f"waiting for success: {command}"):
            retry(check_success, timeout)
            return output

    def wait_until_fails(self, command: str, timeout: int = 900) -> str:
        """Wait until a command returns failure.
        Throws an exception on timeout.
""" output = "" def check_failure(_: Any) -> bool: nonlocal output status, output = self.execute(command, timeout=timeout) return status != 0 with self.nested(f"waiting for failure: {command}"): retry(check_failure, timeout) return output def wait_for_shutdown(self) -> None: if not self.booted: return with self.nested("waiting for the VM to power off"): sys.stdout.flush() assert self.process self.process.wait() self.pid = None self.booted = False self.connected = False def get_tty_text(self, tty: str) -> str: status, output = self.execute( f"fold -w$(stty -F /dev/tty{tty} size | " f"awk '{{print $2}}') /dev/vcs{tty}" ) return output def wait_until_tty_matches(self, tty: str, regexp: str) -> None: """Wait until the visible output on the chosen TTY matches regular expression. Throws an exception on timeout. """ matcher = re.compile(regexp) def tty_matches(last: bool) -> bool: text = self.get_tty_text(tty) if last: self.log( f"Last chance to match /{regexp}/ on TTY{tty}, " f"which currently contains: {text}" ) return len(matcher.findall(text)) > 0 with self.nested(f"waiting for {regexp} to appear on tty {tty}"): retry(tty_matches) def send_chars(self, chars: str, delay: Optional[float] = 0.01) -> None: with self.nested(f"sending keys {repr(chars)}"): for char in chars: self.send_key(char, delay, log=False) def wait_for_file(self, filename: str) -> None: """Waits until the file exists in machine's file system.""" def check_file(_: Any) -> bool: status, _ = self.execute(f"test -e {filename}") return status == 0 with self.nested(f"waiting for file '{filename}'"): retry(check_file) def wait_for_open_port(self, port: int, addr: str = "localhost") -> None: def port_is_open(_: Any) -> bool: status, _ = self.execute(f"nc -z {addr} {port}") return status == 0 with self.nested(f"waiting for TCP port {port} on {addr}"): retry(port_is_open) def wait_for_closed_port(self, port: int, addr: str = "localhost") -> None: def port_is_closed(_: Any) -> bool: status, _ = self.execute(f"nc -z {addr} {port}") return status != 0 with self.nested(f"waiting for TCP port {port} on {addr} to be closed"): retry(port_is_closed) def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]: return self.systemctl(f"start {jobname}", user) def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]: return self.systemctl(f"stop {jobname}", user) def wait_for_job(self, jobname: str) -> None: self.wait_for_unit(jobname) def connect(self) -> None: def shell_ready(timeout_secs: int) -> bool: """We sent some data from the backdoor service running on the guest to indicate that the backdoor shell is ready. As soon as we read some data from the socket here, we assume that our root shell is operational. """ (ready, _, _) = select.select([self.shell], [], [], timeout_secs) return bool(ready) if self.connected: return with self.nested("waiting for the VM to finish booting"): self.start() assert self.shell tic = time.time() # TODO: do we want to bail after a set number of attempts? while not shell_ready(timeout_secs=30): self.log("Guest root shell did not produce any data yet...") while True: chunk = self.shell.recv(1024) self.log(f"Guest shell says: {chunk!r}") # NOTE: for this to work, nothing must be printed after this line! if b"Spawning backdoor root shell..." in chunk: break toc = time.time() self.log("connected to guest root shell") self.log(f"(connecting took {toc - tic:.2f} seconds)") self.connected = True def screenshot(self, filename: str) -> None: if "." 
not in filename: filename += ".png" if "/" not in filename: filename = os.path.join(self.out_dir, filename) tmp = f"{filename}.ppm" with self.nested( f"making screenshot {filename}", {"image": os.path.basename(filename)}, ): self.send_monitor_command(f"screendump {tmp}") ret = subprocess.run(f"pnmtopng '{tmp}' > '{filename}'", shell=True) os.unlink(tmp) if ret.returncode != 0: raise Exception("Cannot convert screenshot") def copy_from_host_via_shell(self, source: str, target: str) -> None: """Copy a file from the host into the guest by piping it over the shell into the destination file. Works without host-guest shared folder. Prefer copy_from_host for whenever possible. """ with open(source, "rb") as fh: content_b64 = base64.b64encode(fh.read()).decode() self.succeed( f"mkdir -p $(dirname {target})", f"echo -n {content_b64} | base64 -d > {target}", ) def copy_from_host(self, source: str, target: str) -> None: """Copy a file from the host into the guest via the `shared_dir` shared among all the VMs (using a temporary directory). """ host_src = Path(source) vm_target = Path(target) with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td: shared_temp = Path(shared_td) host_intermediate = shared_temp / host_src.name vm_shared_temp = Path("/tmp/shared") / shared_temp.name vm_intermediate = vm_shared_temp / host_src.name self.succeed(make_command(["mkdir", "-p", vm_shared_temp])) if host_src.is_dir(): shutil.copytree(host_src, host_intermediate) else: shutil.copy(host_src, host_intermediate) self.succeed(make_command(["mkdir", "-p", vm_target.parent])) self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target])) def copy_from_vm(self, source: str, target_dir: str = "") -> None: """Copy a file from the VM (specified by an in-VM source path) to a path relative to `$out`. The file is copied via the `shared_dir` shared among all the VMs (using a temporary directory). """ # Compute the source, target, and intermediate shared file names vm_src = Path(source) with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td: shared_temp = Path(shared_td) vm_shared_temp = Path("/tmp/shared") / shared_temp.name vm_intermediate = vm_shared_temp / vm_src.name intermediate = shared_temp / vm_src.name # Copy the file to the shared directory inside VM self.succeed(make_command(["mkdir", "-p", vm_shared_temp])) self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate])) abs_target = self.out_dir / target_dir / vm_src.name abs_target.parent.mkdir(exist_ok=True, parents=True) # Copy the file from the shared directory outside VM if intermediate.is_dir(): shutil.copytree(intermediate, abs_target) else: shutil.copy(intermediate, abs_target) def dump_tty_contents(self, tty: str) -> None: """Debugging: Dump the contents of the TTY<n>""" self.execute(f"fold -w 80 /dev/vcs{tty} | systemd-cat") def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: screenshot_path = os.path.join(tmpdir, "ppm") self.send_monitor_command(f"screendump {screenshot_path}") return _perform_ocr_on_screenshot(screenshot_path, model_ids) def get_screen_text_variants(self) -> List[str]: return self._get_screen_text_variants([0, 1, 2]) def get_screen_text(self) -> str: return self._get_screen_text_variants([2])[0] def wait_for_text(self, regex: str) -> None: def screen_matches(last: bool) -> bool: variants = self.get_screen_text_variants() for text in variants: if re.search(regex, text) is not None: return True if last: self.log(f"Last OCR attempt failed. 
Text was: {variants}") return False with self.nested(f"waiting for {regex} to appear on screen"): retry(screen_matches) def wait_for_console_text(self, regex: str, timeout: int | None = None) -> None: """ Wait for the provided regex to appear on console. For each reads, If timeout is None, timeout is infinite. `timeout` is in seconds. """ # Buffer the console output, this is needed # to match multiline regexes. console = io.StringIO() def console_matches(_: Any) -> bool: nonlocal console try: # This will return as soon as possible and # sleep 1 second. console.write(self.last_lines.get(block=False)) except queue.Empty: pass console.seek(0) matches = re.search(regex, console.read()) return matches is not None with self.nested(f"waiting for {regex} to appear on console"): if timeout is not None: retry(console_matches, timeout) else: while not console_matches(False): pass def send_key( self, key: str, delay: Optional[float] = 0.01, log: Optional[bool] = True ) -> None: key = CHAR_TO_KEY.get(key, key) context = self.nested(f"sending key {repr(key)}") if log else nullcontext() with context: self.send_monitor_command(f"sendkey {key}") if delay is not None: time.sleep(delay) def send_console(self, chars: str) -> None: assert self.process assert self.process.stdin self.process.stdin.write(chars.encode()) self.process.stdin.flush() def start(self, allow_reboot: bool = False) -> None: if self.booted: return self.log("starting vm") def clear(path: Path) -> Path: if path.exists(): path.unlink() return path def create_socket(path: Path) -> socket.socket: s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM) s.bind(str(path)) s.listen(1) return s monitor_socket = create_socket(clear(self.monitor_path)) shell_socket = create_socket(clear(self.shell_path)) self.process = self.start_command.run( self.state_dir, self.shared_dir, self.monitor_path, self.shell_path, allow_reboot, ) self.monitor, _ = monitor_socket.accept() self.shell, _ = shell_socket.accept() # Store last serial console lines for use # of wait_for_console_text self.last_lines: Queue = Queue() def process_serial_output() -> None: assert self.process assert self.process.stdout for _line in self.process.stdout: # Ignore undecodable bytes that may occur in boot menus line = _line.decode(errors="ignore").replace("\r", "").rstrip() self.last_lines.put(line) self.log_serial(line) self.serial_thread = threading.Thread(target=process_serial_output) self.serial_thread.start() self.wait_for_monitor_prompt() self.pid = self.process.pid self.booted = True self.log(f"QEMU running (pid {self.pid})") def cleanup_statedir(self) -> None: shutil.rmtree(self.state_dir) rootlog.log(f"deleting VM state directory {self.state_dir}") rootlog.log("if you want to keep the VM state, pass --keep-vm-state") def shutdown(self) -> None: if not self.booted: return assert self.shell self.shell.send("poweroff\n".encode()) self.wait_for_shutdown() def crash(self) -> None: if not self.booted: return self.log("forced crash") self.send_monitor_command("quit") self.wait_for_shutdown() def reboot(self) -> None: """Press Ctrl+Alt+Delete in the guest. Prepares the machine to be reconnected which is useful if the machine was started with `allow_reboot = True` """ self.send_key("ctrl-alt-delete") self.connected = False def wait_for_x(self) -> None: """Wait until it is possible to connect to the X server. Note that testing the existence of /tmp/.X11-unix/X0 is insufficient. 
""" def check_x(_: Any) -> bool: cmd = ( "journalctl -b SYSLOG_IDENTIFIER=systemd | " + 'grep "Reached target Current graphical"' ) status, _ = self.execute(cmd) if status != 0: return False status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]") return status == 0 with self.nested("waiting for the X11 server"): retry(check_x) def get_window_names(self) -> List[str]: return self.succeed( r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'" ).splitlines() def wait_for_window(self, regexp: str) -> None: pattern = re.compile(regexp) def window_is_visible(last_try: bool) -> bool: names = self.get_window_names() if last_try: self.log( f"Last chance to match {regexp} on the window list," + " which currently contains: " + ", ".join(names) ) return any(pattern.search(name) for name in names) with self.nested("waiting for a window to appear"): retry(window_is_visible) def sleep(self, secs: int) -> None: # We want to sleep in *guest* time, not *host* time. self.succeed(f"sleep {secs}") def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None: """Forward a TCP port on the host to a TCP port on the guest. Useful during interactive testing. """ self.send_monitor_command(f"hostfwd_add tcp::{host_port}-:{guest_port}") def block(self) -> None: """Make the machine unreachable by shutting down eth1 (the multicast interface used to talk to the other VMs). We keep eth0 up so that the test driver can continue to talk to the machine. """ self.send_monitor_command("set_link virtio-net-pci.1 off") def unblock(self) -> None: """Make the machine reachable.""" self.send_monitor_command("set_link virtio-net-pci.1 on") def release(self) -> None: if self.pid is None: return rootlog.info(f"kill machine (pid {self.pid})") assert self.process assert self.shell assert self.monitor assert self.serial_thread self.process.terminate() self.shell.close() self.monitor.close() self.serial_thread.join() def run_callbacks(self) -> None: for callback in self.callbacks: callback()
36,389
32.725672
93
py
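The `execute` method in the file above relies on a small framing convention rather than raw pass-through: the guest pipes command output through `base64 --wrap 0; echo`, so the host only ever reads one newline-terminated block off the backdoor shell socket before asking for `${PIPESTATUS[0]}`. The standalone sketch below (values chosen purely for illustration, not taken from the file) shows why that encoding is convenient: arbitrary binary output survives the socket as a single line.

import base64

# Illustrative sketch of the framing used by Machine.execute() above: the
# guest runs `<cmd> | (base64 --wrap 0; echo)`, so the host can read exactly
# one newline-terminated block and decode it, even if the command's output is
# binary or contains newlines itself.
raw_output = b"hello\nworld\x00"                   # arbitrary bytes a command might print
wire_block = base64.b64encode(raw_output) + b"\n"  # what crosses the backdoor shell socket
decoded = base64.b64decode(wire_block)             # what execute() hands back (before .decode())
assert decoded == raw_output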
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/driver.py
from contextlib import contextmanager from pathlib import Path from typing import Any, Dict, Iterator, List, Union, Optional, Callable, ContextManager import os import re import tempfile from test_driver.logger import rootlog from test_driver.machine import Machine, NixStartScript, retry from test_driver.vlan import VLan from test_driver.polling_condition import PollingCondition def get_tmp_dir() -> Path: """Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD Raises an exception in case the retrieved temporary directory is not writeable See https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir """ tmp_dir = Path(tempfile.gettempdir()) tmp_dir.mkdir(mode=0o700, exist_ok=True) if not tmp_dir.is_dir(): raise NotADirectoryError( f"The directory defined by TMPDIR, TEMP, TMP or CWD: {tmp_dir} is not a directory" ) if not os.access(tmp_dir, os.W_OK): raise PermissionError( f"The directory defined by TMPDIR, TEMP, TMP, or CWD: {tmp_dir} is not writeable" ) return tmp_dir def pythonize_name(name: str) -> str: return re.sub(r"^[^A-z_]|[^A-z0-9_]", "_", name) class Driver: """A handle to the driver that sets up the environment and runs the tests""" tests: str vlans: List[VLan] machines: List[Machine] polling_conditions: List[PollingCondition] def __init__( self, start_scripts: List[str], vlans: List[int], tests: str, out_dir: Path, keep_vm_state: bool = False, ): self.tests = tests self.out_dir = out_dir tmp_dir = get_tmp_dir() with rootlog.nested("start all VLans"): vlans = list(set(vlans)) self.vlans = [VLan(nr, tmp_dir) for nr in vlans] def cmd(scripts: List[str]) -> Iterator[NixStartScript]: for s in scripts: yield NixStartScript(s) self.polling_conditions = [] self.machines = [ Machine( start_command=cmd, keep_vm_state=keep_vm_state, name=cmd.machine_name, tmp_dir=tmp_dir, callbacks=[self.check_polling_conditions], out_dir=self.out_dir, ) for cmd in cmd(start_scripts) ] def __enter__(self) -> "Driver": return self def __exit__(self, *_: Any) -> None: with rootlog.nested("cleanup"): for machine in self.machines: machine.release() def subtest(self, name: str) -> Iterator[None]: """Group logs under a given test name""" with rootlog.nested("subtest: " + name): try: yield return True except Exception as e: rootlog.error(f'Test "{name}" failed with error: "{e}"') raise e def test_symbols(self) -> Dict[str, Any]: @contextmanager def subtest(name: str) -> Iterator[None]: return self.subtest(name) general_symbols = dict( start_all=self.start_all, test_script=self.test_script, machines=self.machines, vlans=self.vlans, driver=self, log=rootlog, os=os, create_machine=self.create_machine, subtest=subtest, run_tests=self.run_tests, join_all=self.join_all, retry=retry, serial_stdout_off=self.serial_stdout_off, serial_stdout_on=self.serial_stdout_on, polling_condition=self.polling_condition, Machine=Machine, # for typing ) machine_symbols = {pythonize_name(m.name): m for m in self.machines} # If there's exactly one machine, make it available under the name # "machine", even if it's not called that. 
if len(self.machines) == 1: (machine_symbols["machine"],) = self.machines vlan_symbols = { f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans) } print( "additionally exposed symbols:\n " + ", ".join(map(lambda m: m.name, self.machines)) + ",\n " + ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans)) + ",\n " + ", ".join(list(general_symbols.keys())) ) return {**general_symbols, **machine_symbols, **vlan_symbols} def test_script(self) -> None: """Run the test script""" with rootlog.nested("run the VM test script"): symbols = self.test_symbols() # call eagerly exec(self.tests, symbols, None) def run_tests(self) -> None: """Run the test script (for non-interactive test runs)""" self.test_script() # TODO: Collect coverage data for machine in self.machines: if machine.is_up(): machine.execute("sync") def start_all(self) -> None: """Start all machines""" with rootlog.nested("start all VMs"): for machine in self.machines: machine.start() def join_all(self) -> None: """Wait for all machines to shut down""" with rootlog.nested("wait for all VMs to finish"): for machine in self.machines: machine.wait_for_shutdown() def create_machine(self, args: Dict[str, Any]) -> Machine: tmp_dir = get_tmp_dir() if args.get("startCommand"): start_command: str = args.get("startCommand", "") cmd = NixStartScript(start_command) name = args.get("name", cmd.machine_name) else: cmd = Machine.create_startcommand(args) # type: ignore name = args.get("name", "machine") return Machine( tmp_dir=tmp_dir, out_dir=self.out_dir, start_command=cmd, name=name, keep_vm_state=args.get("keep_vm_state", False), ) def serial_stdout_on(self) -> None: rootlog._print_serial_logs = True def serial_stdout_off(self) -> None: rootlog._print_serial_logs = False def check_polling_conditions(self) -> None: for condition in self.polling_conditions: condition.maybe_raise() def polling_condition( self, fun_: Optional[Callable] = None, *, seconds_interval: float = 2.0, description: Optional[str] = None, ) -> Union[Callable[[Callable], ContextManager], ContextManager]: driver = self class Poll: def __init__(self, fun: Callable): self.condition = PollingCondition( fun, seconds_interval, description, ) def __enter__(self) -> None: driver.polling_conditions.append(self.condition) def __exit__(self, a, b, c) -> None: # type: ignore res = driver.polling_conditions.pop() assert res is self.condition def wait(self, timeout: int = 900) -> None: def condition(last: bool) -> bool: if last: rootlog.info(f"Last chance for {self.condition.description}") ret = self.condition.check(force=True) if not ret and not last: rootlog.info( f"({self.condition.description} failure not fatal yet)" ) return ret with rootlog.nested(f"waiting for {self.condition.description}"): retry(condition, timeout=timeout) if fun_ is None: return Poll else: return Poll(fun_)
7,887
32.423729
94
py
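`Driver.test_symbols()` above exposes `polling_condition` (together with `machine`, `retry`, and friends) to test scripts. As a usage illustration only — the service name and the check below are invented, not taken from the file — a condition can be declared once, kept active as a context manager so that it is re-checked by the machine callbacks, or awaited explicitly:

# Hypothetical test-script fragment; `machine` and `polling_condition` come
# from Driver.test_symbols() above, the httpd check is made up.
@polling_condition
def httpd_running():
    "httpd is running"
    machine.succeed("pgrep -x httpd")

with httpd_running:
    # while this block runs, every machine action re-checks the condition
    machine.succeed("systemctl restart my-client.service")

httpd_running.wait(timeout=60)  # or block until the condition holds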
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/polling_condition.py
from typing import Callable, Optional from math import isfinite import time from .logger import rootlog class PollingConditionFailed(Exception): pass class PollingCondition: condition: Callable[[], bool] seconds_interval: float description: Optional[str] last_called: float entry_count: int def __init__( self, condition: Callable[[], Optional[bool]], seconds_interval: float = 2.0, description: Optional[str] = None, ): self.condition = condition # type: ignore self.seconds_interval = seconds_interval if description is None: if condition.__doc__: self.description = condition.__doc__ else: self.description = condition.__name__ else: self.description = str(description) self.last_called = float("-inf") self.entry_count = 0 def check(self, force: bool = False) -> bool: if (self.entered or not self.overdue) and not force: return True with self, rootlog.nested(self.nested_message): time_since_last = time.monotonic() - self.last_called last_message = ( f"Time since last: {time_since_last:.2f}s" if isfinite(time_since_last) else "(not called yet)" ) rootlog.info(last_message) try: res = self.condition() # type: ignore except Exception: res = False res = res is None or res rootlog.info(self.status_message(res)) return res def maybe_raise(self) -> None: if not self.check(): raise PollingConditionFailed(self.status_message(False)) def status_message(self, status: bool) -> str: return f"Polling condition {'succeeded' if status else 'failed'}: {self.description}" @property def nested_message(self) -> str: nested_message = ["Checking polling condition"] if self.description is not None: nested_message.append(repr(self.description)) return " ".join(nested_message) @property def overdue(self) -> bool: return self.last_called + self.seconds_interval < time.monotonic() @property def entered(self) -> bool: # entry_count should never dip *below* zero assert self.entry_count >= 0 return self.entry_count > 0 def __enter__(self) -> None: self.entry_count += 1 def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore assert self.entered self.entry_count -= 1 self.last_called = time.monotonic()
2,711
28.16129
93
py
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/logger.py
from colorama import Style, Fore from contextlib import contextmanager from typing import Any, Dict, Iterator from queue import Queue, Empty from xml.sax.saxutils import XMLGenerator import codecs import os import sys import time import unicodedata class Logger: def __init__(self) -> None: self.logfile = os.environ.get("LOGFILE", "/dev/null") self.logfile_handle = codecs.open(self.logfile, "wb") self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8") self.queue: "Queue[Dict[str, str]]" = Queue() self.xml.startDocument() self.xml.startElement("logfile", attrs={}) self._print_serial_logs = True @staticmethod def _eprint(*args: object, **kwargs: Any) -> None: print(*args, file=sys.stderr, **kwargs) def close(self) -> None: self.xml.endElement("logfile") self.xml.endDocument() self.logfile_handle.close() def sanitise(self, message: str) -> str: return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C") def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str: if "machine" in attributes: return f"{attributes['machine']}: {message}" return message def log_line(self, message: str, attributes: Dict[str, str]) -> None: self.xml.startElement("line", attributes) self.xml.characters(message) self.xml.endElement("line") def info(self, *args, **kwargs) -> None: # type: ignore self.log(*args, **kwargs) def warning(self, *args, **kwargs) -> None: # type: ignore self.log(*args, **kwargs) def error(self, *args, **kwargs) -> None: # type: ignore self.log(*args, **kwargs) sys.exit(1) def log(self, message: str, attributes: Dict[str, str] = {}) -> None: self._eprint(self.maybe_prefix(message, attributes)) self.drain_log_queue() self.log_line(message, attributes) def log_serial(self, message: str, machine: str) -> None: self.enqueue({"msg": message, "machine": machine, "type": "serial"}) if self._print_serial_logs: self._eprint(Style.DIM + f"{machine} # {message}" + Style.RESET_ALL) def enqueue(self, item: Dict[str, str]) -> None: self.queue.put(item) def drain_log_queue(self) -> None: try: while True: item = self.queue.get_nowait() msg = self.sanitise(item["msg"]) del item["msg"] self.log_line(msg, item) except Empty: pass @contextmanager def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]: self._eprint( self.maybe_prefix( Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes ) ) self.xml.startElement("nest", attrs={}) self.xml.startElement("head", attributes) self.xml.characters(message) self.xml.endElement("head") tic = time.time() self.drain_log_queue() yield self.drain_log_queue() toc = time.time() self.log(f"(finished: {message}, in {toc - tic:.2f} seconds)") self.xml.endElement("nest") rootlog = Logger()
3,294
30.682692
86
py
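A small usage sketch for the Logger above (the log path and messages are invented): `nested()` groups messages both on stderr and in the XML logfile selected via the LOGFILE environment variable, and machine-tagged lines get the machine name as a prefix.

# Usage sketch only; assumes the package above is importable as `test_driver`.
import os
import tempfile

os.environ["LOGFILE"] = os.path.join(tempfile.mkdtemp(), "log.xml")

from test_driver.logger import Logger

log = Logger()  # normally the module-level `rootlog` is used instead
with log.nested("running a subtask"):
    log.info("checking something", {"machine": "machine1"})
    log.log_serial("kernel: booting", "machine1")
log.close()  # finishes the XML document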
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/__init__.py
from pathlib import Path
import argparse
import ptpython.repl
import os
import time

from test_driver.logger import rootlog
from test_driver.driver import Driver


class EnvDefault(argparse.Action):
    """An argparse Action that takes its default value from the specified
    environment variable.
    """

    def __init__(self, envvar, required=False, default=None, nargs=None, **kwargs):  # type: ignore
        if not default and envvar:
            if envvar in os.environ:
                if nargs is not None and (nargs.isdigit() or nargs in ["*", "+"]):
                    default = os.environ[envvar].split()
                else:
                    default = os.environ[envvar]
                kwargs["help"] = (
                    kwargs["help"] + f" (default from environment: {default})"
                )
        if required and default:
            required = False
        super(EnvDefault, self).__init__(
            default=default, required=required, nargs=nargs, **kwargs
        )

    def __call__(self, parser, namespace, values, option_string=None):  # type: ignore
        setattr(namespace, self.dest, values)


def writeable_dir(arg: str) -> Path:
    """Raises an ArgumentTypeError if the given argument isn't a writeable directory.
    Note: We want to fail as early as possible if a directory isn't writeable,
    since an executed nixos-test could fail (very late) because of the
    test-driver writing in a directory without proper permissions.
    """
    path = Path(arg)
    if not path.is_dir():
        raise argparse.ArgumentTypeError(f"{path} is not a directory")
    if not os.access(path, os.W_OK):
        raise argparse.ArgumentTypeError(f"{path} is not a writeable directory")
    return path


def main() -> None:
    arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
    arg_parser.add_argument(
        "-K",
        "--keep-vm-state",
        help="re-use a VM state coming from a previous run",
        action="store_true",
    )
    arg_parser.add_argument(
        "-I",
        "--interactive",
        help="drop into a python repl and run the tests interactively",
        action=argparse.BooleanOptionalAction,
    )
    arg_parser.add_argument(
        "--start-scripts",
        metavar="START-SCRIPT",
        action=EnvDefault,
        envvar="startScripts",
        nargs="*",
        help="start scripts for participating virtual machines",
    )
    arg_parser.add_argument(
        "--vlans",
        metavar="VLAN",
        action=EnvDefault,
        envvar="vlans",
        nargs="*",
        help="vlans to span by the driver",
    )
    arg_parser.add_argument(
        "-o",
        "--output_directory",
        help="""The path to the directory where outputs copied from the VM
                will be placed, e.g. by Machine.copy_from_vm or Machine.screenshot""",
        default=Path.cwd(),
        type=writeable_dir,
    )
    arg_parser.add_argument(
        "testscript",
        action=EnvDefault,
        envvar="testScript",
        help="the test script to run",
        type=Path,
    )

    args = arg_parser.parse_args()

    if not args.keep_vm_state:
        rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")

    with Driver(
        args.start_scripts,
        args.vlans,
        args.testscript.read_text(),
        args.output_directory.resolve(),
        args.keep_vm_state,
    ) as driver:
        if args.interactive:
            ptpython.repl.embed(driver.test_symbols(), {})
        else:
            tic = time.time()
            driver.run_tests()
            toc = time.time()
            rootlog.info(f"test script finished in {(toc-tic):.2f}s")


def generate_driver_symbols() -> None:
    """
    This generates a file with symbols of the test-driver code that can be used
    in user's test scripts. That list is then used by pyflakes to lint those
    scripts.
    """
    d = Driver([], [], "", Path())
    test_symbols = d.test_symbols()
    with open("driver-symbols", "w") as fp:
        fp.write(",".join(test_symbols.keys()))
4,097
31.267717
99
py
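To illustrate the EnvDefault action above (the variable name and values here are invented): when a flag is omitted on the command line, its default is taken from the named environment variable, and `nargs="*"` values are split on whitespace.

# Standalone sketch of EnvDefault's behaviour; assumes the package above is
# importable as `test_driver`, and uses made-up values.
import argparse
import os

from test_driver import EnvDefault

os.environ["startScripts"] = "run-alpha-vm run-beta-vm"

parser = argparse.ArgumentParser()
parser.add_argument(
    "--start-scripts",
    action=EnvDefault,
    envvar="startScripts",
    nargs="*",
    help="start scripts",
)
args = parser.parse_args([])  # nothing given on the command line
print(args.start_scripts)     # -> ['run-alpha-vm', 'run-beta-vm']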
nixpkgs
nixpkgs-master/nixos/lib/test-driver/test_driver/vlan.py
from pathlib import Path
import io
import os
import pty
import subprocess

from test_driver.logger import rootlog


class VLan:
    """This class handles a VLAN that the run-vm scripts identify via its
    number handle. The network's lifetime equals the object's lifetime.
    """

    nr: int
    socket_dir: Path

    process: subprocess.Popen
    pid: int
    fd: io.TextIOBase

    def __repr__(self) -> str:
        return f"<Vlan Nr. {self.nr}>"

    def __init__(self, nr: int, tmp_dir: Path):
        self.nr = nr
        self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"

        # TODO: don't side-effect environment here
        os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)

        rootlog.info("start vlan")
        pty_master, pty_slave = pty.openpty()

        # The --hub is required for the scenario determined by
        # nixos/tests/networking.nix vlan-ping.
        # VLAN-tagged traffic (802.1Q) seems to be blocked if a vde_switch is
        # used without the hub mode (flood packets to all ports).
        self.process = subprocess.Popen(
            ["vde_switch", "-s", self.socket_dir, "--dirmode", "0700", "--hub"],
            stdin=pty_slave,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
        )
        self.pid = self.process.pid
        self.fd = os.fdopen(pty_master, "w")
        self.fd.write("version\n")

        # TODO: the Perl version checks whether this can be read from and,
        # if not, dies. We could hang here forever. Fix it.
        assert self.process.stdout is not None
        self.process.stdout.readline()
        if not (self.socket_dir / "ctl").exists():
            rootlog.error("cannot start vde_switch")

        rootlog.info(f"running vlan (pid {self.pid}; ctl {self.socket_dir})")

    def __del__(self) -> None:
        rootlog.info(f"kill vlan (pid {self.pid})")
        self.fd.close()
        self.process.terminate()
1,951
29.984127
80
py
DeeBERT
DeeBERT-master/setup.py
""" Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory. (this will build a wheel for the python version you use to build it - make sure you use python 3.x). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi transformers 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. """ from io import open from setuptools import find_packages, setup setup( name="transformers", version="2.1.1", author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="[email protected]", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", long_description=open("README.md", "r", encoding='utf-8').read(), long_description_content_type="text/markdown", keywords='NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU', license='Apache', url="https://github.com/huggingface/transformers", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['numpy', 'boto3', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses'], entry_points={ 'console_scripts': [ "transformers=transformers.__main__:main", ] }, # python_requires='>=3.5.0', tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
2,923
39.054795
183
py
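One detail worth illustrating from the setup() call above is the console_scripts entry point: once the package is installed, the `transformers` command is just a thin wrapper around `transformers.__main__:main`. A small sketch of resolving it programmatically (pkg_resources ships with setuptools):

# Sketch: resolving the console_scripts entry point declared above, assuming
# the package is installed in the current environment.
import pkg_resources

# the generated `transformers` script does essentially this:
main = pkg_resources.load_entry_point("transformers", "console_scripts", "transformers")
# `main` is transformers.__main__:main, exactly as declared in entry_points above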
DeeBERT
DeeBERT-master/hubconf.py
from transformers import ( AutoTokenizer, AutoConfig, AutoModel, AutoModelWithLMHead, AutoModelForSequenceClassification, AutoModelForQuestionAnswering ) from transformers.file_utils import add_start_docstrings dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex', 'sentencepiece', 'sacremoses'] @add_start_docstrings(AutoConfig.__doc__) def config(*args, **kwargs): r""" # Using torch.hub ! import torch config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from S3 and cache. config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json') config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ return AutoConfig.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoTokenizer.__doc__) def tokenizer(*args, **kwargs): r""" # Using torch.hub ! import torch tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from S3 and cache. tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')` """ return AutoTokenizer.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModel.__doc__) def model(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModel.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelWithLMHead.__doc__) def modelWithLMHead(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelWithLMHead.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelForSequenceClassification.__doc__) def modelForSequenceClassification(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__) def modelForQuestionAnswering(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
6,489
56.433628
189
py
DeeBERT
DeeBERT-master/examples/run_lm_finetuning.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned using a masked language modeling (MLM) loss. """ from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import pickle import random import re import shutil import numpy as np import torch from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, BertConfig, BertForMaskedLM, BertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) logger = logging.getLogger(__name__) MODEL_CLASSES = { 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) } class TextDataset(Dataset): def __init__(self, tokenizer, args, file_path='train', block_size=512): assert os.path.isfile(file_path) directory, filename = os.path.split(file_path) cached_features_file = os.path.join(directory, args.model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename) if os.path.exists(cached_features_file): logger.info("Loading features from cached file %s", cached_features_file) with open(cached_features_file, 'rb') as handle: self.examples = pickle.load(handle) else: logger.info("Creating features from dataset file at %s", directory) self.examples = [] with open(file_path, encoding="utf-8") as f: text = f.read() tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size])) # Note that we are loosing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should loook for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. 
logger.info("Saving features into cached file %s", cached_features_file) with open(cached_features_file, 'wb') as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) def __len__(self): return len(self.examples) def __getitem__(self, item): return torch.tensor(self.examples[item]) def load_and_cache_examples(args, tokenizer, evaluate=False): dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size) return dataset def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False): if not args.save_total_limit: return if args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix))) if len(glob_checkpoints) <= args.save_total_limit: return ordering_and_checkpoint_path = [] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) def mask_tokens(inputs, tokenizer, args): """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.clone() # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = torch.full(labels.shape, args.mlm_probability) special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()] probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -1 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproducibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) model.train() outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: checkpoint_prefix = 'checkpoint' # Save model checkpoint output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) _rotate_checkpoints(args, checkpoint_prefix) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that 
DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) with torch.no_grad(): outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = { "perplexity": perplexity } output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_data_file", default=None, type=str, required=True, help="The input training data file (a text file).") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--eval_data_file", default=None, type=str, help="An optional input evaluation data file to evaluate the perplexity on (a text file).") parser.add_argument("--model_type", default="bert", type=str, help="The model architecture to be fine-tuned.") parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str, help="The model checkpoint for weights initialization.") parser.add_argument("--mlm", action='store_true', help="Train with masked-language modeling loss instead of language modeling.") parser.add_argument("--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss") parser.add_argument("--config_name", default="", type=str, help="Optional pretrained config name or path if not the same as model_name_or_path") parser.add_argument("--tokenizer_name", default="", type=str, help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") parser.add_argument("--cache_dir", default="", type=str, help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") parser.add_argument("--block_size", default=-1, type=int, help="Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." 
"Default to the model max input length for single sentence inputs (take into account special tokens).") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=1.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument('--save_total_limit', type=int, default=None, help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm: raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. 
They must be run using the --mlm " "flag (masked language modeling).") if args.eval_data_file is None and args.do_eval: raise ValueError("Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " "or remove the --do_eval argument.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) if args.block_size <= 0: args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) if args.local_rank == 0: torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False) if args.local_rank == 0: torch.distributed.barrier() global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use 
save_pretrained for the model and tokenizer, you can reload them using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
28,845
50.881295
165
py
DeeBERT
DeeBERT-master/examples/utils_squad_evaluate.py
""" Official evaluation script for SQuAD version 2.0.
    Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0

In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys

class EVAL_OPTS():
  def __init__(self, data_file, pred_file, out_file="",
               na_prob_file="na_prob.json", na_prob_thresh=1.0,
               out_image_dir=None, verbose=False):
    self.data_file = data_file
    self.pred_file = pred_file
    self.out_file = out_file
    self.na_prob_file = na_prob_file
    self.na_prob_thresh = na_prob_thresh
    self.out_image_dir = out_image_dir
    self.verbose = verbose

OPTS = None

def parse_args():
  parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
  parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
  parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
  parser.add_argument('--out-file', '-o', metavar='eval.json',
                      help='Write accuracy metrics to file (default is stdout).')
  parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
                      help='Model estimates of probability of no answer.')
  parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
                      help='Predict "" if no-answer probability exceeds this (default = 1.0).')
  parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
                      help='Save precision-recall curves to directory.')
  parser.add_argument('--verbose', '-v', action='store_true')
  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
  return parser.parse_args()

def make_qid_to_has_ans(dataset):
  qid_to_has_ans = {}
  for article in dataset:
    for p in article['paragraphs']:
      for qa in p['qas']:
        qid_to_has_ans[qa['id']] = bool(qa['answers'])
  return qid_to_has_ans

def normalize_answer(s):
  """Lower text and remove punctuation, articles and extra whitespace."""
  def remove_articles(text):
    regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
    return re.sub(regex, ' ', text)
  def white_space_fix(text):
    return ' '.join(text.split())
  def remove_punc(text):
    exclude = set(string.punctuation)
    return ''.join(ch for ch in text if ch not in exclude)
  def lower(text):
    return text.lower()
  return white_space_fix(remove_articles(remove_punc(lower(s))))

def get_tokens(s):
  if not s: return []
  return normalize_answer(s).split()

def compute_exact(a_gold, a_pred):
  return int(normalize_answer(a_gold) == normalize_answer(a_pred))

def compute_f1(a_gold, a_pred):
  gold_toks = get_tokens(a_gold)
  pred_toks = get_tokens(a_pred)
  common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
  num_same = sum(common.values())
  if len(gold_toks) == 0 or len(pred_toks) == 0:
    # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
    return int(gold_toks == pred_toks)
  if num_same == 0:
    return 0
  precision = 1.0 * num_same / len(pred_toks)
  recall = 1.0 * num_same / len(gold_toks)
  f1 = (2 * precision * recall) / (precision + recall)
  return f1

def get_raw_scores(dataset, preds):
  exact_scores = {}
  f1_scores = {}
  for article in dataset:
    for p in article['paragraphs']:
      for qa in p['qas']:
        qid = qa['id']
        gold_answers = [a['text'] for a in qa['answers']
                        if normalize_answer(a['text'])]
        if not gold_answers:
          # For unanswerable questions, only correct answer is empty string
          gold_answers = ['']
        if qid not in preds:
          print('Missing prediction for %s' % qid)
          continue
        a_pred = preds[qid]
        # Take max over all gold answers
        exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
        f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
  return exact_scores, f1_scores

def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
  new_scores = {}
  for qid, s in scores.items():
    pred_na = na_probs[qid] > na_prob_thresh
    if pred_na:
      new_scores[qid] = float(not qid_to_has_ans[qid])
    else:
      new_scores[qid] = s
  return new_scores

def make_eval_dict(exact_scores, f1_scores, qid_list=None):
  if not qid_list:
    total = len(exact_scores)
    return collections.OrderedDict([
        ('exact', 100.0 * sum(exact_scores.values()) / total),
        ('f1', 100.0 * sum(f1_scores.values()) / total),
        ('total', total),
    ])
  else:
    total = len(qid_list)
    return collections.OrderedDict([
        ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
        ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
        ('total', total),
    ])

def merge_eval(main_eval, new_eval, prefix):
  for k in new_eval:
    main_eval['%s_%s' % (prefix, k)] = new_eval[k]

def plot_pr_curve(precisions, recalls, out_image, title):
  plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
  plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
  plt.xlabel('Recall')
  plt.ylabel('Precision')
  plt.xlim([0.0, 1.05])
  plt.ylim([0.0, 1.05])
  plt.title(title)
  plt.savefig(out_image)
  plt.clf()

def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
                               out_image=None, title=None):
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
  true_pos = 0.0
  cur_p = 1.0
  cur_r = 0.0
  precisions = [1.0]
  recalls = [0.0]
  avg_prec = 0.0
  for i, qid in enumerate(qid_list):
    if qid_to_has_ans[qid]:
      true_pos += scores[qid]
    cur_p = true_pos / float(i+1)
    cur_r = true_pos / float(num_true_pos)
    if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
      # i.e., if we can put a threshold after this point
      avg_prec += cur_p * (cur_r - recalls[-1])
      precisions.append(cur_p)
      recalls.append(cur_r)
  if out_image:
    plot_pr_curve(precisions, recalls, out_image, title)
  return {'ap': 100.0 * avg_prec}

def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
                                  qid_to_has_ans, out_image_dir):
  if out_image_dir and not os.path.exists(out_image_dir):
    os.makedirs(out_image_dir)
  num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
  if num_true_pos == 0:
    return
  pr_exact = make_precision_recall_eval(
      exact_raw, na_probs, num_true_pos, qid_to_has_ans,
      out_image=os.path.join(out_image_dir, 'pr_exact.png'),
      title='Precision-Recall curve for Exact Match score')
  pr_f1 = make_precision_recall_eval(
      f1_raw, na_probs, num_true_pos, qid_to_has_ans,
      out_image=os.path.join(out_image_dir, 'pr_f1.png'),
      title='Precision-Recall curve for F1 score')
  oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
  pr_oracle = make_precision_recall_eval(
      oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
      out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
      title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
  merge_eval(main_eval, pr_exact, 'pr_exact')
  merge_eval(main_eval, pr_f1, 'pr_f1')
  merge_eval(main_eval, pr_oracle, 'pr_oracle')

def histogram_na_prob(na_probs, qid_list, image_dir, name):
  if not qid_list:
    return
  x = [na_probs[k] for k in qid_list]
  weights = np.ones_like(x) / float(len(x))
  plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
  plt.xlabel('Model probability of no-answer')
  plt.ylabel('Proportion of dataset')
  plt.title('Histogram of no-answer probability: %s' % name)
  plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
  plt.clf()

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
  num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
  cur_score = num_no_ans
  best_score = cur_score
  best_thresh = 0.0
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
  for i, qid in enumerate(qid_list):
    if qid not in scores: continue
    if qid_to_has_ans[qid]:
      diff = scores[qid]
    else:
      if preds[qid]:
        diff = -1
      else:
        diff = 0
    cur_score += diff
    if cur_score > best_score:
      best_score = cur_score
      best_thresh = na_probs[qid]
  return 100.0 * best_score / len(scores), best_thresh

def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
  num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
  cur_score = num_no_ans
  best_score = cur_score
  best_thresh = 0.0
  qid_list = sorted(na_probs, key=lambda k: na_probs[k])
  for i, qid in enumerate(qid_list):
    if qid not in scores: continue
    if qid_to_has_ans[qid]:
      diff = scores[qid]
    else:
      if preds[qid]:
        diff = -1
      else:
        diff = 0
    cur_score += diff
    if cur_score > best_score:
      best_score = cur_score
      best_thresh = na_probs[qid]

  has_ans_score, has_ans_cnt = 0, 0
  for qid in qid_list:
    if not qid_to_has_ans[qid]: continue
    has_ans_cnt += 1
    if qid not in scores: continue
    has_ans_score += scores[qid]

  return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt

def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
  best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
  best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
  main_eval['best_exact'] = best_exact
  main_eval['best_exact_thresh'] = exact_thresh
  main_eval['best_f1'] = best_f1
  main_eval['best_f1_thresh'] = f1_thresh

def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
  best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
  best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
  main_eval['best_exact'] = best_exact
  main_eval['best_exact_thresh'] = exact_thresh
  main_eval['best_f1'] = best_f1
  main_eval['best_f1_thresh'] = f1_thresh
  main_eval['has_ans_exact'] = has_ans_exact
  main_eval['has_ans_f1'] = has_ans_f1

def main(OPTS):
  with open(OPTS.data_file) as f:
    dataset_json = json.load(f)
    dataset = dataset_json['data']
  with open(OPTS.pred_file) as f:
    preds = json.load(f)
  if OPTS.na_prob_file:
    with open(OPTS.na_prob_file) as f:
      na_probs = json.load(f)
  else:
    na_probs = {k: 0.0 for k in preds}
  qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
  has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
  no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
  exact_raw, f1_raw = get_raw_scores(dataset, preds)
  exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
                                        OPTS.na_prob_thresh)
  f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
                                     OPTS.na_prob_thresh)
  out_eval = make_eval_dict(exact_thresh, f1_thresh)
  if has_ans_qids:
    has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
    merge_eval(out_eval, has_ans_eval, 'HasAns')
  if no_ans_qids:
    no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
    merge_eval(out_eval, no_ans_eval, 'NoAns')
  if OPTS.na_prob_file:
    find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
  if OPTS.na_prob_file and OPTS.out_image_dir:
    run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
                                  qid_to_has_ans, OPTS.out_image_dir)
    histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
    histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
  if OPTS.out_file:
    with open(OPTS.out_file, 'w') as f:
      json.dump(out_eval, f)
  else:
    print(json.dumps(out_eval, indent=2))
  return out_eval

if __name__ == '__main__':
  OPTS = parse_args()
  if OPTS.out_image_dir:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
  main(OPTS)
12,493
36.746224
107
py
DeeBERT
DeeBERT-master/examples/run_squad.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""

from __future__ import absolute_import, division, print_function

import argparse
import logging
import os
import random
import glob
import timeit

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler

try:
    from torch.utils.tensorboard import SummaryWriter
except:
    from tensorboardX import SummaryWriter

from tqdm import tqdm, trange

from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer,
                          XLMConfig, XLMForQuestionAnswering, XLMTokenizer,
                          XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer,
                          DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer)

from transformers import AdamW, get_linear_schedule_with_warmup

from utils_squad import (read_squad_examples, convert_examples_to_features,
                         RawResult, write_predictions,
                         RawResultExtended, write_predictions_extended)

# The following import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, 
args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = 
os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, pad_token_segment_id=3 if args.model_type in ['xlnet'] else 0, cls_token_at_end=True if args.model_type in ['xlnet'] else False, sequence_a_is_doc=True if args.model_type in ['xlnet'] else False) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
31,570
54.001742
151
py
DeeBERT
DeeBERT-master/examples/run_highway_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import time import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer, RobertaConfig, RobertaTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer, DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) from transformers.modeling_highway_bert import BertForSequenceClassification from transformers.modeling_highway_roberta import RobertaForSequenceClassification from transformers import AdamW, get_linear_schedule_with_warmup from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors from transformers import glue_convert_examples_to_features as convert_examples_to_features logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer), 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer), 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_wanted_result(result): if "spearmanr" in result: print_result = result["spearmanr"] elif "f1" in result: print_result = result["f1"] elif "mcc" in result: print_result = result["mcc"] elif "acc" in result: print_result = result["acc"] else: print(result) exit(1) return print_result def train(args, train_dataset, model, tokenizer, train_highway=False): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total 
= args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] if train_highway: optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if ("highway" in n) and (not any(nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay))], 'weight_decay': 0.0} ] else: optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if ("highway" not in n) and (not any(nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if ("highway" not in n) and (any(nd in n for nd in no_decay))], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids inputs['train_highway'] = train_highway outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) results = {} 
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None exit_layer_counter = {(i+1):0 for i in range(model.num_layers)} st = time.time() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids if output_layer >= 0: inputs['output_layer'] = output_layer outputs = model(**inputs) if eval_highway: exit_layer_counter[outputs[-1]] += 1 tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_time = time.time() - st print("Eval time:", eval_time) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if eval_highway: print("Exit layer counter", exit_layer_counter) actual_cost = sum([l*c for l, c in exit_layer_counter.items()]) full_cost = len(eval_dataloader) * model.num_layers print("Expected saving", actual_cost/full_cost) if args.early_exit_entropy>=0: save_fname = args.plot_data_dir + '/' +\ args.model_name_or_path[2:] +\ "/entropy_{}.npy".format(args.early_exit_entropy) if not os.path.exists(os.path.dirname(save_fname)): os.makedirs(os.path.dirname(save_fname)) print_result = get_wanted_result(result) np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost/full_cost, print_result])) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = 
os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--plot_data_dir", default="./plotting/", type=str, required=False, help="The directory to store data for plotting figures.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--eval_each_highway", action='store_true', help="Set this flag to evaluate each highway.") parser.add_argument("--eval_after_first_stage", action='store_true', help="Set this flag to evaluate after training only bert (not highway).") parser.add_argument("--eval_highway", action='store_true', help="Set this flag if it's evaluating highway models") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--early_exit_entropy", default=-1, type=float, help = "Entropy threshold for early exit.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.model_type == "bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) model.bert.init_highway_pooler() else: model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) model.roberta.init_highway_pooler() if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) if args.eval_after_first_stage: result = evaluate(args, model, tokenizer, prefix="") print_result = get_wanted_result(result) train(args, train_dataset, model, tokenizer, train_highway=True) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and 
(args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) if args.model_type=="bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) else: model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway) print_result = get_wanted_result(result) print("Result: {}".format(print_result)) if args.eval_each_highway: last_layer_results = print_result each_layer_results = [] for i in range(model.num_layers): logger.info("\n") _result = evaluate(args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway) if i+1 < model.num_layers: each_layer_results.append(get_wanted_result(_result)) each_layer_results.append(last_layer_results) save_fname = args.plot_data_dir + '/' + args.model_name_or_path[2:] + "/each_layer.npy" if not os.path.exists(os.path.dirname(save_fname)): os.makedirs(os.path.dirname(save_fname)) np.save(save_fname, np.array(each_layer_results)) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
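# ---------------------------------------------------------------------------
# Note: the evaluation loop above only consumes the early-exit mechanism --
# it sets the threshold via set_early_exit_entropy() and reads the exit-layer
# index from outputs[-1]; the gating itself lives in the modeling_highway_*
# modules, which are not included in this file.  The snippet below is a
# minimal, self-contained sketch of how an entropy threshold can decide
# whether to stop at an intermediate ("highway") classifier.  The names
# highway_logits and early_exit_entropy are illustrative assumptions, not the
# repository's exact API.
# ---------------------------------------------------------------------------
import torch

def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the softmax distribution, computed per example."""
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

def should_exit_early(highway_logits: torch.Tensor, early_exit_entropy: float) -> bool:
    """Exit when the intermediate classifier is confident enough.

    A negative threshold (the script's default of -1 for --early_exit_entropy)
    disables early exit.
    """
    if early_exit_entropy < 0:
        return False
    return bool((prediction_entropy(highway_logits) < early_exit_entropy).all())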
33,078
51.423138
158
py
DeeBERT
DeeBERT-master/examples/utils_multiple_choice.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """ from __future__ import absolute_import, division, print_function import logging import os import sys from io import open import json import csv import glob import tqdm from typing import List from transformers import PreTrainedTokenizer logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for multiple choice""" def __init__(self, example_id, question, contexts, endings, label=None): """Constructs a InputExample. Args: example_id: Unique id for the example. contexts: list of str. The untokenized text of the first sequence (context of corresponding question). question: string. The untokenized text of the second sequence (question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.example_id = example_id self.question = question self.contexts = contexts self.endings = endings self.label = label class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for input_ids, input_mask, segment_ids in choices_features ] self.label = label class DataProcessor(object): """Base class for data converters for multiple choice data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() class RaceProcessor(DataProcessor): """Processor for the RACE data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) high = os.path.join(data_dir, 'train/high') middle = os.path.join(data_dir, 'train/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'train') def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) high = os.path.join(data_dir, 'dev/high') middle = os.path.join(data_dir, 'dev/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'dev') def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} test".format(data_dir)) high = 
os.path.join(data_dir, 'test/high') middle = os.path.join(data_dir, 'test/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'test') def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_txt(self, input_dir): lines = [] files = glob.glob(input_dir + "/*txt") for file in tqdm.tqdm(files, desc="read files"): with open(file, 'r', encoding='utf-8') as fin: data_raw = json.load(fin) data_raw["race_id"] = file lines.append(data_raw) return lines def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (_, data_raw) in enumerate(lines): race_id = "%s-%s" % (set_type, data_raw["race_id"]) article = data_raw["article"] for i in range(len(data_raw["answers"])): truth = str(ord(data_raw['answers'][i]) - ord('A')) question = data_raw['questions'][i] options = data_raw['options'][i] examples.append( InputExample( example_id=race_id, question=question, contexts=[article, article, article, article], # this is not efficient but convenient endings=[options[0], options[1], options[2], options[3]], label=truth)) return examples class SwagProcessor(DataProcessor): """Processor for the SWAG data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For swag testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_csv(self, input_file): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ InputExample( example_id=line[2], question=line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
contexts = [line[4], line[4], line[4], line[4]], endings = [line[7], line[8], line[9], line[10]], label=line[11] ) for line in lines[1:] # we skip the line with the column names ] return examples class ArcProcessor(DataProcessor): """Processor for the ARC data set (request from allennlp).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev") def get_test_examples(self, data_dir): logger.info("LOOKING AT {} test".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_json(self, input_file): with open(input_file, 'r', encoding='utf-8') as fin: lines = fin.readlines() return lines def _create_examples(self, lines, type): """Creates examples for the training and dev sets.""" #There are two types of labels. They should be normalized def normalize(truth): if truth in "ABCD": return ord(truth) - ord("A") elif truth in "1234": return int(truth) - 1 else: logger.info("truth ERROR! %s", str(truth)) return None examples = [] three_choice = 0 four_choice = 0 five_choice = 0 other_choices = 0 # we deleted example which has more than or less than four choices for line in tqdm.tqdm(lines, desc="read arc data"): data_raw = json.loads(line.strip("\n")) if len(data_raw["question"]["choices"]) == 3: three_choice += 1 continue elif len(data_raw["question"]["choices"]) == 5: five_choice += 1 continue elif len(data_raw["question"]["choices"]) != 4: other_choices += 1 continue four_choice += 1 truth = str(normalize(data_raw["answerKey"])) assert truth != "None" question_choices = data_raw["question"] question = question_choices["stem"] id = data_raw["id"] options = question_choices["choices"] if len(options) == 4: examples.append( InputExample( example_id = id, question=question, contexts=[options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""), options[2]["para"].replace("_", ""), options[3]["para"].replace("_", "")], endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]], label=truth)) if type == "train": assert len(examples) > 1 assert examples[0].label is not None logger.info("len examples: %s}", str(len(examples))) logger.info("Three choices: %s", str(three_choice)) logger.info("Five choices: %s", str(five_choice)) logger.info("Other choices: %s", str(other_choices)) logger.info("four choices: %s", str(four_choice)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, pad_token_segment_id=0, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_features = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze 
question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer.encode_plus( text_a, text_b, add_special_tokens=True, max_length=max_length, ) if 'num_truncated_tokens' in inputs and inputs['num_truncated_tokens'] > 0: logger.info('Attention! you are cropping tokens (swag task is ok). ' 'If you are training ARC and RACE and you are poping question + options,' 'you need to try to use a bigger max seq length!') input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"] # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = max_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length assert len(attention_mask) == max_length assert len(token_type_ids) == max_length choices_features.append((input_ids, attention_mask, token_type_ids)) label = label_map[example.label] if ex_index < 2: logger.info("*** Example ***") logger.info("race_id: {}".format(example.example_id)) for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask)))) logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids)))) logger.info("label: {}".format(label)) features.append( InputFeatures( example_id=example.example_id, choices_features=choices_features, label=label, ) ) return features processors = { "race": RaceProcessor, "swag": SwagProcessor, "arc": ArcProcessor } MULTIPLE_CHOICE_TASKS_NUM_LABELS = { "race", 4, "swag", 4, "arc", 4 }
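# ---------------------------------------------------------------------------
# Hedged usage sketch: how the InputFeatures built above are typically
# collected into tensors of shape [batch, num_choices, seq_len] for a
# multiple-choice head.  select_field() is a hypothetical helper, not part of
# this module.  Also note that the module-level constant directly above uses
# commas ("race", 4, ...), which makes it a set literal; a task -> num_labels
# lookup would need colons, as in the sketch below.
# ---------------------------------------------------------------------------
import torch

NUM_LABELS_SKETCH = {"race": 4, "swag": 4, "arc": 4}  # assumed intent of the constant above

def select_field(features, field):
    # Each entry of choices_features is a dict with 'input_ids',
    # 'input_mask' and 'segment_ids', as defined in InputFeatures above.
    return [[choice[field] for choice in f.choices_features] for f in features]

def features_to_tensors(features):
    all_input_ids = torch.tensor(select_field(features, "input_ids"), dtype=torch.long)
    all_input_mask = torch.tensor(select_field(features, "input_mask"), dtype=torch.long)
    all_segment_ids = torch.tensor(select_field(features, "segment_ids"), dtype=torch.long)
    all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long)
    return all_input_ids, all_input_mask, all_segment_ids, all_label_ids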
15,268
36.701235
116
py
DeeBERT
DeeBERT-master/examples/run_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer, RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer, DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) # from transformers.modeling_highway_roberta import RobertaForSequenceClassification from transformers import AdamW, get_linear_schedule_with_warmup from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors from transformers import glue_convert_examples_to_features as convert_examples_to_features logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer), 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer), 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] 
optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, 
global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = 
torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
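# ---------------------------------------------------------------------------
# Hedged sketch of the learning-rate shape the training loop above relies on:
# get_linear_schedule_with_warmup() ramps the LR multiplier linearly from 0 to
# 1 over num_warmup_steps, then decays it linearly back to 0 at t_total.  The
# standalone function below mirrors that shape for illustration only; it is
# not the library implementation.
# ---------------------------------------------------------------------------
def linear_warmup_decay(step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    """Return the LR multiplier in [0, 1] for a given optimizer step."""
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    remaining = num_training_steps - step
    return max(0.0, remaining / max(1, num_training_steps - num_warmup_steps))

# Example: with warmup_steps=100 and t_total=1000, the multiplier is 0.5 at
# step 50, 1.0 at step 100, and 0.5 again at step 550.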
28,140
52.398482
158
py
DeeBERT
DeeBERT-master/examples/benchmarks.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training """ # If checking the tensors placement # tf.debugging.set_log_device_placement(True) from typing import List import timeit from transformers import is_tf_available, is_torch_available from time import time import argparse import csv if is_tf_available(): import tensorflow as tf from transformers import TFAutoModel if is_torch_available(): import torch from transformers import AutoModel from transformers import AutoConfig, AutoTokenizer input_text = """Bent over their instruments, three hundred Fertilizers were plunged, as the Director of Hatcheries and Conditioning entered the room, in the scarcely breathing silence, the absent-minded, soliloquizing hum or whistle, of absorbed concentration. A troop of newly arrived students, very young, pink and callow, followed nervously, rather abjectly, at the Director's heels. Each of them carried a notebook, in which, whenever the great man spoke, he desperately scribbled. Straight from the horse's mouth. It was a rare privilege. The D. H. C. for Central London always made a point of personally conducting his new students round the various departments. "Just to give you a general idea," he would explain to them. For of course some sort of general idea they must have, if they were to do their work intelligently-though as little of one, if they were to be good and happy members of society, as possible. For particulars, as every one knows, make for virtue and happiness; generalities are intellectu- ally necessary evils. Not philosophers but fret-sawyers and stamp col- lectors compose the backbone of society. "To-morrow," he would add, smiling at them with a slightly menacing geniality, "you'll be settling down to serious work. You won't have time for generalities. Meanwhile ..." Meanwhile, it was a privilege. Straight from the horse's mouth into the notebook. The boys scribbled like mad. Tall and rather thin but upright, the Director advanced into the room. He had a long chin and big rather prominent teeth, just covered, when he was not talking, by his full, floridly curved lips. Old, young? Thirty? Fifty? Fifty-five? It was hard to say. And anyhow the question didn't arise; in this year of stability, A. F. 632, it didn't occur to you to ask it. "I shall begin at the beginning," said the D.H.C. and the more zealous students recorded his intention in their notebooks: Begin at the begin- ning. "These," he waved his hand, "are the incubators." And opening an insulated door he showed them racks upon racks of numbered test- tubes. "The week's supply of ova. Kept," he explained, "at blood heat; whereas the male gametes," and here he opened another door, "they have to be kept at thirty-five instead of thirty-seven. Full blood heat sterilizes." Rams wrapped in theremogene beget no lambs. 
Still leaning against the incubators he gave them, while the pencils scurried illegibly across the pages, a brief description of the modern fertilizing process; spoke first, of course, of its surgical introduc- tion-"the operation undergone voluntarily for the good of Society, not to mention the fact that it carries a bonus amounting to six months' salary"; continued with some account of the technique for preserving the excised ovary alive and actively developing; passed on to a consid- eration of optimum temperature, salinity, viscosity; referred to the liq- uor in which the detached and ripened eggs were kept; and, leading his charges to the work tables, actually showed them how this liquor was drawn off from the test-tubes; how it was let out drop by drop onto the specially warmed slides of the microscopes; how the eggs which it contained were inspected for abnormalities, counted and transferred to a porous receptacle; how (and he now took them to watch the operation) this receptacle was immersed in a warm bouillon containing free-swimming spermatozoa-at a minimum concentration of one hundred thousand per cubic centimetre, he insisted; and how, after ten minutes, the container was lifted out of the liquor and its contents re-examined; how, if any of the eggs remained unfertilized, it was again immersed, and, if necessary, yet again; how the fertilized ova went back to the incubators; where the Alphas and Betas re- mained until definitely bottled; while the Gammas, Deltas and Epsilons were brought out again, after only thirty-six hours, to undergo Bo- kanovsky's Process. "Bokanovsky's Process," repeated the Director, and the students un- derlined the words in their little notebooks. One egg, one embryo, one adult-normality. But a bokanovskified egg will bud, will proliferate, will divide. From eight to ninety-six buds, and every bud will grow into a perfectly formed embryo, and every embryo into a full-sized adult. Making ninety-six human beings grow where only one grew before. Progress. "Essentially," the D.H.C. concluded, "bokanovskification consists of a series of arrests of development. We check the normal growth and, paradoxically enough, the egg responds by budding." Responds by budding. The pencils were busy. He pointed. On a very slowly moving band a rack-full of test-tubes was entering a large metal box, another, rack-full was emerging. Machinery faintly purred. It took eight minutes for the tubes to go through, he told them. Eight minutes of hard X-rays being about as much as an egg can stand. A few died; of the rest, the least susceptible divided into two; most put out four buds; some eight; all were returned to the incubators, where the buds began to develop; then, after two days, were suddenly chilled, chilled and checked. Two, four, eight, the buds in their turn budded; and having budded were dosed almost to death with alcohol; consequently burgeoned again and having budded-bud out of bud out of bud-were thereafter-further arrest being generally fatal-left to develop in peace. By which time the original egg was in a fair way to becoming anything from eight to ninety-six embryos- a prodigious improvement, you will agree, on nature. Identical twins-but not in piddling twos and threes as in the old viviparous days, when an egg would sometimes accidentally divide; actually by dozens, by scores at a time. "Scores," the Director repeated and flung out his arms, as though he were distributing largesse. "Scores." 
But one of the students was fool enough to ask where the advantage lay. "My good boy!" The Director wheeled sharply round on him. "Can't you see? Can't you see?" He raised a hand; his expression was solemn. "Bokanovsky's Process is one of the major instruments of social stabil- ity!" Major instruments of social stability. Standard men and women; in uniform batches. The whole of a small factory staffed with the products of a single bokanovskified egg. "Ninety-six identical twins working ninety-six identical machines!" The voice was almost tremulous with enthusiasm. "You really know where you are. For the first time in history." He quoted the planetary motto. "Community, Identity, Stability." Grand words. "If we could bo- kanovskify indefinitely the whole problem would be solved." Solved by standard Gammas, unvarying Deltas, uniform Epsilons. Mil- lions of identical twins. The principle of mass production at last applied to biology. "But, alas," the Director shook his head, "we can't bokanovskify indefi- nitely." Ninety-six seemed to be the limit; seventy-two a good average. From the same ovary and with gametes of the same male to manufacture as many batches of identical twins as possible-that was the best (sadly a second best) that they could do. And even that was difficult. "For in nature it takes thirty years for two hundred eggs to reach ma- turity. But our business is to stabilize the population at this moment, here and now. Dribbling out twins over a quarter of a century-what would be the use of that?" Obviously, no use at all. But Podsnap's Technique had immensely ac- celerated the process of ripening. They could make sure of at least a hundred and fifty mature eggs within two years. Fertilize and bo- kanovskify-in other words, multiply by seventy-two-and you get an average of nearly eleven thousand brothers and sisters in a hundred and fifty batches of identical twins, all within two years of the same age. "And in exceptional cases we can make one ovary yield us over fifteen thousand adult individuals." Beckoning to a fair-haired, ruddy young man who happened to be passing at the moment. "Mr. Foster," he called. The ruddy young man approached. "Can you tell us the record for a single ovary, Mr. Foster?" "Sixteen thousand and twelve in this Centre," Mr. Foster replied with- out hesitation. He spoke very quickly, had a vivacious blue eye, and took an evident pleasure in quoting figures. "Sixteen thousand and twelve; in one hundred and eighty-nine batches of identicals. But of course they've done much better," he rattled on, "in some of the tropi- cal Centres. Singapore has often produced over sixteen thousand five hundred; and Mombasa has actually touched the seventeen thousand mark. But then they have unfair advantages. You should see the way a negro ovary responds to pituitary! It's quite astonishing, when you're used to working with European material. Still," he added, with a laugh (but the light of combat was in his eyes and the lift of his chin was challenging), "still, we mean to beat them if we can. I'm working on a wonderful Delta-Minus ovary at this moment. Only just eighteen months old. Over twelve thousand seven hundred children already, ei- ther decanted or in embryo. And still going strong. We'll beat them yet." "That's the spirit I like!" cried the Director, and clapped Mr. Foster on the shoulder. "Come along with us, and give these boys the benefit of your expert knowledge." Mr. Foster smiled modestly. "With pleasure." They went. 
In the Bottling Room all was harmonious bustle and ordered activity. Flaps of fresh sow's peritoneum ready cut to the proper size came shooting up in little lifts from the Organ Store in the sub-basement. Whizz and then, click! the lift-hatches hew open; the bottle-liner had only to reach out a hand, take the flap, insert, smooth-down, and be- fore the lined bottle had had time to travel out of reach along the end- less band, whizz, click! another flap of peritoneum had shot up from the depths, ready to be slipped into yet another bottle, the next of that slow interminable procession on the band. Next to the Liners stood the Matriculators. The procession advanced; one by one the eggs were transferred from their test-tubes to the larger containers; deftly the peritoneal lining was slit, the morula dropped into place, the saline solution poured in ... and already the bottle had passed, and it was the turn of the labellers. Heredity, date of fertilization, membership of Bokanovsky Group-details were trans- ferred from test-tube to bottle. No longer anonymous, but named, identified, the procession marched slowly on; on through an opening in the wall, slowly on into the Social Predestination Room. "Eighty-eight cubic metres of card-index," said Mr. Foster with relish, as they entered.""" def create_setup_and_compute(model_names: List[str], gpu: bool = True, tensorflow: bool = False, average_over: int = 3, torchscript: bool = False, xla: bool = False, amp: bool = False, fp16: bool = False, save_to_csv: bool = False, csv_filename: str = f"results_{round(time())}.csv"): if xla: tf.config.optimizer.set_jit(True) if amp: tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True}) if tensorflow: dictionary = {model_name: {} for model_name in model_names} results = _compute_tensorflow(model_names, dictionary, average_over, amp) else: device = 'cuda' if (gpu and torch.cuda.is_available()) else 'cpu' dictionary = {model_name: {} for model_name in model_names} results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16) print("=========== RESULTS ===========") for model_name in model_names: print("\t" + f"======= MODEL CHECKPOINT: {model_name} =======") for batch_size in results[model_name]["bs"]: print("\t\t" + f"===== BATCH SIZE: {batch_size} =====") for slice_size in results[model_name]["ss"]: result = results[model_name]['results'][batch_size][slice_size] if isinstance(result, str): print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{result}") else: print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{(round(1000 * result) / 1000)}" f"s") if save_to_csv: with open(csv_filename, mode='w') as csv_file: fieldnames = ['model', '1x8', '1x64', '1x128', '1x256', '1x512', '1x1024', '2x8', '2x64', '2x128', '2x256', '2x512', '2x1024', '4x8', '4x64', '4x128', '4x256', '4x512', '4x1024', '8x8', '8x64', '8x128', '8x256', '8x512', '8x1024', ] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for model_name in model_names: model_results = { f'{bs}x{ss}': results[model_name]['results'][bs][ss] for bs in results[model_name]["results"] for ss in results[model_name]['results'][bs] } writer.writerow({'model': model_name, **model_results}) def _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name, torchscript=torchscript) model = AutoModel.from_pretrained(model_name, 
config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] slice_sizes = [8, 64, 128, 256, 512, 1024] dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}} dictionary[model_name]["results"] = {i: {} for i in batch_sizes} for batch_size in batch_sizes: if fp16: model.half() model.to(device) model.eval() for slice_size in slice_sizes: if max_input_size is not None and slice_size > max_input_size: dictionary[model_name]["results"][batch_size][slice_size] = "N/A" else: sequence = torch.tensor(tokenized_sequence[:slice_size], device=device).repeat(batch_size, 1) try: if torchscript: print("Tracing model with sequence size", sequence.shape) inference = torch.jit.trace(model, sequence) inference(sequence) else: inference = model inference(sequence) print("Going through model with sequence of shape", sequence.shape) runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3) average_time = sum(runtimes)/float(len(runtimes)) / 3.0 dictionary[model_name]["results"][batch_size][slice_size] = average_time except RuntimeError as e: print("Doesn't fit on GPU.", e) torch.cuda.empty_cache() dictionary[model_name]["results"][batch_size][slice_size] = "N/A" return dictionary def _compute_tensorflow(model_names, dictionary, average_over, amp): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name) model = TFAutoModel.from_pretrained(model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] slice_sizes = [8, 64, 128, 256, 512, 1024] dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}} dictionary[model_name]["results"] = {i: {} for i in batch_sizes} print("Using model", model) @tf.function def inference(inputs): return model(inputs) for batch_size in batch_sizes: for slice_size in slice_sizes: if max_input_size is not None and slice_size > max_input_size: dictionary[model_name]["results"][batch_size][slice_size] = "N/A" else: sequence = tf.stack([tf.squeeze(tf.constant(tokenized_sequence[:slice_size])[None, :])] * batch_size) try: print("Going through model with sequence of shape", sequence.shape) # To make sure that the model is traced + that the tensors are on the appropriate device inference(sequence) runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3) average_time = sum(runtimes)/float(len(runtimes)) / 3.0 dictionary[model_name]["results"][batch_size][slice_size] = average_time except tf.errors.ResourceExhaustedError as e: print("Doesn't fit on GPU.", e) torch.cuda.empty_cache() dictionary[model_name]["results"][batch_size][slice_size] = "N/A" return dictionary def main(): parser = argparse.ArgumentParser() parser.add_argument("--models", required=False, type=str, default='all', help="Model checkpoints to be provided " "to the AutoModel classes. 
Leave " "blank to benchmark the base version " "of all available model " "architectures.") parser.add_argument("--torch", required=False, action="store_true", help="Benchmark the Pytorch version of the " "models") parser.add_argument("--torch_cuda", required=False, action="store_true", help="Pytorch only: run on available " "cuda devices") parser.add_argument("--torchscript", required=False, action="store_true", help="Pytorch only: trace the models " "using torchscript") parser.add_argument("--tensorflow", required=False, action="store_true", help="Benchmark the TensorFlow version " "of the models. Will run on GPU if " "the correct dependencies are " "installed") parser.add_argument("--xla", required=False, action="store_true", help="TensorFlow only: use XLA acceleration.") parser.add_argument("--amp", required=False, action="store_true", help="TensorFlow only: use automatic mixed precision acceleration.") parser.add_argument("--fp16", required=False, action="store_true", help="PyTorch only: use FP16 to accelerate inference.") parser.add_argument("--keras_predict", required=False, action="store_true", help="Whether to use model.predict " "instead of model() to do a " "forward pass.") parser.add_argument("--save_to_csv", required=False, action="store_true", help="Save to a CSV file.") parser.add_argument("--csv_filename", required=False, default=None, help="CSV filename used if saving results to csv.") parser.add_argument("--average_over", required=False, default=30, type=int, help="Times an experiment will be run.") args = parser.parse_args() if args.models == 'all': args.models = [ "gpt2", "bert-base-cased", "xlnet-base-cased", "xlm-mlm-en-2048", "transfo-xl-wt103", "openai-gpt", "distilbert-base-uncased", "distilgpt2", "roberta-base", "ctrl" ] else: args.models = args.models.split() print("Running with arguments", args) if args.torch: if is_torch_available(): create_setup_and_compute( model_names=args.models, tensorflow=False, gpu=args.torch_cuda, torchscript=args.torchscript, fp16=args.fp16, save_to_csv=args.save_to_csv, csv_filename=args.csv_filename, average_over=args.average_over ) else: raise ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.") if args.tensorflow: if is_tf_available(): create_setup_and_compute( model_names=args.models, tensorflow=True, xla=args.xla, amp=args.amp, save_to_csv=args.save_to_csv, csv_filename=args.csv_filename, average_over=args.average_over ) else: raise ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.") if __name__ == '__main__': main()
23,631
48.439331
138
py
DeeBERT
DeeBERT-master/examples/run_summarization_finetuning.py
# coding=utf-8 # Copyright 2019 The HuggingFace Inc. team. # Copyright (c) 2019 The HuggingFace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning seq2seq models for sequence generation.""" import argparse import functools import logging import os import random import sys import numpy as np from tqdm import tqdm, trange import torch from torch.optim import Adam from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import ( AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Model, ) from utils_summarization import ( CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids, ) logger = logging.getLogger(__name__) logging.basicConfig(stream=sys.stdout, level=logging.INFO) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) # ------------ # Load dataset # ------------ def load_and_cache_examples(args, tokenizer): dataset = CNNDailyMailDataset(tokenizer, data_dir=args.data_dir) return dataset def collate(data, tokenizer, block_size): """ List of tuple as an input. """ # remove the files with empty an story/summary, encode and fit to block data = filter(lambda x: not (len(x[0]) == 0 or len(x[1]) == 0), data) data = [ encode_for_summarization(story, summary, tokenizer) for story, summary in data ] data = [ ( fit_to_block_size(story, block_size, tokenizer.pad_token_id), fit_to_block_size(summary, block_size, tokenizer.pad_token_id), ) for story, summary in data ] stories = torch.tensor([story for story, summary in data]) summaries = torch.tensor([summary for story, summary in data]) encoder_token_type_ids = compute_token_type_ids(stories, tokenizer.cls_token_id) encoder_mask = build_mask(stories, tokenizer.pad_token_id) decoder_mask = build_mask(summaries, tokenizer.pad_token_id) lm_labels = build_lm_labels(summaries, tokenizer.pad_token_id) return ( stories, summaries, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels, ) # ---------- # Optimizers # ---------- class BertSumOptimizer(object): """ Specific optimizer for BertSum. As described in [1], the authors fine-tune BertSum for abstractive summarization using two Adam Optimizers with different warm-up steps and learning rate. They also use a custom learning rate scheduler. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). 
""" def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): self.encoder = model.encoder self.decoder = model.decoder self.lr = lr self.warmup_steps = warmup_steps self.optimizers = { "encoder": Adam( model.encoder.parameters(), lr=lr["encoder"], betas=(beta_1, beta_2), eps=eps, ), "decoder": Adam( model.decoder.parameters(), lr=lr["decoder"], betas=(beta_1, beta_2), eps=eps, ), } self._step = 0 def _update_rate(self, stack): return self.lr[stack] * min( self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5) ) def zero_grad(self): self.optimizer_decoder.zero_grad() self.optimizer_encoder.zero_grad() def step(self): self._step += 1 for stack, optimizer in self.optimizers.items(): new_rate = self._update_rate(stack) for param_group in optimizer.param_groups: param_group["lr"] = new_rate optimizer.step() # ------------ # Train # ------------ def train(args, model, tokenizer): """ Fine-tune the pretrained model on the corpus. """ set_seed(args) # Load the data args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_dataset = load_and_cache_examples(args, tokenizer) train_sampler = RandomSampler(train_dataset) model_collate_fn = functools.partial(collate, tokenizer=tokenizer, block_size=512) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=model_collate_fn, ) # Training schedule if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = t_total // ( len(train_dataloader) // args.gradient_accumulation_steps + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare the optimizer lr = {"encoder": 0.002, "decoder": 0.2} warmup_steps = {"encoder": 20000, "decoder": 10000} optimizer = BertSumOptimizer(model, lr, warmup_steps) # Train logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps # * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) model.zero_grad() train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=True) global_step = 0 tr_loss = 0.0 for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True) for step, batch in enumerate(epoch_iterator): source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch source = source.to(args.device) target = target.to(args.device) encoder_token_type_ids = encoder_token_type_ids.to(args.device) encoder_mask = encoder_mask.to(args.device) decoder_mask = decoder_mask.to(args.device) lm_labels = lm_labels.to(args.device) model.train() outputs = model( source, target, encoder_token_type_ids=encoder_token_type_ids, encoder_attention_mask=encoder_mask, decoder_attention_mask=decoder_mask, decoder_lm_labels=lm_labels, ) loss = outputs[0] print(loss) if args.gradient_accumulation_steps > 1: loss /= args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() model.zero_grad() global_step += 1 if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return global_step, tr_loss / global_step # ------------ # Train # ------------ def evaluate(args, model, tokenizer, prefix=""): set_seed(args) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch source = source.to(args.device) target = target.to(args.device) encoder_token_type_ids = encoder_token_type_ids.to(args.device) encoder_mask = encoder_mask.to(args.device) decoder_mask = decoder_mask.to(args.device) lm_labels = lm_labels.to(args.device) with torch.no_grad(): outputs = model( source, target, encoder_token_type_ids=encoder_token_type_ids, encoder_attention_mask=encoder_mask, decoder_attention_mask=decoder_mask, decoder_lm_labels=lm_labels, ) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = {"perplexity": perplexity} # Save the evaluation's results output_eval_file = os.path.join(args.output_dir, "eval_results.txt") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) 
return result def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input training data file (a text file).", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Optional parameters parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--do_evaluate", type=bool, default=False, help="Run model evaluation on out-of-sample data.", ) parser.add_argument("--do_train", type=bool, default=False, help="Run training.") parser.add_argument( "--do_overwrite_output_dir", type=bool, default=False, help="Whether to overwrite the output dir.", ) parser.add_argument( "--model_name_or_path", default="bert-base-cased", type=str, help="The model checkpoint to initialize the encoder and decoder's weights with.", ) parser.add_argument( "--model_type", default="bert", type=str, help="The decoder architecture to be fine-tuned.", ) parser.add_argument( "--max_grad_norm", default=1.0, type=float, help="Max gradient norm." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument( "--to_cpu", default=False, type=bool, help="Whether to force training on CPU." ) parser.add_argument( "--num_train_epochs", default=10, type=int, help="Total number of training epochs to perform.", ) parser.add_argument( "--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument("--seed", default=42, type=int) args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.do_overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format( args.output_dir ) ) # Set up training device if args.to_cpu or not torch.cuda.is_available(): args.device = torch.device("cpu") args.n_gpu = 0 else: args.device = torch.device("cuda") args.n_gpu = torch.cuda.device_count() # Load pretrained model and tokenizer. The decoder's weights are randomly initialized. tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) config = BertConfig.from_pretrained(args.model_name_or_path) decoder_model = BertForMaskedLM(config) model = Model2Model.from_pretrained( args.model_name_or_path, decoder_model=decoder_model ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", 0, args.device, args.n_gpu, False, False, ) logger.info("Training/evaluation parameters %s", args) # Train the model model.to(args.device) if args.do_train: global_step, tr_loss = train(args, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) torch.save(args, os.path.join(args.output_dir, "training_arguments.bin")) # Evaluate the model results = {} if args.do_evaluate: checkpoints = [] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: encoder_checkpoint = os.path.join(checkpoint, "encoder") decoder_checkpoint = os.path.join(checkpoint, "decoder") model = PreTrainedEncoderDecoder.from_pretrained( encoder_checkpoint, decoder_checkpoint ) model.to(args.device) results = "placeholder" return results if __name__ == "__main__": main()
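Illustrative sketch (not part of the file above): BertSumOptimizer scales each stack's base learning rate by min(step ** -0.5, step * warmup_steps ** -0.5). The standalone snippet below evaluates that rule with the lr and warmup_steps values hard-coded in train(); it only shows how the encoder and decoder rates evolve and is not a replacement for the optimizer class.

# The schedule implemented by BertSumOptimizer._update_rate above, pulled out on its own.
lr = {"encoder": 0.002, "decoder": 0.2}
warmup_steps = {"encoder": 20000, "decoder": 10000}

def update_rate(stack, step):
    # Whichever term is smaller wins: the ramp term (step * warmup ** -0.5) at the very
    # start, the inverse-square-root decay term (step ** -0.5) afterwards.
    return lr[stack] * min(step ** (-0.5), step * warmup_steps[stack] ** (-0.5))

for step in (1, 100, 10000, 100000):
    print(step, update_rate("encoder", step), update_rate("decoder", step))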
15,727
30.902637
120
py
DeeBERT
DeeBERT-master/examples/run_bertology.py
#!/usr/bin/env python3 # Copyright 2018 CMU and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Bertology: this script shows how you can explore the internals of the models in the library to: - compute the entropy of the head attentions - compute the importance of each head - prune (remove) the low importance head. Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650) which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1 """ import os import argparse import logging from datetime import timedelta, datetime from tqdm import tqdm import numpy as np import torch from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subset from torch.utils.data.distributed import DistributedSampler from torch.nn import CrossEntropyLoss, MSELoss from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer) from run_glue import set_seed, load_and_cache_examples, ALL_MODELS, MODEL_CLASSES from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors logger = logging.getLogger(__name__) def entropy(p): """ Compute the entropy of a probability distribution """ plogp = p * torch.log(p) plogp[p == 0] = 0 return -plogp.sum(dim=-1) def print_2d_tensor(tensor): """ Print a 2D tensor """ logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) else: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None): """ This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.bert.config.num_hidden_layers, model.bert.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) preds = None labels = None tot_tokens = 0.0 for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): batch = tuple(t.to(args.device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, head_mask=head_mask) loss, logits, all_attentions = outputs[0], 
outputs[1], outputs[-1] # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * input_mask.float().unsqueeze(1) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = label_ids.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, label_ids.detach().cpu().numpy(), axis=0) tot_tokens += input_mask.float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print/save matrices np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy()) np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, preds, labels def mask_heads(args, model, eval_dataloader): """ This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) original_score = compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float('Inf') current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) current_score = compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Masking: current score: %f, remaning heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum()/new_head_mask.numel() * 100) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy()) return head_mask def prune_heads(args, model, eval_dataloader, head_mask): """ This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_masking = compute_metrics(args.task_name, preds, labels)[args.metric_name] original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict((layer, (1 - head_mask[layer].long()).nonzero().tolist()) for layer in range(len(head_mask))) assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_pruning = compute_metrics(args.task_name, preds, labels)[args.metric_name] new_time = datetime.now() - before_time logger.info("Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params/original_num_params * 100) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time/new_time * 100) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join( ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.") parser.add_argument("--overwrite_output_dir", action='store_true', help="Whether to overwrite data in output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument("--dont_normalize_importance_by_layer", action='store_true', help="Don't normalize importance score by layers") parser.add_argument("--dont_normalize_global_importance", action='store_true', help="Don't normalize all importance scores between 0 and 1") parser.add_argument("--try_masking", action='store_true', help="Whether to try to mask head until a threshold of accuracy.") parser.add_argument("--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).") parser.add_argument("--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.") parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, sequences shorter padded.") parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) args.device = torch.device("cuda", args.local_rank) args.n_gpu = 1 torch.distributed.init_process_group(backend='nccl') # Initializes the distributed backend # Setup logging logging.basicConfig(level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1))) # Set seeds set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = "" for key in MODEL_CLASSES: if key in args.model_name_or_path.lower(): args.model_type = key # take the first match in model types break config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, output_attentions=True, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Distributed and parallel training model.to(args.device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) elif args.n_gpu > 1: model = torch.nn.DataParallel(model) # Print/save training arguments torch.save(args, os.path.join(args.output_dir, 'run_args.bin')) logger.info("Training/evaluation parameters %s", args) # Prepare dataset for the GLUE task 
eval_data = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True) if args.data_subset > 0: eval_data = Subset(eval_data, list(range(min(args.data_subset, len(eval_data))))) eval_sampler = SequentialSampler(eval_data) if args.local_rank == -1 else DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) # Compute head entropy and importance score compute_heads_importance(args, model, eval_dataloader) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: head_mask = mask_heads(args, model, eval_dataloader) prune_heads(args, model, eval_dataloader, head_mask) if __name__ == '__main__': main()
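Illustrative sketch (not part of the file above): compute_heads_importance estimates head importance as the accumulated absolute gradient of the loss with respect to a ones-valued head mask. The toy example below shows only that gradient-through-a-mask mechanism; a plain linear layer's four outputs stand in for attention heads, and every tensor here is made up.

import torch
import torch.nn as nn

torch.manual_seed(0)
toy_model = nn.Linear(16, 4)                   # pretend the 4 outputs are 4 "heads"
head_mask = torch.ones(4, requires_grad=True)  # analogous to head_mask = torch.ones(n_layers, n_heads)

inputs = torch.randn(8, 16)
targets = torch.randn(8, 4)

# Forward pass with the mask applied, then backpropagate. As in the script, no torch.no_grad()
# is used, precisely because the gradient w.r.t. the mask is the quantity of interest.
loss = ((toy_model(inputs) * head_mask - targets) ** 2).mean()
loss.backward()

importance = head_mask.grad.abs()              # accumulated over batches in the real script
print(importance / (importance.sum() + 1e-20)) # normalised, loosely mirroring the steps above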
18,901
51.798883
177
py
DeeBERT
DeeBERT-master/examples/utils_summarization_test.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from utils_summarization import ( compute_token_type_ids, fit_to_block_size, build_mask, build_lm_labels, process_story, ) class SummarizationDataProcessingTest(unittest.TestCase): def setUp(self): self.block_size = 10 def test_fit_to_block_sequence_too_small(self): """ Pad the sequence with 0 if the sequence is smaller than the block size.""" sequence = [1, 2, 3, 4] expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_fit_to_block_sequence_fit_exactly(self): """ Do nothing if the sequence is the right size. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_fit_to_block_sequence_too_big(self): """ Truncate the sequence if it is too long. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_process_story_no_highlights(self): """ Processing a story with no highlights returns an empty list for the summary. """ raw_story = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _, summary_lines = process_story(raw_story) self.assertEqual(summary_lines, []) def test_process_empty_story(self): """ An empty story returns an empty collection of lines. 
""" raw_story = "" story_lines, summary_lines = process_story(raw_story) self.assertEqual(story_lines, []) self.assertEqual(summary_lines, []) def test_process_story_with_missing_period(self): raw_story = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) story_lines, summary_lines = process_story(raw_story) expected_story_lines = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(expected_story_lines, story_lines) expected_summary_lines = ["It was the best of times."] self.assertEqual(expected_summary_lines, summary_lines) def test_build_lm_labels_no_padding(self): sequence = torch.tensor([1, 2, 3, 4]) expected = sequence np.testing.assert_array_equal( build_lm_labels(sequence, 0).numpy(), expected.numpy() ) def test_build_lm_labels(self): sequence = torch.tensor([1, 2, 3, 4, 0, 0, 0]) expected = torch.tensor([1, 2, 3, 4, -1, -1, -1]) np.testing.assert_array_equal( build_lm_labels(sequence, 0).numpy(), expected.numpy() ) def test_build_mask_no_padding(self): sequence = torch.tensor([1, 2, 3, 4]) expected = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy()) def test_build_mask(self): sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal( build_mask(sequence, 23).numpy(), expected.numpy() ) def test_build_mask_with_padding_equal_to_one(self): sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy()) def test_compute_token_type_ids(self): separator = 101 batch = torch.tensor( [[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) expected = torch.tensor( [[0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1], [0, 1, 1, 1, 0, 0]] ) result = compute_token_type_ids(batch, separator) np.testing.assert_array_equal(result, expected) if __name__ == "__main__": unittest.main()
5,178
36.80292
98
py
DeeBERT
DeeBERT-master/examples/run_generation.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet) """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging from tqdm import trange import torch import torch.nn.functional as F import numpy as np from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer from transformers import XLNetLMHeadModel, XLNetTokenizer from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer from transformers import CTRLLMHeadModel, CTRLTokenizer from transformers import XLMWithLMHeadModel, XLMTokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig)), ()) MODEL_CLASSES = { 'gpt2': (GPT2LMHeadModel, GPT2Tokenizer), 'ctrl': (CTRLLMHeadModel, CTRLTokenizer), 'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), 'xlnet': (XLNetLMHeadModel, XLNetTokenizer), 'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer), 'xlm': (XLMWithLMHeadModel, XLMTokenizer), } # Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. 
<eod> </s> <eos>""" def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size x vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probs > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove) logits[indices_to_remove] = filter_value return logits def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0, is_xlnet=False, is_xlm_mlm=False, xlm_mask_token=None, xlm_lang=None, device='cpu'): context = torch.tensor(context, dtype=torch.long, device=device) context = context.unsqueeze(0).repeat(num_samples, 1) generated = context with torch.no_grad(): for _ in trange(length): inputs = {'input_ids': generated} if is_xlnet: # XLNet is a direct (predict same token, not next token) and bi-directional model by default # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring) input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1) perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device) target_mapping[0, 0, -1] = 1.0 # predict last token inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping} if is_xlm_mlm and xlm_mask_token: # XLM MLM models are direct models (predict same token, not next token) # => need one additional dummy token in the input (will be masked and guessed) input_ids = torch.cat((generated, torch.full((1, 1), xlm_mask_token, dtype=torch.long, device=device)), dim=1) inputs = {'input_ids': input_ids} if xlm_lang is not None: inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1) outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states) next_token_logits = outputs[0][:, -1, :] / (temperature if temperature > 0 else 1.) 
# repetition penalty from CTRL (https://arxiv.org/abs/1909.05858) for i in range(num_samples): for _ in set(generated[i].tolist()): next_token_logits[i, _] /= repetition_penalty filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) if temperature == 0: # greedy sampling: next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1) else: next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) generated = torch.cat((generated, next_token), dim=1) return generated def main(): parser = argparse.ArgumentParser() parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--prompt", type=str, default="") parser.add_argument("--padding_text", type=str, default="") parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--length", type=int, default=20) parser.add_argument("--num_samples", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0, help="temperature of 0 implies greedy sampling") parser.add_argument("--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--top_p", type=float, default=0.9) parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--stop_token', type=str, default=None, help="Token at which text generation is stopped") args = parser.parse_args() args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() set_seed(args) args.model_type = args.model_type.lower() model_class, tokenizer_class = MODEL_CLASSES[args.model_type] tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) model = model_class.from_pretrained(args.model_name_or_path) model.to(args.device) model.eval() if args.length < 0 and model.config.max_position_embeddings > 0: args.length = model.config.max_position_embeddings elif 0 < model.config.max_position_embeddings < args.length: args.length = model.config.max_position_embeddings # No generation bigger than model size elif args.length < 0: args.length = MAX_LENGTH # avoid infinite loop logger.info(args) if args.model_type in ["ctrl"]: if args.temperature > 0.7: logger.info('CTRL typically works better with lower temperatures (and lower top_k).') while True: xlm_lang = None # XLM Language usage detailed in the issues #1414 if args.model_type in ["xlm"] and hasattr(tokenizer, 'lang2id') and hasattr(model.config, 'use_lang_emb') \ and model.config.use_lang_emb: if args.xlm_lang: language = args.xlm_lang else: language = None while language not in tokenizer.lang2id.keys(): language = input("Using XLM. 
Select language in " + str(list(tokenizer.lang2id.keys())) + " >>> ") xlm_lang = tokenizer.lang2id[language] # XLM masked-language modeling (MLM) models need masked token (see details in sample_sequence) is_xlm_mlm = args.model_type in ["xlm"] and 'mlm' in args.model_name_or_path if is_xlm_mlm: xlm_mask_token = tokenizer.mask_token_id else: xlm_mask_token = None raw_text = args.prompt if args.prompt else input("Model prompt >>> ") if args.model_type in ["transfo-xl", "xlnet"]: # Models with memory likes to have a long prompt for short inputs. raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text context_tokens = tokenizer.encode(raw_text, add_special_tokens=False) if args.model_type == "ctrl": if not any(context_tokens[0] == x for x in tokenizer.control_codes.values()): logger.info("WARNING! You are not starting your generation from a control code so you won't get good results") out = sample_sequence( model=model, context=context_tokens, num_samples=args.num_samples, length=args.length, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p, repetition_penalty=args.repetition_penalty, is_xlnet=bool(args.model_type == "xlnet"), is_xlm_mlm=is_xlm_mlm, xlm_mask_token=xlm_mask_token, xlm_lang=xlm_lang, device=args.device, ) out = out[:, len(context_tokens):].tolist() for o in out: text = tokenizer.decode(o, clean_up_tokenization_spaces=True) text = text[: text.find(args.stop_token) if args.stop_token else None] print(text) if args.prompt: break return text if __name__ == '__main__': main()
13,112
49.241379
167
py
DeeBERT
DeeBERT-master/examples/run_ner.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """ from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from seqeval.metrics import precision_score, recall_score, f1_score from tensorboardX import SummaryWriter from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file from transformers import AdamW, get_linear_schedule_with_warmup from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer from transformers import RobertaConfig, RobertaForTokenClassification, RobertaTokenizer from transformers import DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer from transformers import CamembertConfig, CamembertForTokenClassification, CamembertTokenizer logger = logging.getLogger(__name__) ALL_MODELS = sum( (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { "bert": (BertConfig, BertForTokenClassification, BertTokenizer), "roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer), "distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer), "camembert": (CamembertConfig, CamembertForTokenClassification, CamembertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay}, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0} ] 
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * ( torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"]: batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev") for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 
and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""): eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation %s *****", prefix) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"]: batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] if args.n_gpu > 1: tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating eval_loss += tmp_eval_loss.item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = np.argmax(preds, axis=2) label_map = {i: label for i, label in enumerate(labels)} out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if out_label_ids[i, j] != pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) results = { "loss": eval_loss, "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list) } logger.info("***** Eval results %s *****", prefix) for key in sorted(results.keys()): logger.info(" %s = %s", key, str(results[key])) return results, preds_list def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data 
features from cache or dataset file cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}".format(mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = read_examples_from_file(args.data_dir, mode) features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer, cls_token_at_end=bool(args.model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(args.model_type in ["roberta"]), # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, pad_token_label_id=pad_token_label_id ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.") parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument("--evaluate_during_training", action="store_true", help="Whether to run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory") parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare CONLL-2003 task labels = get_labels(args.labels) num_labels = len(labels) # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later pad_token_label_id = CrossEntropyLoss().ignore_index # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train") global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))) logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step) if global_step: result = {"{}_{}".format(global_step, k): v for k, v in result.items()} results.update(result) output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("{} = {}\n".format(key, str(results[key]))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.output_dir) model.to(args.device) result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test") # Save results output_test_results_file = os.path.join(args.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(result.keys()): writer.write("{} = {}\n".format(key, str(result[key]))) # Save predictions output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt") with open(output_test_predictions_file, "w") as writer: with open(os.path.join(args.data_dir, "test.txt"), "r") as f: example_id = 0 for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": writer.write(line) if not predictions[example_id]: example_id += 1 elif predictions[example_id]: output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n" writer.write(output_line) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0]) return results if __name__ == "__main__": main()
28,786
53.009381
184
py
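A minimal, self-contained sketch of the label realignment that run_ner.py's evaluate() performs after argmax-ing the logits: positions marked with the padding label id (the script uses CrossEntropyLoss().ignore_index) are dropped and the remaining ids are mapped back to label strings before seqeval scoring. The label set, pad id and arrays below are illustrative, not taken from the script.

import numpy as np

# Illustrative label vocabulary and pad id (assumed values)
labels = ["O", "B-PER", "I-PER"]
label_map = {i: label for i, label in enumerate(labels)}
pad_token_label_id = -100

out_label_ids = np.array([[1, 2, -100], [0, -100, -100]])  # gold ids, padded
preds = np.array([[1, 2, 0], [0, 1, 2]])                   # argmax of the logits

out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:          # skip padded positions
            out_label_list[i].append(label_map[out_label_ids[i][j]])
            preds_list[i].append(label_map[preds[i][j]])

print(out_label_list)  # [['B-PER', 'I-PER'], ['O']]
print(preds_list)      # [['B-PER', 'I-PER'], ['O']]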
DeeBERT
DeeBERT-master/examples/utils_summarization.py
from collections import deque import os import torch from torch.utils.data import Dataset # ------------ # Data loading # ------------ class CNNDailyMailDataset(Dataset): """ Abstracts the dataset used to train seq2seq models. CNN/Daily News: The CNN/Daily News raw datasets are downloaded from [1]. The stories are stored in different files; the summary appears at the end of the story as sentences that are prefixed by the special `@highlight` line. To process the data, untar both datasets in the same folder, and pass the path to this folder as the "data_dir argument. The formatting code was inspired by [2]. [1] https://cs.nyu.edu/~kcho/ [2] https://github.com/abisee/cnn-dailymail/ """ def __init__(self, tokenizer, prefix="train", data_dir=""): assert os.path.isdir(data_dir) self.tokenizer = tokenizer # We initialize the class by listing all the files that contain # stories and summaries. Files are not read in memory given # the size of the corpus. self.stories_path = [] datasets = ("cnn", "dailymail") for dataset in datasets: path_to_stories = os.path.join(data_dir, dataset, "stories") story_filenames_list = os.listdir(path_to_stories) for story_filename in story_filenames_list: path_to_story = os.path.join(path_to_stories, story_filename) if not os.path.isfile(path_to_story): continue self.stories_path.append(path_to_story) def __len__(self): return len(self.stories_path) def __getitem__(self, idx): story_path = self.stories_path[idx] with open(story_path, encoding="utf-8") as source: raw_story = source.read() story_lines, summary_lines = process_story(raw_story) return story_lines, summary_lines def process_story(raw_story): """ Extract the story and summary from a story file. Attributes: raw_story (str): content of the story file as an utf-8 encoded string. Raises: IndexError: If the stoy is empty or contains no highlights. """ nonempty_lines = list( filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]) ) # for some unknown reason some lines miss a period, add it nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] # gather article lines story_lines = [] lines = deque(nonempty_lines) while True: try: element = lines.popleft() if element.startswith("@highlight"): break story_lines.append(element) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None. return story_lines, [] # gather summary lines summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) return story_lines, summary_lines def _add_missing_period(line): END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"] if line.startswith("@highlight"): return line if line[-1] in END_TOKENS: return line return line + "." # -------------------------- # Encoding and preprocessing # -------------------------- def fit_to_block_size(sequence, block_size, pad_token): """ Adapt the source and target sequences' lengths to the block size. If the sequence is shorter than the block size we pad it with -1 ids which correspond to padding tokens. """ if len(sequence) > block_size: return sequence[:block_size] else: sequence.extend([pad_token] * (block_size - len(sequence))) return sequence def build_lm_labels(sequence, pad_token): """ Padding token, encoded as 0, are represented by the value -1 so they are not taken into account in the loss computation. """ padded = sequence.clone() padded[padded == pad_token] = -1 return padded def build_mask(sequence, pad_token): """ Builds the mask. 
The attention mechanism will only attend to positions with value 1. """ mask = torch.ones_like(sequence) idx_pad_tokens = sequence == pad_token mask[idx_pad_tokens] = 0 return mask def encode_for_summarization(story_lines, summary_lines, tokenizer): """ Encode the story and summary lines, and join them as specified in [1] by using `[SEP] [CLS]` tokens to separate sentences. """ story_lines_token_ids = [ tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line)) for line in story_lines ] summary_lines_token_ids = [ tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line)) for line in summary_lines ] story_token_ids = [ token for sentence in story_lines_token_ids for token in sentence ] summary_token_ids = [ token for sentence in summary_lines_token_ids for token in sentence ] return story_token_ids, summary_token_ids def compute_token_type_ids(batch, separator_token_id): """ Segment embeddings as described in [1] The values {0,1} were found in the repository [2]. Attributes: batch: torch.Tensor, size [batch_size, block_size] Batch of input. separator_token_id: int The value of the token that separates the segments. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) """ batch_embeddings = [] for sequence in batch: sentence_num = 0 embeddings = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2) batch_embeddings.append(embeddings) return torch.tensor(batch_embeddings)
6,022
31.556757
88
py
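A short usage sketch for the helpers defined above (process_story, fit_to_block_size, build_mask); it assumes utils_summarization.py is importable and uses a made-up story string in the CNN/DailyMail "@highlight" format.

import torch
from utils_summarization import process_story, fit_to_block_size, build_mask

raw_story = "The cat sat on the mat\n\n@highlight\n\nA cat sat down"
story_lines, summary_lines = process_story(raw_story)
print(story_lines)    # ['The cat sat on the mat.']  (missing period added)
print(summary_lines)  # ['A cat sat down.']

token_ids = fit_to_block_size([5, 6, 7], block_size=6, pad_token=0)
print(token_ids)                                         # [5, 6, 7, 0, 0, 0]
print(build_mask(torch.tensor(token_ids), pad_token=0))  # tensor([1, 1, 1, 0, 0, 0])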
DeeBERT
DeeBERT-master/examples/run_tf_glue.py
import os import tensorflow as tf import tensorflow_datasets from transformers import BertTokenizer, TFBertForSequenceClassification, BertConfig, glue_convert_examples_to_features, BertForSequenceClassification, glue_processors # script parameters BATCH_SIZE = 32 EVAL_BATCH_SIZE = BATCH_SIZE * 2 USE_XLA = False USE_AMP = False EPOCHS = 3 TASK = "mrpc" if TASK == "sst-2": TFDS_TASK = "sst2" elif TASK == "sts-b": TFDS_TASK = "stsb" else: TFDS_TASK = TASK num_labels = len(glue_processors[TASK]().get_labels()) print(num_labels) tf.config.optimizer.set_jit(USE_XLA) tf.config.optimizer.set_experimental_options({"auto_mixed_precision": USE_AMP}) # Load tokenizer and model from pretrained model/vocabulary. Specify the number of labels to classify (2+: classification, 1: regression) config = BertConfig.from_pretrained("bert-base-cased", num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForSequenceClassification.from_pretrained('bert-base-cased', config=config) # Load dataset via TensorFlow Datasets data, info = tensorflow_datasets.load(f'glue/{TFDS_TASK}', with_info=True) train_examples = info.splits['train'].num_examples # MNLI expects either validation_matched or validation_mismatched valid_examples = info.splits['validation'].num_examples # Prepare dataset for GLUE as a tf.data.Dataset instance train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, 128, TASK) # MNLI expects either validation_matched or validation_mismatched valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, TASK) train_dataset = train_dataset.shuffle(128).batch(BATCH_SIZE).repeat(-1) valid_dataset = valid_dataset.batch(EVAL_BATCH_SIZE) # Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule opt = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08) if USE_AMP: # loss scaling is currently required when using mixed precision opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, 'dynamic') if num_labels == 1: loss = tf.keras.losses.MeanSquaredError() else: loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy') model.compile(optimizer=opt, loss=loss, metrics=[metric]) # Train and evaluate using tf.keras.Model.fit() train_steps = train_examples//BATCH_SIZE valid_steps = valid_examples//EVAL_BATCH_SIZE history = model.fit(train_dataset, epochs=EPOCHS, steps_per_epoch=train_steps, validation_data=valid_dataset, validation_steps=valid_steps) # Save TF2 model os.makedirs('./save/', exist_ok=True) model.save_pretrained('./save/') if TASK == "mrpc": # Load the TensorFlow model in PyTorch for inspection # This is to demo the interoperability between the two frameworks, you don't have to # do this in real life (you can run the inference on the TF model). pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True) # Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task sentence_0 = 'This research was consistent with his findings.' sentence_1 = 'His findings were compatible with this research.' sentence_2 = 'His findings were not compatible with this research.' 
inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt') inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt') del inputs_1["special_tokens_mask"] del inputs_2["special_tokens_mask"] pred_1 = pytorch_model(**inputs_1)[0].argmax().item() pred_2 = pytorch_model(**inputs_2)[0].argmax().item() print('sentence_1 is', 'a paraphrase' if pred_1 else 'not a paraphrase', 'of sentence_0') print('sentence_2 is', 'a paraphrase' if pred_2 else 'not a paraphrase', 'of sentence_0')
3,978
41.329787
166
py
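As a hypothetical follow-up to the script above (not part of it), the saved TF2 checkpoint in ./save/ could be reloaded and scored on the MRPC validation split with Keras. The tokenizer is taken from bert-base-cased again because the script only saves the model weights; batch size and sequence length mirror the values used during training.

import tensorflow as tf
import tensorflow_datasets
from transformers import BertTokenizer, TFBertForSequenceClassification, glue_convert_examples_to_features

tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForSequenceClassification.from_pretrained('./save/')
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')])

data = tensorflow_datasets.load('glue/mrpc')
valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, 'mrpc')
print(model.evaluate(valid_dataset.batch(64)))  # [loss, accuracy]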
DeeBERT
DeeBERT-master/examples/test_examples.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest import argparse import logging try: # python 3.4+ can use builtin unittest.mock instead of mock package from unittest.mock import patch except ImportError: from mock import patch import run_glue import run_squad import run_generation logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument('-f') args = parser.parse_args() return args.f class ExamplesTests(unittest.TestCase): def test_run_glue(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_glue.py", "--data_dir=./examples/tests_samples/MRPC/", "--task_name=mrpc", "--do_train", "--do_eval", "--output_dir=./examples/tests_samples/temp_dir", "--per_gpu_train_batch_size=2", "--per_gpu_eval_batch_size=1", "--learning_rate=1e-4", "--max_steps=10", "--warmup_steps=2", "--overwrite_output_dir", "--seed=42"] model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_glue.main() for value in result.values(): self.assertGreaterEqual(value, 0.75) def test_run_squad(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_squad.py", "--train_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", "--predict_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", "--model_name=bert-base-uncased", "--output_dir=./examples/tests_samples/temp_dir", "--max_steps=10", "--warmup_steps=2", "--do_train", "--do_eval", "--version_2_with_negative", "--learning_rate=2e-4", "--per_gpu_train_batch_size=2", "--per_gpu_eval_batch_size=1", "--overwrite_output_dir", "--seed=42"] model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_squad.main() self.assertGreaterEqual(result['f1'], 30) self.assertGreaterEqual(result['exact'], 30) def test_generation(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"] model_type, model_name = ("--model_type=openai-gpt", "--model_name_or_path=openai-gpt") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_generation.main() self.assertGreaterEqual(len(result), 10) if __name__ == "__main__": unittest.main()
4,166
36.205357
88
py
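The same sys.argv-patching pattern could be extended to the other example scripts; the sketch below is a hypothetical smoke test for run_ner.py, with made-up data paths and step counts rather than fixtures that actually ship with the repository.

import sys
from unittest.mock import patch

import run_ner

def test_run_ner_smoke():
    testargs = ["run_ner.py",
                "--data_dir=./examples/tests_samples/NER/",
                "--model_type=bert",
                "--model_name_or_path=bert-base-uncased",
                "--output_dir=./examples/tests_samples/temp_dir",
                "--do_train", "--do_eval",
                "--max_steps=2",
                "--overwrite_output_dir",
                "--seed=42"]
    with patch.object(sys, "argv", testargs):
        results = run_ner.main()      # main() returns the eval results dict
    assert results is not None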
DeeBERT
DeeBERT-master/examples/__init__.py
0
0
0
py
DeeBERT
DeeBERT-master/examples/run_multiple_choice.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for multiple choice (Bert, Roberta, XLNet).""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer, XLNetConfig, XLNetForMultipleChoice, XLNetTokenizer, RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from utils_multiple_choice import (convert_examples_to_features, processors) logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, RobertaConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForMultipleChoice, XLNetTokenizer), 'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer) } def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def simple_accuracy(preds, labels): return (preds == labels).mean() def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, 
num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 best_dev_acc, best_dev_loss = 0.0, 99999999999.0 best_steps = 0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids 'labels': batch[3]} outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) if results["eval_acc"] > best_dev_acc: best_dev_acc = results["eval_acc"] best_dev_loss = results["eval_loss"] best_steps = global_step if args.do_test: results_test = evaluate(args, model, tokenizer, test=True) for key, value in results_test.items(): tb_writer.add_scalar('test_{}'.format(key), value, global_step) logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step)) 
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step)) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step, best_steps def evaluate(args, model, tokenizer, prefix="", test=False): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids 'labels': batch[3]} outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = np.argmax(preds, axis=1) acc = simple_accuracy(preds, out_label_ids) result = {"eval_acc": acc, "eval_loss": eval_loss} results.update(result) output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test))) writer.write("model =%s\n" % str(args.model_name_or_path)) writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))) writer.write("train num epochs=%d\n" % args.num_train_epochs) writer.write("fp16 =%s\n" % args.fp16) writer.write("max seq length =%d\n" % args.max_seq_length) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() # Load data features from cache or dataset file if evaluate: cached_mode = 'dev' elif test: cached_mode = 'test' else: cached_mode = 'train' assert (evaluate == True and test == True) == False cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format( cached_mode, list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if evaluate: examples = processor.get_dev_examples(args.data_dir) elif test: examples = processor.get_test_examples(args.data_dir) else: examples = processor.get_train_examples(args.data_dir) logger.info("Training number: %s", str(len(examples))) features = convert_examples_to_features( examples, label_list, args.max_seq_length, tokenizer, pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0 ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 
0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set') parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) best_steps = 0 # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: if not args.do_train: args.output_dir = args.model_name_or_path checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) if args.do_test and args.local_rank in [-1, 0]: if not args.do_train: args.output_dir = args.model_name_or_path checkpoints = [args.output_dir] # if args.eval_all_checkpoints: # can not use this to do test!! # checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) # logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix, test=True) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) if best_steps: logger.info("best steps of eval acc is the following checkpoints: %s", best_steps) return results if __name__ == "__main__": main()
29,296
50.945035
168
py
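A tiny sketch of the select_field and simple_accuracy helpers defined above, using a stand-in feature object rather than real tokenized multiple-choice data.

import numpy as np
from collections import namedtuple

Feature = namedtuple("Feature", ["choices_features", "label"])
features = [
    Feature(choices_features=[{"input_ids": [1, 2]}, {"input_ids": [3, 4]}], label=0),
    Feature(choices_features=[{"input_ids": [5, 6]}, {"input_ids": [7, 8]}], label=1),
]

# select_field gathers one field across every answer choice of every example
input_ids = [[choice["input_ids"] for choice in f.choices_features] for f in features]
print(input_ids)  # [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]

# simple_accuracy is just the mean of exact matches between preds and labels
preds = np.array([0, 1])
labels = np.array([f.label for f in features])
print((preds == labels).mean())  # 1.0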
DeeBERT
DeeBERT-master/examples/utils_ner.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """

from __future__ import absolute_import, division, print_function

import logging
import os
from io import open

logger = logging.getLogger(__name__)


class InputExample(object):
    """A single training/test example for token classification."""

    def __init__(self, guid, words, labels):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.words = words
        self.labels = labels


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_ids):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids


def read_examples_from_file(data_dir, mode):
    file_path = os.path.join(data_dir, "{}.txt".format(mode))
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        for line in f:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                if words:
                    examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                                 words=words,
                                                 labels=labels))
                    guid_index += 1
                    words = []
                    labels = []
            else:
                splits = line.split(" ")
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
        if words:
            examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                         words=words,
                                         labels=labels))
    return examples


def convert_examples_to_features(examples,
                                 label_list,
                                 max_seq_length,
                                 tokenizer,
                                 cls_token_at_end=False,
                                 cls_token="[CLS]",
                                 cls_token_segment_id=1,
                                 sep_token="[SEP]",
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 pad_token_label_id=-1,
                                 sequence_a_segment_id=0,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` defines the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
    """

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        label_ids = []
        for word, label in zip(example.words, example.labels):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend([label_map[label]] +
                             [pad_token_label_id] * (len(word_tokens) - 1))

        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[:(max_seq_length - special_tokens_count)]
            label_ids = label_ids[:(max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
        else:
            input_ids += ([pad_token] * padding_length)
            input_mask += ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids += ([pad_token_segment_id] * padding_length)
            label_ids += ([pad_token_label_id] * padding_length)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_ids=label_ids))
    return features


def get_labels(path):
    if path:
        with open(path, "r") as f:
            labels = f.read().splitlines()
        if "O" not in labels:
            labels = ["O"] + labels
        return labels
    else:
        return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
9,181
42.107981
109
py
DeeBERT
DeeBERT-master/examples/utils_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Load SQuAD dataset. """ from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from tqdm import tqdm from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize # Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, cls_index, p_mask, paragraph_len, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.cls_index = cls_index self.p_mask = p_mask self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): 
prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True, sequence_a_is_doc=False): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 # cnt_pos, cnt_neg = 0, 0 # max_N, max_M = 1024, 1024 # f = np.zeros((max_N, max_M), dtype=np.float32) features = [] for (example_index, example) in enumerate(tqdm(examples)): # if example_index % 100 == 0: # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 assert 
max_tokens_for_doc > 0 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implem also keep the classification token (set to 0) (not sure why...) p_mask = [] # CLS token at the beginning if not cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = 0 # XLNet: P SEP Q SEP CLS # Others: CLS Q SEP P SEP if not sequence_a_is_doc: # Query tokens += query_tokens segment_ids += [sequence_a_segment_id] * len(query_tokens) p_mask += [1] * len(query_tokens) # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # Paragraph for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) if not sequence_a_is_doc: segment_ids.append(sequence_b_segment_id) else: segment_ids.append(sequence_a_segment_id) p_mask.append(0) paragraph_len = doc_span.length if sequence_a_is_doc: # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) tokens += query_tokens segment_ids += [sequence_b_segment_id] * len(query_tokens) p_mask += [1] * len(query_tokens) # SEP token tokens.append(sep_token) segment_ids.append(sequence_b_segment_id) p_mask.append(1) # CLS token at the end if cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = len(tokens) - 1 # Index of classification token input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(pad_token) input_mask.append(0 if mask_padding_with_zero else 1) segment_ids.append(pad_token_segment_id) p_mask.append(1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 span_is_impossible = True else: if sequence_a_is_doc: doc_offset = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = cls_index end_position = cls_index if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: logger.info("impossible example") if is_training and not span_is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. 
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions # For XLNet (and XLM which uses the same head) RawResultExtended = collections.namedtuple("RawResultExtended", ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"]) def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging): """ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. 
Requires utils_squad_evaluate.py """ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) logger.info("Writing predictions to: %s", output_prediction_file) # logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] cur_null_score = result.cls_logits # if we could have irrelevant answers, get the min score of irrelevant score_null = min(score_null, cur_null_score) for i in range(start_n_top): for j in range(end_n_top): start_log_prob = result.start_top_log_probs[i] start_index = result.start_top_index[i] j_index = i * end_n_top + j end_log_prob = result.end_top_log_probs[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] # XLNet un-tokenizer # Let's keep it simple for now and see if we need all this later. 
# # tok_start_to_orig_index = feature.tok_start_to_orig_index # tok_end_to_orig_index = feature.tok_end_to_orig_index # start_orig_pos = tok_start_to_orig_index[pred.start_index] # end_orig_pos = tok_end_to_orig_index[pred.end_index] # paragraph_text = example.paragraph_text # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() # Previously used Bert untokenizer tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") with open(orig_data_file, "r", encoding='utf-8') as reader: orig_data = json.load(reader)["data"] qid_to_has_ans = make_qid_to_has_ans(orig_data) has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) out_eval = {} find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) return out_eval def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. 
# # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
42,376
40.627701
112
py
DeeBERT
DeeBERT-master/examples/contrib/run_transfo_xl.py
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model evaluation script.
    Adapted from https://github.com/kimiyoung/transformer-xl.
    In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py

    This script with default values evaluates a pretrained Transformer-XL on WikiText 103
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import logging
import time
import math

import torch

from transformers import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
    parser.add_argument('--model_name', type=str, default='transfo-xl-wt103',
                        help='pretrained model name')
    parser.add_argument('--split', type=str, default='test',
                        choices=['all', 'valid', 'test'],
                        help='which split to evaluate')
    parser.add_argument('--batch_size', type=int, default=10,
                        help='batch size')
    parser.add_argument('--tgt_len', type=int, default=128,
                        help='number of tokens to predict')
    parser.add_argument('--ext_len', type=int, default=0,
                        help='length of the extended context')
    parser.add_argument('--mem_len', type=int, default=1600,
                        help='length of the retained previous heads')
    parser.add_argument('--clamp_len', type=int, default=1000,
                        help='max positional embedding index')
    parser.add_argument('--no_cuda', action='store_true',
                        help='Do not use CUDA even though CUDA is available')
    parser.add_argument('--work_dir', type=str, required=True,
                        help='path to the work_dir')
    parser.add_argument('--no_log', action='store_true',
                        help='do not log the eval result')
    parser.add_argument('--same_length', action='store_true',
                        help='set same length attention with masking')
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    assert args.ext_len >= 0, 'extended context length must be non-negative'

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    logger.info("device: {}".format(device))

    # Load a pre-processed dataset
    # You can also build the corpus yourself using TransfoXLCorpus methods
    # The pre-processing involves computing word frequencies to prepare the Adaptive input and SoftMax
    # and tokenizing the dataset
    # The pre-processed corpus is a conversion (using the conversion script)
    tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name)
    corpus = TransfoXLCorpus.from_pretrained(args.model_name)
    ntokens = len(corpus.vocab)

    va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len,
                                  device=device, ext_len=args.ext_len)
    te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len,
                                  device=device, ext_len=args.ext_len)

    # Load a pre-trained model
    model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
    model = model.to(device)

    logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(
        args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))

    model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
    if args.clamp_len > 0:
        model.clamp_len = args.clamp_len
    if args.same_length:
        model.same_length = True

    ###############################################################################
    # Evaluation code
    ###############################################################################
    def evaluate(eval_iter):
        # Turn on evaluation mode which disables dropout.
        model.eval()
        total_len, total_loss = 0, 0.
        start_time = time.time()
        with torch.no_grad():
            mems = None
            for idx, (data, target, seq_len) in enumerate(eval_iter):
                ret = model(data, lm_labels=target, mems=mems)
                loss, _, mems = ret
                loss = loss.mean()
                total_loss += seq_len * loss.item()
                total_len += seq_len
            total_time = time.time() - start_time
        logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format(
                total_time, 1000 * total_time / (idx+1)))
        return total_loss / total_len

    # Run on test data.
    if args.split == 'all':
        test_loss = evaluate(te_iter)
        valid_loss = evaluate(va_iter)
    elif args.split == 'valid':
        valid_loss = evaluate(va_iter)
        test_loss = None
    elif args.split == 'test':
        test_loss = evaluate(te_iter)
        valid_loss = None

    def format_log(loss, split):
        log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(
            split, loss, math.exp(loss))
        return log_str

    log_str = ''
    if valid_loss is not None:
        log_str += format_log(valid_loss, 'valid')
    if test_loss is not None:
        log_str += format_log(test_loss, 'test')

    logger.info('=' * 100)
    logger.info(log_str)
    logger.info('=' * 100)


if __name__ == '__main__':
    main()
6,742
42.785714
111
py
DeeBERT
DeeBERT-master/examples/contrib/run_swag.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner. Finetuning the library models for multiple choice on SWAG (Bert). """ from __future__ import absolute_import, division, print_function import argparse import logging import csv import os import random import sys import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in [BertConfig]), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), } class SwagExample(object): """A single training/test example for the SWAG dataset.""" def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label = None): self.swag_id = swag_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "swag_id: {}".format(self.swag_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_swag_examples(input_file, is_training=True): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ SwagExample( swag_id = line[2], context_sentence = line[4], start_ending = line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], label = int(line[11]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in tqdm(enumerate(examples)): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_swag_examples(input_file) features = convert_examples_to_features( examples, tokenizer, args.max_seq_length, not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in features], dtype=torch.long) if evaluate: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) else: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if output_examples: return dataset, examples, features return dataset def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = 
AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], #'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % 
args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs['input_ids'].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SWAG csv for training. E.g., train.csv") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SWAG csv for predictions. 
E.g., val.csv or test.csv") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.local_rank == -1 or torch.distributed.get_rank() == 0: # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: checkpoints = [args.output_dir] else: # if do_train is False and do_eval is true, load model directly from pretrained. checkpoints = [args.model_name_or_path] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) tokenizer = tokenizer_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
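# A minimal, self-contained sketch (not from the original script) of the gradient-accumulation
# pattern used in train() above, reduced to plain PyTorch with a toy linear model and random
# data. The names (accum_steps, toy_loader) and all hyper-parameter values are illustrative only.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 1.0)  # stand-in for the warmup schedule
loss_fct = nn.CrossEntropyLoss()

toy_loader = DataLoader(TensorDataset(torch.randn(32, 10), torch.randint(0, 2, (32,))), batch_size=4)
accum_steps = 2  # plays the role of --gradient_accumulation_steps

model.zero_grad()
for step, (x, y) in enumerate(toy_loader):
    loss = loss_fct(model(x), y) / accum_steps  # scale the loss before backward
    loss.backward()                             # gradients accumulate across micro-batches
    if (step + 1) % accum_steps == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # plays the role of --max_grad_norm
        optimizer.step()
        scheduler.step()                        # update the learning-rate schedule
        model.zero_grad()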
31,683
45.731563
154
py
DeeBERT
DeeBERT-master/examples/contrib/run_openai_gpt.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT model fine-tuning script. Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset: python run_openai_gpt.py \ --model_name openai-gpt \ --do_train \ --do_eval \ --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \ --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \ --output_dir ../log \ --train_batch_size 16 \ """ import argparse import os import csv import random import logging from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME, get_linear_schedule_with_warmup) ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz" logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def load_rocstories_dataset(dataset_path): """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """ with open(dataset_path, encoding='utf_8') as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1)) return output def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for i, (story, cont1, cont2, mc_label), in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, :len(with_cont1)] = with_cont1 input_ids[i, 1, :len(with_cont2)] = 
with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = len(with_cont2) - 1 lm_labels[i, 0, :len(with_cont1)] = with_cont1 lm_labels[i, 1, :len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets def main(): parser = argparse.ArgumentParser() parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name') parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument('--train_dataset', type=str, default='') parser.add_argument('--eval_dataset', type=str, default='') parser.add_argument('--seed', type=int, default=42) parser.add_argument('--num_train_epochs', type=int, default=3) parser.add_argument('--train_batch_size', type=int, default=8) parser.add_argument('--eval_batch_size', type=int, default=16) parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument('--max_grad_norm', type=int, default=1) parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training \ steps to perform. Override num_train_epochs.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before\ performing a backward/update pass.") parser.add_argument('--learning_rate', type=float, default=6.25e-5) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--lr_schedule', type=str, default='warmup_linear') parser.add_argument('--weight_decay', type=float, default=0.01) parser.add_argument('--lm_coef', type=float, default=0.9) parser.add_argument('--n_valid', type=int, default=374) parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() print(args) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(device, n_gpu)) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset special_tokens = ['_start_', '_delimiter_', '_classify_'] tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name) tokenizer.add_tokens(special_tokens) special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens) model = 
OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name) model.resize_token_embeddings(len(tokenizer)) model.to(device) # Load and encode the datasets if not args.train_dataset and not args.eval_dataset: roc_stories = cached_path(ROCSTORIES_URL) def tokenize_and_encode(obj): """ Tokenize and encode a nested object """ if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) elif isinstance(obj, int): return obj return list(tokenize_and_encode(o) for o in obj) logger.info("Encoding dataset...") train_dataset = load_rocstories_dataset(args.train_dataset) eval_dataset = load_rocstories_dataset(args.eval_dataset) datasets = (train_dataset, eval_dataset) encoded_datasets = tokenize_and_encode(datasets) # Compute the max input length for the Transformer max_length = model.config.n_positions // 2 - 2 input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 \ for dataset in encoded_datasets for story, cont1, cont2, _ in dataset) input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids) train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1] train_data = TensorDataset(*train_tensor_dataset) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) eval_data = TensorDataset(*eval_tensor_dataset) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) # Prepare optimizer if args.do_train: if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps //\ (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader)\ // args.gradient_accumulation_steps * args.num_train_epochs param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_steps = 0 tqdm_bar = tqdm(train_dataloader, desc="Training") for step, batch in enumerate(tqdm_bar): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) loss = args.lm_coef * losses[0] + losses[1] loss.backward() scheduler.step() optimizer.step() optimizer.zero_grad() tr_loss += loss.item() exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item() nb_tr_steps += 1 tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0]) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, 'module') 
else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir) tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir) model.to(device) if args.do_eval: model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch with torch.no_grad(): _, mc_loss, _, mc_logits = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) mc_logits = mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to('cpu').numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss/nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == '__main__': main()
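# A small worked example (not from the original script) of the input layout that
# pre_process_datasets() builds for each RocStories example: two rows of the form
# [start] story [delimiter] continuation [clf]. The token ids below are made up;
# real ids come from OpenAIGPTTokenizer.
start_token, delimiter_token, clf_token = 9001, 9002, 9003  # hypothetical special-token ids
story, cont1, cont2 = [11, 12, 13], [21, 22], [31, 32]

with_cont1 = [start_token] + story + [delimiter_token] + cont1 + [clf_token]
with_cont2 = [start_token] + story + [delimiter_token] + cont2 + [clf_token]
print(with_cont1)  # [9001, 11, 12, 13, 9002, 21, 22, 9003]
print(with_cont2)  # [9001, 11, 12, 13, 9002, 31, 32, 9003]
# mc_token_ids stores the position of the [clf] token in each row:
print(len(with_cont1) - 1, len(with_cont2) - 1)  # 7 7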
14,471
48.731959
132
py
DeeBERT
DeeBERT-master/examples/contrib/run_camembert.py
from pathlib import Path
import tarfile
import urllib.request

import torch

from transformers.tokenization_camembert import CamembertTokenizer
from transformers.modeling_camembert import CamembertForMaskedLM


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join([tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append((
                masked_input.replace(' {0}'.format(masked_token), predicted_token),
                values[index].item(),
                predicted_token,
            ))
        else:
            topk_filled_outputs.append((
                masked_input.replace(masked_token, predicted_token),
                values[index].item(),
                predicted_token,
            ))
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
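# A tiny illustration (not in the original file) of the top-k step inside fill_mask():
# softmax over the logits at the masked position, then keep the k most probable ids.
# The logits below are made up.
example_logits = torch.tensor([2.0, 0.5, 1.0, -1.0])
example_prob = example_logits.softmax(dim=0)
top_values, top_indices = example_prob.topk(k=2, dim=0)
print(top_indices.tolist())  # [0, 2] -- the two highest-probability ids, best first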
2,015
40.142857
114
py
DeeBERT
DeeBERT-master/examples/distillation/grouped_batch_sampler.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from PyTorch Vision (https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py)
"""
import bisect
import copy
from collections import defaultdict

import numpy as np
from torch.utils.data.sampler import BatchSampler, Sampler

from utils import logger


def _quantize(x, bins):
    bins = copy.deepcopy(bins)
    bins = sorted(bins)
    quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
    return quantized


def create_lengths_groups(lengths, k=0):
    bins = np.arange(start=3, stop=k, step=4).tolist() if k > 0 else [10]
    groups = _quantize(lengths, bins)
    # count number of elements per group
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    logger.info("Using {} as bins for aspect lengths quantization".format(fbins))
    logger.info("Count of instances per bin: {}".format(counts))
    return groups


class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that the batch only contains elements from the same group.
    It also tries to provide mini-batches that follow an ordering which is
    as close as possible to the ordering from the original sampler.
    Arguments:
        sampler (Sampler): Base sampler.
        group_ids (list[int]): If the sampler produces indices in range [0, N),
            `group_ids` must be a list of `N` ints which contains the group id of each sample.
            The group ids must be a continuous set of integers starting
            from 0, i.e. they must be in the range [0, num_groups).
        batch_size (int): Size of mini-batch.
    """
    def __init__(self, sampler, group_ids, batch_size):
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}".format(sampler))
        self.sampler = sampler
        self.group_ids = group_ids
        self.batch_size = batch_size

    def __iter__(self):
        buffer_per_group = defaultdict(list)
        samples_per_group = defaultdict(list)

        num_batches = 0
        for idx in self.sampler:
            group_id = self.group_ids[idx]
            buffer_per_group[group_id].append(idx)
            samples_per_group[group_id].append(idx)
            if len(buffer_per_group[group_id]) == self.batch_size:
                yield buffer_per_group[group_id]  # TODO
                num_batches += 1
                del buffer_per_group[group_id]
            assert len(buffer_per_group[group_id]) < self.batch_size

        # now we have run out of elements that satisfy
        # the group criteria, let's return the remaining
        # elements so that the size of the sampler is deterministic
        expected_num_batches = len(self)
        num_remaining = expected_num_batches - num_batches
        if num_remaining > 0:
            # for the remaining batches, group the batches by similar lengths
            batch_idx = []
            for group_id, idxs in sorted(buffer_per_group.items(), key=lambda x: x[0]):
                batch_idx.extend(idxs)
                if len(batch_idx) >= self.batch_size:
                    yield batch_idx[:self.batch_size]
                    batch_idx = batch_idx[self.batch_size:]
                    num_remaining -= 1
            if len(batch_idx) > 0:
                yield batch_idx
                num_remaining -= 1
        assert num_remaining == 0

    def __len__(self):
        """
        Return the number of mini-batches rather than the number of samples.
        """
        return (len(self.sampler) + self.batch_size - 1) // self.batch_size
4,368
40.216981
125
py
DeeBERT
DeeBERT-master/examples/distillation/utils.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utils to train DistilBERT adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import git import json import os import socket import torch import numpy as np import logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def git_log(folder_path: str): """ Log commit info. """ repo = git.Repo(search_parent_directories=True) repo_infos = { 'repo_id': str(repo), 'repo_sha': str(repo.head.object.hexsha), 'repo_branch': str(repo.active_branch) } with open(os.path.join(folder_path, 'git_log.json'), 'w') as f: json.dump(repo_infos, f, indent=4) def init_gpu_params(params): """ Handle single and multi-GPU / multi-node. """ if params.n_gpu <= 0: params.local_rank = 0 params.master_port = -1 params.is_master = True params.multi_gpu = False return assert torch.cuda.is_available() logger.info('Initializing GPUs') if params.n_gpu > 1: assert params.local_rank != -1 params.world_size = int(os.environ['WORLD_SIZE']) params.n_gpu_per_node = int(os.environ['N_GPU_NODE']) params.global_rank = int(os.environ['RANK']) # number of nodes / node ID params.n_nodes = params.world_size // params.n_gpu_per_node params.node_id = params.global_rank // params.n_gpu_per_node params.multi_gpu = True assert params.n_nodes == int(os.environ['N_NODES']) assert params.node_id == int(os.environ['NODE_RANK']) # local job (single GPU) else: assert params.local_rank == -1 params.n_nodes = 1 params.node_id = 0 params.local_rank = 0 params.global_rank = 0 params.world_size = 1 params.n_gpu_per_node = 1 params.multi_gpu = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode params.is_master = params.node_id == 0 and params.local_rank == 0 params.multi_node = params.n_nodes > 1 # summary PREFIX = f"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes) logger.info(PREFIX + "Node ID : %i" % params.node_id) logger.info(PREFIX + "Local rank : %i" % params.local_rank) logger.info(PREFIX + "World size : %i" % params.world_size) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node) logger.info(PREFIX + "Master : %s" % str(params.is_master)) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node)) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu)) logger.info(PREFIX + "Hostname : %s" % socket.gethostname()) # set GPU device torch.cuda.set_device(params.local_rank) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed") torch.distributed.init_process_group( 
init_method='env://', backend='nccl', ) def set_seed(args): """ Set the random seed. """ np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
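# A minimal sketch (not in the original file) of how these helpers are driven from a
# training script. The Namespace fields mirror the attributes that init_gpu_params() and
# set_seed() read; n_gpu=0 keeps the example runnable on a CPU-only machine.
if __name__ == "__main__":
    from argparse import Namespace

    params = Namespace(n_gpu=0, local_rank=-1, seed=42)
    init_gpu_params(params)  # CPU path: sets local_rank=0, is_master=True, multi_gpu=False
    set_seed(params)
    logger.info("is_master=%s - multi_gpu=%s", params.is_master, params.multi_gpu)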
4,308
32.146154
104
py
DeeBERT
DeeBERT-master/examples/distillation/lm_seqs_dataset.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset to distilled models adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import torch from torch.utils.data import Dataset import numpy as np from utils import logger class LmSeqsDataset(Dataset): """Custom Dataset wrapping language modeling sequences. Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths. Input: ------ params: `NameSpace` parameters data: `List[np.array[int]] """ def __init__(self, params, data): self.params = params self.token_ids = np.array(data) self.lengths = np.array([len(t) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.check() self.print_statistics() def __getitem__(self, index): return (self.token_ids[index], self.lengths[index]) def __len__(self): return len(self.lengths) def check(self): """ Some sanity checks """ assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def remove_long_sequences(self): """ Sequences that are too long are splitted by chunk of max_model_input_size. """ max_len = self.params.max_model_input_size indices = self.lengths > max_len logger.info(f'Splitting {sum(indices)} too long sequences.') def divide_chunks(l, n): return [l[i:i + n] for i in range(0, len(l), n)] new_tok_ids = [] new_lengths = [] if self.params.mlm: cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids, self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: sub_seqs = [] for sub_s in divide_chunks(seq_, max_len-2): if sub_s[0] != cls_id: sub_s = np.insert(sub_s, 0, cls_id) if sub_s[-1] != sep_id: sub_s = np.insert(sub_s, len(sub_s), sep_id) assert len(sub_s) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(sub_s) new_tok_ids.extend(sub_seqs) new_lengths.extend([len(l) for l in sub_seqs]) self.token_ids = np.array(new_tok_ids) self.lengths = np.array(new_lengths) def remove_empty_sequences(self): """ Too short sequences are simply removed. This could be tunedd. """ init_size = len(self) indices = self.lengths > 11 self.token_ids = self.token_ids[indices] self.lengths = self.lengths[indices] new_size = len(self) logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.') def print_statistics(self): """ Print some statistics on the corpus. Only the master process. 
""" if not self.params.is_master: return logger.info(f'{len(self)} sequences') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unkown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unkown} unknown tokens (covering {100*nb_unkown/data_len:.2f}% of the data)') def batch_sequences(self, batch): """ Do the padding and transform into torch.tensor. """ token_ids = [t[0] for t in batch] lengths = [t[1] for t in batch] assert len(token_ids) == len(lengths) # Max for paddings max_seq_len_ = max(lengths) # Pad token ids if self.params.mlm: pad_idx = self.params.special_tok_ids['pad_token'] else: pad_idx = self.params.special_tok_ids['unk_token'] tk_ = [list(t.astype(int)) + [pad_idx]*(max_seq_len_-len(t)) for t in token_ids] assert len(tk_) == len(token_ids) assert all(len(t) == max_seq_len_ for t in tk_) tk_t = torch.tensor(tk_) # (bs, max_seq_len_) lg_t = torch.tensor(lengths) # (bs) return tk_t, lg_t
5,453
34.881579
111
py
DeeBERT
DeeBERT-master/examples/distillation/run_squad_w_distillation.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is the exact same script as `examples/run_squad.py` (as of 2019, October 4th) with an additional and optional step of distillation.""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler import torch.nn.functional as F import torch.nn as nn try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from ..utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended) # The follwing import is the official SQuAD evaluation script (2.0). 
# You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from ..utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() if teacher is not None: teacher.eval() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss, start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: if 'token_type_ids' not in inputs: inputs['token_type_ids'] = None if args.teacher_type == 'xlm' else batch[2] with torch.no_grad(): start_logits_tea, end_logits_tea = teacher(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask']) assert start_logits_tea.size() == start_logits_stu.size() assert end_logits_tea.size() == end_logits_stu.size() loss_fct = nn.KLDivLoss(reduction='batchmean') loss_start = loss_fct(F.log_softmax(start_logits_stu/args.temperature, dim=-1), F.softmax(start_logits_tea/args.temperature, dim=-1)) * (args.temperature**2) loss_end = loss_fct(F.log_softmax(end_logits_stu/args.temperature, dim=-1), F.softmax(end_logits_tea/args.temperature, dim=-1)) * (args.temperature**2) loss_ce = (loss_start + loss_end)/2. 
loss = args.alpha_ce*loss_ce + args.alpha_squad*loss if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, 
is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") # Distillation parameters (optional) parser.add_argument('--teacher_type', default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.") parser.add_argument('--teacher_name_or_path', default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.") parser.add_argument('--alpha_ce', default=0.5, type=float, help="Distillation loss linear weight. Only for distillation.") parser.add_argument('--alpha_squad', default=0.5, type=float, help="True SQuAD loss linear weight. Only for distillation.") parser.add_argument('--temperature', default=2.0, type=float, help="Distillation temperature. 
Only for distillation.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. 
" "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = 
tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_ce > 0. assert args.alpha_ce + args.alpha_squad > 0. assert args.teacher_type != 'distilbert', "We constraint teachers not to be of type DistilBERT." teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) teacher = teacher_model_class.from_pretrained(args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None) teacher.to(args.device) else: teacher = None if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
33,733
55.129784
151
py
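The argument parser above exposes --teacher_type, --teacher_name_or_path, --alpha_ce, --alpha_squad and --temperature, which control how teacher soft targets are mixed with the regular SQuAD objective. Below is a minimal sketch of that loss combination in the usual Hinton-style formulation; the function name and tensor arguments are illustrative, and this is not the repository's exact train() implementation (which is defined earlier in this file).

import torch.nn.functional as F

def span_distillation_loss(start_logits, end_logits,
                           t_start_logits, t_end_logits,
                           start_positions, end_positions,
                           alpha_ce=0.5, alpha_squad=0.5, temperature=2.0):
    T = temperature
    # Soft targets: KL between temperature-scaled student and teacher distributions,
    # rescaled by T**2 so gradients keep a comparable magnitude.
    loss_ce = (F.kl_div(F.log_softmax(start_logits / T, dim=-1),
                        F.softmax(t_start_logits / T, dim=-1),
                        reduction="batchmean")
               + F.kl_div(F.log_softmax(end_logits / T, dim=-1),
                          F.softmax(t_end_logits / T, dim=-1),
                          reduction="batchmean")) * (T ** 2) / 2.0
    # Hard targets: standard cross-entropy on the gold start/end positions.
    loss_squad = (F.cross_entropy(start_logits, start_positions)
                  + F.cross_entropy(end_logits, end_positions)) / 2.0
    return alpha_ce * loss_ce + alpha_squad * loss_squad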
DeeBERT
DeeBERT-master/examples/distillation/train.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training the distilled model. Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. """ import os import argparse import pickle import json import shutil import numpy as np import torch from transformers import BertConfig, BertForMaskedLM, BertTokenizer from transformers import RobertaConfig, RobertaForMaskedLM, RobertaTokenizer from transformers import DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer from transformers import GPT2Config, GPT2LMHeadModel, GPT2Tokenizer from distiller import Distiller from utils import git_log, logger, init_gpu_params, set_seed from lm_seqs_dataset import LmSeqsDataset MODEL_CLASSES = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer) } def sanity_checks(args): """ A bunch of args sanity checks to perform even starting... """ assert (args.mlm and args.alpha_mlm > 0.) or (not args.mlm and args.alpha_mlm == 0.) assert (args.alpha_mlm > 0. and args.alpha_clm == 0.) or (args.alpha_mlm == 0. and args.alpha_clm > 0.) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ['roberta', 'distilbert']) and (args.teacher_type in ['roberta', 'bert']) else: assert (args.student_type in ['gpt2']) and (args.teacher_type in ['gpt2']) assert args.teacher_type == args.student_type or (args.student_type=='distilbert' and args.teacher_type=='bert') assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ['roberta'] assert args.alpha_ce >= 0. assert args.alpha_mlm >= 0. assert args.alpha_clm >= 0. assert args.alpha_mse >= 0. assert args.alpha_cos >= 0. assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0. 
def freeze_pos_embeddings(student, args): if args.student_type == 'roberta': student.roberta.embeddings.position_embeddings.weight.requires_grad = False elif args.student_type == 'gpt2': student.transformer.wpe.weight.requires_grad = False def freeze_token_type_embeddings(student, args): if args.student_type == 'roberta': student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False def main(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--force", action='store_true', help="Overwrite dump_path if it already exists.") parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.") parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).") parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.") parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") parser.add_argument("--temperature", default=2., type=float, help="Temperature for the softmax temperature.") parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.") parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.") parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).") parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") parser.add_argument("--restrict_ce_to_mask", action='store_true', help="If true, compute the distilation loss only the [MLM] prediction distribution.") parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.") parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.") parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") parser.add_argument("--group_by_size", action='store_false', help="If true, group sequences that have similar length into the same batch. Default is true.") parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.") parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.") parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") parser.add_argument("--seed", type=int, default=56, help="Random seed") parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") args = parser.parse_args() sanity_checks(args) ## ARGS ## init_gpu_params(args) set_seed(args) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError(f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it' 'Use `--force` if you want to overwrite it') else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f'Experiment will be dumped and logged in {args.dump_path}') ### SAVE PARAMS ### logger.info(f'Param: {args}') with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f: json.dump(vars(args), f, indent=4) git_log(args.dump_path) student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] ### TOKENIZER ### tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) special_tok_ids = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): idx = tokenizer.all_special_tokens.index(tok_symbol) special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] logger.info(f'Special tokens {special_tok_ids}') args.special_tok_ids = special_tok_ids args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] ## DATA LOADER ## logger.info(f'Loading data from {args.data_file}') with open(args.data_file, 'rb') as fp: data = pickle.load(fp) if args.mlm: logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)') with open(args.token_counts, 'rb') as fp: counts = pickle.load(fp) 
token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): token_probs[idx] = 0. # do not predict special tokens token_probs = torch.from_numpy(token_probs) else: token_probs = None train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) logger.info(f'Data loader created.') ## STUDENT ## logger.info(f'Loading student config from {args.student_config}') stu_architecture_config = student_config_class.from_pretrained(args.student_config) stu_architecture_config.output_hidden_states = True if args.student_pretrained_weights is not None: logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}') student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) else: student = student_model_class(stu_architecture_config) if args.n_gpu > 0: student.to(f'cuda:{args.local_rank}') logger.info(f'Student loaded.') ## TEACHER ## teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) if args.n_gpu > 0: teacher.to(f'cuda:{args.local_rank}') logger.info(f'Teacher loaded from {args.teacher_name}.') ## FREEZING ## if args.freeze_pos_embs: freeze_pos_embeddings(student, args) if args.freeze_token_type_embds: freeze_token_type_embeddings(student, args) ## SANITY CHECKS ## assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size ## DISTILLER ## torch.cuda.empty_cache() distiller = Distiller(params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
13,598
45.731959
135
py
DeeBERT
DeeBERT-master/examples/distillation/distiller.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import os import math import psutil import time from tqdm import trange, tqdm import numpy as np import psutil import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import AdamW from torch.utils.data.distributed import DistributedSampler from torch.utils.data import RandomSampler, BatchSampler, DataLoader try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from transformers import get_linear_schedule_with_warmup from utils import logger from lm_seqs_dataset import LmSeqsDataset from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups class Distiller: def __init__(self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module): logger.info('Initializing Distiller') self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0. 
self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info(f'Using MLM loss for LM step.') self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info(f'Using CLM loss for LM step.') self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.: self.last_loss_mse = 0 if self.alpha_cos > 0.: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean') self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1) if self.alpha_mse > 0.: self.mse_loss_fct = nn.MSELoss(reduction='sum') if self.alpha_cos > 0.: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean') logger.info('--- Initializing model optimizer') assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay}, {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0} ] logger.info("------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad])) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW(optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98)) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize(self.student, self.optimizer, opt_level=self.params.fp16_opt_level) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True) self.is_master = 
params.is_master if self.is_master: logger.info('--- Initializing Tensorboard') self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train')) self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -1 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None]) bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token']) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -1 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. 
There is a -1 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None]) clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -1 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids['pad_token'] else: pad_id = self.params.special_tok_ids['unk_token'] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info('Starting training') self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}') if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}', 'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'}) iter_bar.close() if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}') self.end_epoch() if self.is_master: logger.info(f'Save very last checkpoint as `pytorch_model.bin`.') self.save_checkpoint(checkpoint_name=f'pytorch_model.bin') logger.info('Training is finished') def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. 
lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). """ if self.mlm: s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size) else: s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) assert s_logits.size() == t_logits.size() #https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 #https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1), F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2 loss = self.alpha_ce*loss_ce if self.alpha_mlm > 0.: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.: self.last_loss_mse = loss_mse.item() if self.alpha_cos > 0.: self.last_loss_cos = loss_cos.item() 
self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error('NaN detected') exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter) self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter) if param.grad is None: continue self.tensorboard.add_scalar(tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter) if self.alpha_mlm > 0.: self.tensorboard.add_scalar(tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter) if self.alpha_clm > 0.: self.tensorboard.add_scalar(tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter) if self.alpha_mse > 0.: self.tensorboard.add_scalar(tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter) if self.alpha_cos > 0.: self.tensorboard.add_scalar(tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="global/speed", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). Do some tensorboard logging and checkpoint saving. 
""" logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.') if self.is_master: self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth') self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = 'checkpoint.pth'): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
25,933
46.848708
163
py
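Distiller.round_batch above trims the batch and pads the sequence length to multiples of 8 when fp16 is enabled, which keeps tensor shapes friendly to tensor cores. The snippet below is a simplified, standalone sketch of the padding half of that idea (the real method also subsamples the batch dimension and chooses the pad or unk token depending on MLM/CLM); the helper name is illustrative.

import torch

def pad_seq_len_to_multiple_of_8(token_ids, pad_id):
    # Pad the time dimension so its length is a multiple of 8.
    bs, seq_len = token_ids.size()
    remainder = seq_len % 8
    if remainder == 0:
        return token_ids
    padding = token_ids.new_full((bs, 8 - remainder), pad_id)
    return torch.cat([token_ids, padding], dim=1)

x = torch.randint(0, 100, (4, 13))
print(pad_seq_len_to_multiple_of_8(x, pad_id=0).shape)  # torch.Size([4, 16])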
DeeBERT
DeeBERT-master/examples/distillation/scripts/token_counts.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training the distilled model.
"""
from collections import Counter
import argparse
import pickle
import logging

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)")
    parser.add_argument("--data_file", type=str, default="data/dump.bert-base-uncased.pickle",
                        help="The binarized dataset.")
    parser.add_argument("--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle",
                        help="The dump file.")
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0]*args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
2,062
38.673077
129
py
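The counts produced by this script are consumed in train.py above as token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing, with special-token entries zeroed out before mask positions are sampled. A toy illustration with made-up counts for a five-token vocabulary (index 0 standing in for a special token):

import numpy as np

counts = np.array([1000, 500, 50, 5, 0], dtype=np.float64)  # toy frequencies
smoothing = 0.7                                             # --mlm_smoothing default in train.py
token_probs = np.maximum(counts, 1) ** -smoothing           # rarer tokens get relatively more mass
token_probs[0] = 0.0                                        # never mask special tokens
token_probs /= token_probs.sum()
print(token_probs.round(3))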
DeeBERT
DeeBERT-master/examples/distillation/scripts/extract.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training the distilled model.
Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2.
"""
from transformers import BertForMaskedLM, RobertaForMaskedLM, GPT2LMHeadModel
import torch
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned Distillation")
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default='roberta-large', type=str)
    parser.add_argument("--dump_checkpoint", default='serialization_dir/tf_roberta_048131723.pth', type=str)
    parser.add_argument("--vocab_transform", action='store_true')
    args = parser.parse_args()

    if args.model_type == 'roberta':
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'
    elif args.model_type == 'gpt2':
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = 'transformer'

    state_dict = model.state_dict()
    compressed_sd = {}

    ### Embeddings ###
    if args.model_type == 'gpt2':
        for param_name in ['wte.weight', 'wpe.weight']:
            compressed_sd[f'{prefix}.{param_name}'] = state_dict[f'{prefix}.{param_name}']
    else:
        for w in ['word_embeddings', 'position_embeddings', 'token_type_embeddings']:
            param_name = f'{prefix}.embeddings.{w}.weight'
            compressed_sd[param_name] = state_dict[param_name]
        for w in ['weight', 'bias']:
            param_name = f'{prefix}.embeddings.LayerNorm.{w}'
            compressed_sd[param_name] = state_dict[param_name]

    ### Transformer Blocks ###
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == 'gpt2':
            for layer in ['ln_1', 'attn.c_attn', 'attn.c_proj', 'ln_2', 'mlp.c_fc', 'mlp.c_proj']:
                for w in ['weight', 'bias']:
                    compressed_sd[f'{prefix}.h.{std_idx}.{layer}.{w}'] = \
                        state_dict[f'{prefix}.h.{teacher_idx}.{layer}.{w}']
            compressed_sd[f'{prefix}.h.{std_idx}.attn.bias'] = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
        else:
            for layer in ['attention.self.query', 'attention.self.key', 'attention.self.value',
                          'attention.output.dense', 'attention.output.LayerNorm',
                          'intermediate.dense', 'output.dense', 'output.LayerNorm']:
                for w in ['weight', 'bias']:
                    compressed_sd[f'{prefix}.encoder.layer.{std_idx}.{layer}.{w}'] = \
                        state_dict[f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}']
        std_idx += 1

    ### Language Modeling Head ###
    if args.model_type == 'roberta':
        for layer in ['lm_head.decoder.weight', 'lm_head.bias']:
            compressed_sd[f'{layer}'] = state_dict[f'{layer}']
        if args.vocab_transform:
            for w in ['weight', 'bias']:
                compressed_sd[f'lm_head.dense.{w}'] = state_dict[f'lm_head.dense.{w}']
                compressed_sd[f'lm_head.layer_norm.{w}'] = state_dict[f'lm_head.layer_norm.{w}']
    elif args.model_type == 'gpt2':
        for w in ['weight', 'bias']:
            compressed_sd[f'{prefix}.ln_f.{w}'] = state_dict[f'{prefix}.ln_f.{w}']
        compressed_sd[f'lm_head.weight'] = state_dict[f'lm_head.weight']

    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')

    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
4,326
47.077778
158
py
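extract.py maps teacher layers [0, 2, 4, 7, 9, 11] onto consecutive student indices while keeping the module names of the teacher architecture. Below is a generic sketch of that remapping pattern for an arbitrary state dict; the function name is illustrative and, unlike extract_distilbert.py in the next record, it does not rename modules to the DistilBERT layout.

def select_teacher_layers(state_dict, teacher_layers=(0, 2, 4, 7, 9, 11),
                          prefix="roberta.encoder.layer"):
    # Copy the chosen teacher layers onto student indices 0..len(teacher_layers)-1.
    compressed = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        src = f"{prefix}.{teacher_idx}."
        dst = f"{prefix}.{std_idx}."
        for name, tensor in state_dict.items():
            if name.startswith(src):
                compressed[dst + name[len(src):]] = tensor
    return compressed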
DeeBERT
DeeBERT-master/examples/distillation/scripts/extract_distilbert.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training DistilBERT.
Specific to BERT -> DistilBERT.
"""
from transformers import BertForMaskedLM, RobertaForMaskedLM
import torch
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned Distillation")
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default='bert-base-uncased', type=str)
    parser.add_argument("--dump_checkpoint", default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument("--vocab_transform", action='store_true')
    args = parser.parse_args()

    if args.model_type == 'bert':
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError(f'args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ['word_embeddings', 'position_embeddings']:
        compressed_sd[f'distilbert.embeddings.{w}.weight'] = \
            state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ['weight', 'bias']:
        compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = \
            state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ['weight', 'bias']:
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}']

            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}']

            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}']
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'] = \
                state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}']
        std_idx += 1

    compressed_sd[f'vocab_projector.weight'] = state_dict[f'cls.predictions.decoder.weight']
    compressed_sd[f'vocab_projector.bias'] = state_dict[f'cls.predictions.bias']
    if args.vocab_transform:
        for w in ['weight', 'bias']:
            compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}']
            compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']

    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')

    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
4,255
50.277108
158
py
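One way to sanity-check the checkpoint written by extract_distilbert.py is to load it into a freshly constructed DistilBERT student. Note that train.py above instead passes the file via --student_pretrained_weights to from_pretrained, so the standalone loading below is an assumption about how to inspect the dump, and the path is simply the script's default dump location.

import torch
from transformers import DistilBertConfig, DistilBertForMaskedLM

config = DistilBertConfig.from_pretrained("distilbert-base-uncased")
student = DistilBertForMaskedLM(config)
state_dict = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth", map_location="cpu")
load_result = student.load_state_dict(state_dict, strict=False)  # report any missing/unexpected keys
print(load_result)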
DeeBERT
DeeBERT-master/examples/distillation/scripts/binarized_data.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before distillation.
"""
import argparse
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, RobertaTokenizer, GPT2Tokenizer
import logging

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

def main():
    parser = argparse.ArgumentParser(description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument('--file_path', type=str, default='data/dump.txt',
                        help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased',
                        help="The tokenizer to use.")
    parser.add_argument('--dump_file', type=str, default='data/dump',
                        help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
    elif args.tokenizer_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token'] # `<s>`
        sep = tokenizer.special_tokens_map['sep_token'] # `</s>`
    elif args.tokenizer_type == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info(f'Start encoding')
    logger.info(f'{len(data)} examples to process.')

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start)/interval:.2f}s/expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')

    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    rslt_ = [np.uint16(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)

if __name__ == "__main__":
    main()
3,630
38.043011
140
py
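binarized_data.py wraps every line with the tokenizer's own boundary tokens before encoding and stores the ids as uint16 to shrink the pickle. A single-line version of that loop is sketched below (it requires the pretrained vocabulary to be downloadable; the sample sentence is made up):

import numpy as np
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
bos = tokenizer.special_tokens_map["cls_token"]   # "[CLS]"
sep = tokenizer.special_tokens_map["sep_token"]   # "[SEP]"

line = "Distillation compresses a large teacher into a smaller student."
token_ids = tokenizer.encode(f"{bos} {line.strip()} {sep}", add_special_tokens=False)
packed = np.uint16(token_ids)   # BERT vocab ids fit in 16 bits
print(packed[:5], packed.dtype)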
DeeBERT
DeeBERT-master/templates/adding_a_new_model/modeling_xxx.py
# coding=utf-8 # Copyright 2018 XXX Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XXX model. """ #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_xxx import XxxConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) #################################################### # This dict contrains shortcut names and associated url # for the pretrained weights provided with the models #################################################### XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-pytorch_model.bin", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-pytorch_model.bin", } #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_xxx(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (itself a sub-class of torch.nn.Module) #################################################### #################################################### # Here is an example of typical layer in a PyTorch model of the library # The classes are usually identical to the TF 2.0 ones without the 'TF' prefix. # # See the conversion methods in modeling_tf_pytorch_utils.py for more details #################################################### class XxxLayer(nn.Module): def __init__(self, config): super(XxxLayer, self).__init__() self.attention = XxxAttention(config) self.intermediate = XxxIntermediate(config) self.output = XxxOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None): attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs #################################################### # PreTrainedModel is a sub-class of torch.nn.Module # which take care of loading and saving pretrained weights # and various common utilities. 
# # Here you just need to specify a few (self-explanatory) # pointers for your model and the weights initialization # method if its not fully covered by PreTrainedModel's default method #################################################### class XxxPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = XxxConfig pretrained_model_archive_map = XXX_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_xxx base_model_prefix = "transformer" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XxxLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() XXX_START_DOCSTRING = r""" The XXX model was proposed in `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XXX_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.XxxTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare Xxx Model transformer outputting raw hidden-states without any specific head on top.", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxModel(XxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Xxx pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxModel.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(XxxModel, self).__init__(config) self.embeddings = XxxEmbeddings(config) self.encoder = XxxEncoder(config) self.pooler = XxxPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers ################################## # Replace this with your model code embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) sequence_output = encoder_outputs[0] outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForMaskedLM(XxxPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForMaskedLM.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(XxxForMaskedLM, self).__init__(config) self.transformer = XxxModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) self.init_weights() def get_output_embeddings(self): return self.lm_head def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForSequenceClassification(XxxPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForSequenceClassification.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(XxxForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForTokenClassification(XxxPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForTokenClassification.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] """ def __init__(self, config): super(XxxForTokenClassification, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForQuestionAnswering(XxxPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForQuestionAnswering.from_pretrained('xxx-large-uncased-whole-word-masking-finetuned-squad') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" input_ids = tokenizer.encode(input_text) token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) all_tokens = tokenizer.convert_ids_to_tokens(input_ids) print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) # a nice puppet """ def __init__(self, config): super(XxxForQuestionAnswering, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
34,579
51.473445
151
py
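# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the DeeBERT template
# file above). XxxModel.forward builds an additive attention mask by
# broadcasting the 2D padding mask to 4D and mapping masked positions to
# -10000.0 before the softmax. The standalone PyTorch snippet below
# reproduces only that trick with toy shapes; the tensor values are
# placeholders chosen for illustration.
# ---------------------------------------------------------------------------
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])           # (batch_size, seq_len); 0 marks padding
extended_mask = attention_mask[:, None, None, :].float()   # (batch_size, 1, 1, seq_len) for broadcasting
extended_mask = (1.0 - extended_mask) * -10000.0           # 0.0 where attended, -10000.0 where masked

raw_scores = torch.zeros(1, 2, 5, 5)                       # dummy (batch, num_heads, from_seq, to_seq) scores
probs = torch.softmax(raw_scores + extended_mask, dim=-1)  # padded keys receive ~zero attention weight
assert probs[0, 0, 0, 3].item() < 1e-4 and probs[0, 0, 0, 4].item() < 1e-4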
DeeBERT
DeeBERT-master/templates/adding_a_new_model/tokenization_xxx.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model XXX.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import logging import os import unicodedata from io import open from .tokenization_utils import PreTrainedTokenizer logger = logging.getLogger(__name__) #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-vocab.txt", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-vocab.txt", } } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'xxx-base-uncased': 512, 'xxx-large-uncased': 512, } #################################################### # Mapping from model shortcut names to a dictionary of additional # keyword arguments for Tokenizer `__init__`. # To be used for checkpoint specific configurations. #################################################### PRETRAINED_INIT_CONFIGURATION = { 'xxx-base-uncased': {'do_lower_case': True}, 'xxx-large-uncased': {'do_lower_case': True}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab class XxxTokenizer(PreTrainedTokenizer): r""" Constructs a XxxTokenizer. :class:`~transformers.XxxTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs): """Constructs a XxxTokenizer. 
Args: **vocab_file**: Path to a one-wordpiece-per-line vocabulary file **do_lower_case**: (`optional`) boolean (default True) Whether to lower case the input Only has an effect when do_basic_tokenize=True """ super(XxxTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) @property def vocab_size(self): return len(self.vocab) def _tokenize(self, text): """ Take as input a string and return a list of strings (tokens) for words/sub-words """ split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. """ return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: single sequence: [CLS] X [SEP] pair of sequences: [CLS] A [SEP] B [SEP] """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: if token_ids_1 is not None: raise ValueError("You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model.") return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = vocab_path with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file)) index = token_index writer.write(token + u'\n') index += 1 return (vocab_file,)
9,578
42.739726
117
py
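# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the DeeBERT template
# file above). It mirrors the sentence-pair layout produced by
# XxxTokenizer.build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences; the ids 101/102 for [CLS]/[SEP] and
# the toy token ids are placeholders chosen only for illustration.
# ---------------------------------------------------------------------------
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8, 9], [11, 12]                     # toy "sentence A" / "sentence B" token ids

input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

assert input_ids == [101, 7, 8, 9, 102, 11, 12, 102]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]      # 0 = sentence A span, 1 = sentence B span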
DeeBERT
DeeBERT-master/templates/adding_a_new_model/configuration_xxx.py
# coding=utf-8
# Copyright 2010, XXX authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XXX model configuration """

from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import sys
import six
from io import open

from .configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)

XXX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-config.json",
    'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-config.json",
}


class XxxConfig(PretrainedConfig):
    r"""
        :class:`~transformers.XxxConfig` is the configuration class to store the configuration of a
        `XxxModel`.

        Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XxxModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the encoder and pooler.
                If string, "gelu", "relu", "swish" and "gelu_new" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention probabilities.
            max_position_embeddings: The maximum sequence length that this model might ever be used with.
                Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into `XxxModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = XXX_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=50257,
                 n_positions=1024,
                 n_ctx=1024,
                 n_embd=768,
                 n_layer=12,
                 n_head=12,
                 resid_pdrop=0.1,
                 embd_pdrop=0.1,
                 attn_pdrop=0.1,
                 layer_norm_epsilon=1e-5,
                 initializer_range=0.02,
                 num_labels=1,
                 summary_type='cls_index',
                 summary_use_proj=True,
                 summary_activation=None,
                 summary_proj_to_labels=True,
                 summary_first_dropout=0.1,
                 **kwargs):
        super(XxxConfig, self).__init__(**kwargs)
        # Keep the integer vocabulary size when an int is passed; a path to a JSON
        # config file is handled below and overwrites the -1 placeholder.
        self.vocab_size = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, int) else -1
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels

        if isinstance(vocab_size_or_config_json_file, six.string_types):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif not isinstance(vocab_size_or_config_json_file, int):
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                " or the path to a pretrained model config file (str)"
            )

    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
5,273
39.259542
128
py
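# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the DeeBERT template
# file above). XxxConfig stores GPT-2 style attribute names (n_embd, n_layer,
# n_head, n_positions) and exposes the BERT-style names through read-only
# properties. The minimal stand-in class below shows the same aliasing
# pattern without the pretrained-archive machinery, so it runs on its own.
# ---------------------------------------------------------------------------
class TinyConfig(object):
    def __init__(self, n_embd=768, n_layer=12, n_head=12, n_positions=1024):
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_positions = n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_hidden_layers(self):
        return self.n_layer

    @property
    def num_attention_heads(self):
        return self.n_head


cfg = TinyConfig()
assert (cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads) == (768, 12, 12)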
DeeBERT
DeeBERT-master/templates/adding_a_new_model/convert_xxx_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert XXX checkpoint.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import torch from transformers import XxxConfig, XxxForPreTraining, load_tf_weights_in_xxx import logging logging.basicConfig(level=logging.INFO) def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, xxx_config_file, pytorch_dump_path): # Initialise PyTorch model config = XxxConfig.from_json_file(xxx_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = XxxForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_xxx(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--tf_checkpoint_path", default = None, type = str, required = True, help = "Path to the TensorFlow checkpoint path.") parser.add_argument("--xxx_config_file", default = None, type = str, required = True, help = "The config json file corresponding to the pre-trained XXX model. \n" "This specifies the model architecture.") parser.add_argument("--pytorch_dump_path", default = None, type = str, required = True, help = "Path to the output PyTorch model.") args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.xxx_config_file, args.pytorch_dump_path)
2,565
37.878788
100
py
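# ---------------------------------------------------------------------------
# Usage note (added by the editor, not part of the DeeBERT template file
# above). The conversion script declares three required flags in its argparse
# parser; the paths below are placeholders, not real checkpoints:
#
#   python convert_xxx_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xxx/model.ckpt \
#       --xxx_config_file    /path/to/xxx/config.json \
#       --pytorch_dump_path  /path/to/output/pytorch_model.bin
#
# The same conversion can also be driven from Python by calling
# convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, xxx_config_file,
# pytorch_dump_path) directly with those paths.
# ---------------------------------------------------------------------------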
DeeBERT
DeeBERT-master/templates/adding_a_new_model/modeling_tf_xxx.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XXX model. """ #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .configuration_xxx import XxxConfig from .modeling_tf_utils import TFPreTrainedModel, get_initializer from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) #################################################### # This dict contrains shortcut names and associated url # for the pretrained weights provided with the models #################################################### TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-tf_model.h5", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-tf_model.h5", } #################################################### # TF 2.0 Models are constructed using Keras imperative API by sub-classing # - tf.keras.layers.Layer for the layers and # - TFPreTrainedModel for the models (itself a sub-class of tf.keras.Model) #################################################### #################################################### # Here is an example of typical layer in a TF 2.0 model of the library # The classes are usually identical to the PyTorch ones and prefixed with 'TF'. # # Note that class __init__ parameters includes **kwargs (send to 'super'). # This let us have a control on class scope and variable names: # More precisely, we set the names of the class attributes (lower level layers) to # to the equivalent attributes names in the PyTorch model so we can have equivalent # class and scope structure between PyTorch and TF 2.0 models and easily load one in the other. 
# # See the conversion methods in modeling_tf_pytorch_utils.py for more details #################################################### class TFXxxLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFXxxLayer, self).__init__(**kwargs) self.attention = TFXxxAttention(config, name='attention') self.intermediate = TFXxxIntermediate(config, name='intermediate') self.transformer_output = TFXxxOutput(config, name='output') def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.transformer_output([intermediate_output, attention_output], training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs #################################################### # The full model without a specific pretrained or finetuning head is # provided as a tf.keras.layers.Layer usually called "TFXxxMainLayer" #################################################### class TFXxxMainLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFXxxMainLayer, self).__init__(**kwargs) def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def _prune_heads(self, heads_to_prune): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): # We allow three types of multi-inputs: # - traditional keyword arguments in the call method # - all the arguments provided as a dict in the first positional argument of call # - all the arguments provided as a list/tuple (ordered) in the first positional argument of call # The last two options are useful to use the tf.keras fit() method. if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask assert len(inputs) <= 5, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) assert len(inputs) <= 5, "Too many inputs." else: input_ids = inputs if attention_mask is None: attention_mask = tf.fill(tf.shape(input_ids), 1) if token_type_ids is None: token_type_ids = tf.fill(tf.shape(input_ids), 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. 
# Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) ################################## # Replace this with your model code embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training) sequence_output = encoder_outputs[0] outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, (hidden_states), (attentions) #################################################### # TFXxxPreTrainedModel is a sub-class of tf.keras.Model # which take care of loading and saving pretrained weights # and various common utilities. # Here you just need to specify a few (self-explanatory) # pointers for your model. #################################################### class TFXxxPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = XxxConfig pretrained_model_archive_map = TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" XXX_START_DOCSTRING = r""" The XXX model was proposed in `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. 
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: `model(inputs_ids) - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associaed to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XXX_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.XxxTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" @add_start_docstrings("The bare Xxx Model transformer outputing raw hidden-states without any specific head on top.", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxModel(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Xxx pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxModel tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxModel.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, *inputs, **kwargs): super(TFXxxModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFXxxMainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForMaskedLM(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForMaskedLM tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForMaskedLM.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForMaskedLM, self).__init__(config, *inputs, **kwargs) self.transformer = TFXxxMainLayer(config, name='transformer') self.mlm = TFXxxMLMHead(config, self.transformer.embeddings, name='mlm') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output, training=kwargs.get('training', False)) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here return outputs # prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForSequenceClassification(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **logits**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForSequenceClassification tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForSequenceClassification.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) logits = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForSequenceClassification, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here return outputs # logits, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForTokenClassification(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForTokenClassification tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForTokenClassification.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) scores = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForTokenClassification, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False)) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here return outputs # scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForQuestionAnswering(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **start_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForQuestionAnswering tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForQuestionAnswering.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) start_scores, end_scores = outputs[:2] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForQuestionAnswering, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.qa_outputs = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) outputs = (start_logits, end_logits,) + outputs[2:] return outputs # start_logits, end_logits, (hidden_states), (attentions)
27,575
53.605941
193
py
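The TFXxxForQuestionAnswering head in the modeling_tf_xxx.py template above returns raw start and end logits; turning them into an answer span is left to the caller. The following is a minimal, self-contained sketch of that decoding step (illustrative only, not part of the repository; the random numpy arrays stand in for real model outputs):

import numpy as np

def decode_best_span(start_logits, end_logits, max_answer_length=30):
    # Pick the (start, end) pair with the highest summed logit score,
    # subject to end >= start and a bounded span length.
    best_score, best_span = float("-inf"), (0, 0)
    for start in range(len(start_logits)):
        for end in range(start, min(start + max_answer_length, len(end_logits))):
            score = start_logits[start] + end_logits[end]
            if score > best_score:
                best_score, best_span = score, (start, end)
    return best_span

start_logits = np.random.randn(16)  # stand-ins for the head's outputs
end_logits = np.random.randn(16)
print(decode_best_span(start_logits, end_logits))

The SQuAD utilities later in this listing (utils_xxx.py) implement a more careful version of the same search, with n-best lists, max-context filtering and null-answer handling.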
DeeBERT
DeeBERT-master/templates/adding_a_new_model/tests/tokenization_xxx_test.py
# coding=utf-8
# Copyright 2018 XXX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
from io import open

from transformers.tokenization_xxx import (XxxTokenizer, VOCAB_FILES_NAMES)

from .tokenization_tests_commons import CommonTestCases


class XxxTokenizationTest(CommonTestCases.CommonTokenizerTester):

    tokenizer_class = XxxTokenizer

    def setUp(self):
        super(XxxTokenizationTest, self).setUp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
            "runn", "##ing", ",", "low", "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, "w", encoding='utf-8') as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return XxxTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = u"UNwant\u00E9d,running"
        output_text = u"unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])


if __name__ == '__main__':
    unittest.main()
2,092
35.086207
90
py
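The tokenizer test above expects its example string to split into ["un", "##want", "##ed", ",", "runn", "##ing"]. The sketch below is not the library's implementation; it is a toy greedy longest-match-first WordPiece lookup over the same vocabulary, showing why those pieces come out (the real XxxTokenizer also lowercases, strips accents and splits punctuation before this step):

vocab = {"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed",
         "wa", "un", "runn", "##ing", ",", "low", "lowest"}

def wordpiece(word):
    # Greedy longest-match-first lookup: repeatedly take the longest prefix
    # (continuations are prefixed with "##") that exists in the vocabulary.
    pieces, start = [], 0
    while start < len(word):
        end, cur_piece = len(word), None
        while end > start:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return ["[UNK]"]  # no sub-piece matched
        pieces.append(cur_piece)
        start = end
    return pieces

print(wordpiece("unwanted"))  # ['un', '##want', '##ed']
print(wordpiece("running"))   # ['runn', '##ing']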
DeeBERT
DeeBERT-master/templates/adding_a_new_model/tests/modeling_tf_xxx_test.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import shutil import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester from transformers import XxxConfig, is_tf_available if is_tf_available(): import tensorflow as tf from transformers.modeling_tf_xxx import (TFXxxModel, TFXxxForMaskedLM, TFXxxForSequenceClassification, TFXxxForTokenClassification, TFXxxForQuestionAnswering, TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP) else: pytestmark = pytest.mark.skip("Require TensorFlow") class TFXxxModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFXxxModel, TFXxxForMaskedLM, TFXxxForQuestionAnswering, TFXxxForSequenceClassification, TFXxxForTokenClassification) if is_tf_available() else () class TFXxxModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XxxConfig( vocab_size_or_config_json_file=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxModel(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} sequence_output, pooled_output = model(inputs) inputs = [input_ids, input_mask] sequence_output, pooled_output = model(inputs) sequence_output, pooled_output = model(input_ids) result = { "sequence_output": sequence_output.numpy(), "pooled_output": pooled_output.numpy(), } self.parent.assertListEqual( list(result["sequence_output"].shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].shape), [self.batch_size, self.hidden_size]) def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxForMaskedLM(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} prediction_scores, = model(inputs) result = { "prediction_scores": prediction_scores.numpy(), } self.parent.assertListEqual( list(result["prediction_scores"].shape), [self.batch_size, self.seq_length, self.vocab_size]) def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = TFXxxForSequenceClassification(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} logits, = model(inputs) result = { "logits": logits.numpy(), } self.parent.assertListEqual( list(result["logits"].shape), [self.batch_size, self.num_labels]) def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = TFXxxForTokenClassification(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} logits, = model(inputs) result = { "logits": logits.numpy(), } self.parent.assertListEqual( list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]) def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxForQuestionAnswering(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} start_logits, end_logits = model(inputs) result = { "start_logits": start_logits.numpy(), "end_logits": end_logits.numpy(), } self.parent.assertListEqual( list(result["start_logits"].shape), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["end_logits"].shape), [self.batch_size, self.seq_length]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def setUp(self): self.model_tester = TFXxxModelTest.TFXxxModelTester(self) self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_xxx_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in ['xxx-base-uncased']: model = TFXxxModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) if __name__ == "__main__": unittest.main()
11,411
43.404669
160
py
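TFXxxModelTester above feeds random ids into the model and only asserts on output shapes. As a rough stand-in (an assumption for illustration, not the library helper's actual code), its ids_tensor utility can be thought of as drawing random token ids of a given shape:

import numpy as np

def ids_tensor(shape, vocab_size, rng=np.random.default_rng(0)):
    # Random token ids in [0, vocab_size) with the requested shape.
    return rng.integers(low=0, high=vocab_size, size=shape)

batch_size, seq_length, vocab_size, hidden_size = 13, 7, 99, 32
input_ids = ids_tensor([batch_size, seq_length], vocab_size)
assert input_ids.shape == (batch_size, seq_length)
assert 0 <= input_ids.min() and input_ids.max() < vocab_size
# The tester then checks e.g. that the sequence output has shape
# (batch_size, seq_length, hidden_size) and the pooled output
# (batch_size, hidden_size).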
DeeBERT
DeeBERT-master/templates/adding_a_new_model/tests/modeling_xxx_test.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import shutil import pytest from transformers import is_torch_available from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester if is_torch_available(): from transformers import (XxxConfig, XxxModel, XxxForMaskedLM, XxxForNextSentencePrediction, XxxForPreTraining, XxxForQuestionAnswering, XxxForSequenceClassification, XxxForTokenClassification, XxxForMultipleChoice) from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP else: pytestmark = pytest.mark.skip("Require Torch") class XxxModelTest(CommonTestCases.CommonModelTester): all_model_classes = (XxxModel, XxxForMaskedLM, XxxForQuestionAnswering, XxxForSequenceClassification, XxxForTokenClassification) if is_torch_available() else () class XxxModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XxxConfig( vocab_size_or_config_json_file=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def check_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), []) def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxModel(config=config) model.eval() sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids) result = { "sequence_output": sequence_output, "pooled_output": pooled_output, } self.parent.assertListEqual( list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxForMaskedLM(config=config) model.eval() loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) result = { "loss": loss, "prediction_scores": prediction_scores, } self.parent.assertListEqual( list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]) self.check_loss_output(result) def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxForQuestionAnswering(config=config) model.eval() loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels) result = { "loss": loss, "start_logits": start_logits, "end_logits": end_logits, } self.parent.assertListEqual( list(result["start_logits"].size()), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["end_logits"].size()), [self.batch_size, self.seq_length]) self.check_loss_output(result) def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = XxxForSequenceClassification(config) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) result = { "loss": loss, "logits": logits, } self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.num_labels]) self.check_loss_output(result) def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = XxxForTokenClassification(config=config) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = { "loss": loss, "logits": logits, } 
self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels]) self.check_loss_output(result) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def setUp(self): self.model_tester = XxxModelTest.XxxModelTester(self) self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_xxx_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(XXX_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = XxxModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) if __name__ == "__main__": unittest.main()
11,628
44.425781
160
py
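The PyTorch tester above follows the same recipe for every head: build a config, instantiate the model, call eval() so dropout is disabled, run a forward pass and assert output shapes. A condensed sketch of that pattern, with a hypothetical toy module standing in for XxxModel:

import torch
import torch.nn as nn

class ToyModel(nn.Module):
    # Stand-in for XxxModel: returns (sequence_output, pooled_output).
    def __init__(self, vocab_size=99, hidden_size=32):
        super(ToyModel, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, hidden_size)

    def forward(self, input_ids):
        sequence_output = self.embeddings(input_ids)
        pooled_output = sequence_output[:, 0]  # "pool" the first token
        return sequence_output, pooled_output

batch_size, seq_length, hidden_size = 13, 7, 32
model = ToyModel()
model.eval()  # disable dropout before checking shapes, as the tests do
with torch.no_grad():
    input_ids = torch.randint(0, 99, (batch_size, seq_length))
    sequence_output, pooled_output = model(input_ids)
assert list(sequence_output.size()) == [batch_size, seq_length, hidden_size]
assert list(pooled_output.size()) == [batch_size, hidden_size]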
DeeBERT
DeeBERT-master/templates/adding_a_new_example_script/utils_xxx.py
# coding=utf-8 # Copyright 2018 XXX. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Load XXX dataset. """ from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize # Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, cls_index, p_mask, paragraph_len, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.cls_index = cls_index self.p_mask = p_mask self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False 
char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 # cnt_pos, cnt_neg = 0, 0 # max_N, max_M = 1024, 1024 # f = np.zeros((max_N, max_M), dtype=np.float32) features = [] for (example_index, example) in enumerate(examples): # if example_index % 100 == 0: # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. 
# To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implem also keep the classification token (set to 0) (not sure why...) p_mask = [] # CLS token at the beginning if not cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = 0 # Query for token in query_tokens: tokens.append(token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # Paragraph for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(sequence_b_segment_id) p_mask.append(0) paragraph_len = doc_span.length # SEP token tokens.append(sep_token) segment_ids.append(sequence_b_segment_id) p_mask.append(1) # CLS token at the end if cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = len(tokens) - 1 # Index of classification token input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(pad_token) input_mask.append(0 if mask_padding_with_zero else 1) segment_ids.append(pad_token_segment_id) p_mask.append(1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 span_is_impossible = True else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = cls_index end_position = cls_index if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: logger.info("impossible example") if is_training and not span_is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. 
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions # For XLNet (and XLM which uses the same head) RawResultExtended = collections.namedtuple("RawResultExtended", ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"]) def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging): """ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. 
Requires utils_squad_evaluate.py """ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) logger.info("Writing predictions to: %s", output_prediction_file) # logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] cur_null_score = result.cls_logits # if we could have irrelevant answers, get the min score of irrelevant score_null = min(score_null, cur_null_score) for i in range(start_n_top): for j in range(end_n_top): start_log_prob = result.start_top_log_probs[i] start_index = result.start_top_index[i] j_index = i * end_n_top + j end_log_prob = result.end_top_log_probs[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] # XLNet un-tokenizer # Let's keep it simple for now and see if we need all this later. 
# # tok_start_to_orig_index = feature.tok_start_to_orig_index # tok_end_to_orig_index = feature.tok_end_to_orig_index # start_orig_pos = tok_start_to_orig_index[pred.start_index] # end_orig_pos = tok_end_to_orig_index[pred.end_index] # paragraph_text = example.paragraph_text # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() # Previously used Bert untokenizer tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") with open(orig_data_file, "r", encoding='utf-8') as reader: orig_data = json.load(reader)["data"] qid_to_has_ans = make_qid_to_has_ans(orig_data) has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) out_eval = {} find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) return out_eval def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. 
# # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
41,415
40.582329
112
py
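convert_examples_to_features in utils_xxx.py above copes with documents longer than the maximum sequence length by taking sliding-window chunks of at most max_tokens_for_doc tokens, advancing by doc_stride each time. The same chunking logic, extracted into a standalone sketch with simplified names:

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    # Windows of at most max_tokens_for_doc tokens; consecutive windows
    # overlap because the start only advances by doc_stride.
    doc_spans, start_offset = [], 0
    while start_offset < num_doc_tokens:
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

print(make_doc_spans(12, 5, 3))
# [DocSpan(start=0, length=5), DocSpan(start=3, length=5),
#  DocSpan(start=6, length=5), DocSpan(start=9, length=3)]

Tokens that fall into several overlapping windows are later scored with _check_is_max_context, which keeps only the window where the token has the most context on both sides.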
DeeBERT
DeeBERT-master/templates/adding_a_new_example_script/run_xxx.py
# coding=utf-8 # Copyright 2018 XXX. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for task XXX.""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended) # The follwing import is the official SQuAD evaluation script (2.0). # You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if 
any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - 
logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: 
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
30,654
53.741071
151
py
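A minimal sketch of calling the wrapped official SQuAD evaluation script on files already produced by `write_predictions`, using only the `EVAL_OPTS` keywords that appear in `evaluate` above; the file paths are placeholders:

from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad

# Placeholder paths: the raw dev set and a predictions file written by the script above.
evaluate_options = EVAL_OPTS(data_file="dev-v1.1.json",
                             pred_file="predictions_.json",
                             na_prob_file=None)  # set only for SQuAD 2.0 (version_2_with_negative) runs
results = evaluate_on_squad(evaluate_options)
print(results)  # dictionary of metrics such as exact match and F1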
DeeBERT
DeeBERT-master/docs/source/conf.py
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = u'transformers' copyright = u'2019, huggingface' author = u'huggingface' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags release = u'2.1.1' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'recommonmark', 'sphinx.ext.viewcode', 'sphinx_markdown_tables' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'analytics_id': 'UA-83738774-2' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = 'transformersdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'transformers.tex', u'transformers Documentation', u'huggingface', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'transformers', u'transformers Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'transformers', u'transformers Documentation', author, 'transformers', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] def setup(app): app.add_stylesheet('css/huggingface.css') app.add_stylesheet('css/code-snippets.css') app.add_js_file('js/custom.js') # -- Extension configuration -------------------------------------------------
5,609
28.68254
79
py
DeeBERT
DeeBERT-master/transformers/modeling_encoder_decoder.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Encoder-Decoder architectures """ from __future__ import absolute_import, division, print_function, unicode_literals import logging import os import torch from torch import nn from .modeling_auto import AutoModel, AutoModelWithLMHead logger = logging.getLogger(__name__) class PreTrainedEncoderDecoder(nn.Module): r""" :class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and (optionally) another one as decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class method. """ def __init__(self, encoder, decoder): super(PreTrainedEncoderDecoder, self).__init__() self.encoder = encoder self.decoder = decoder @classmethod def from_pretrained( cls, encoder_pretrained_model_name_or_path=None, decoder_pretrained_model_name_or_path=None, *model_args, **kwargs ): r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you need to first set it back in training mode with `model.train()` Params: encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. 
model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments. Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. You can specify kwargs sepcific for the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively. (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both encoders and decoders. Examples:: model = PreTrainedEncoderDecoder.from_pretained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert """ # keyword arguments come in 3 flavors: encoder-specific (prefixed by # `encoder_`), decoder-specific (prefixed by `decoder_`) and those # that apply to the model as a whole. # We let the specific kwargs override the common ones in case of conflict. 
kwargs_common = { argument: value for argument, value in kwargs.items() if not argument.startswith("encoder_") and not argument.startswith("decoder_") } kwargs_decoder = kwargs_common.copy() kwargs_encoder = kwargs_common.copy() kwargs_encoder.update( { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } ) kwargs_decoder.update( { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } ) # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: encoder = AutoModel.from_pretrained( encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder ) encoder.config.is_decoder = False decoder = kwargs_decoder.pop("model", None) if decoder is None: decoder = AutoModelWithLMHead.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder ) decoder.config.is_decoder = True model = cls(encoder, decoder) return model def save_pretrained(self, save_directory): """ Save a Seq2Seq model and its configuration file in a format such that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained` We save the encoder' and decoder's parameters in two separate directories. """ self.encoder.save_pretrained(os.path.join(save_directory, "encoder")) self.decoder.save_pretrained(os.path.join(save_directory, "decoder")) def forward(self, encoder_input_ids, decoder_input_ids, **kwargs): """ The forward pass on a seq2eq depends what we are performing: - During training we perform one forward pass through both the encoder and decoder; - During prediction, we perform one forward pass through the encoder, and then perform several forward passes with the encoder's hidden state through the decoder to decode a full sequence. Therefore, we skip the forward pass on the encoder if an argument named `encoder_hidden_state` is passed to this function. Params: encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` Indices of encoder input sequence tokens in the vocabulary. decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` Indices of decoder input sequence tokens in the vocabulary. kwargs: (`optional`) Remaining dictionary of keyword arguments. """ # keyword arguments come in 3 flavors: encoder-specific (prefixed by # `encoder_`), decoder-specific (prefixed by `decoder_`) and those # that apply to the model as whole. # We let the specific kwargs override the common ones in case of conflict. 
kwargs_common = { argument: value for argument, value in kwargs.items() if not argument.startswith("encoder_") and not argument.startswith("decoder_") } kwargs_decoder = kwargs_common.copy() kwargs_encoder = kwargs_common.copy() kwargs_encoder.update( { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } ) kwargs_decoder.update( { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } ) # Encode if needed (training, first prediction pass) encoder_hidden_states = kwargs_encoder.pop("hidden_states", None) if encoder_hidden_states is None: encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder) encoder_hidden_states = encoder_outputs[ 0 ] # output the last layer hidden state else: encoder_outputs = () # Decode kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get( "attention_mask", None ) decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder) return decoder_outputs + encoder_outputs class Model2Model(PreTrainedEncoderDecoder): r""" :class:`~transformers.Model2Model` instantiates a Seq2Seq2 model where both of the encoder and decoder are of the same family. If the name of or that path to a pretrained model is specified the encoder and the decoder will be initialized with the pretrained weight (the cross-attention will be intialized randomly if its weights are not present). It is possible to override this behavior and initialize, say, the decoder randomly by creating it beforehand as follows config = BertConfig.from_pretrained() decoder = BertForMaskedLM(config) model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder) """ def __init__(self, *args, **kwargs): super(Model2Model, self).__init__(*args, **kwargs) self.tie_weights() def tie_weights(self): """ Tying the encoder and decoders' embeddings together. We need for each to get down to the embedding weights. However the different model classes are inconsistent to that respect: - BertModel: embeddings.word_embeddings - RoBERTa: embeddings.word_embeddings - XLMModel: embeddings - GPT2: wte - BertForMaskedLM: bert.embeddings.word_embeddings - RobertaForMaskedLM: roberta.embeddings.word_embeddings argument of the XEmbedding layer for each model, but it is "blocked" by a model-specific keyword (bert, )... 
""" # self._tie_or_clone_weights(self.encoder, self.decoder) pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs): if ( "bert" not in pretrained_model_name_or_path or "roberta" in pretrained_model_name_or_path or "distilbert" in pretrained_model_name_or_path ): raise ValueError("Only the Bert model is currently supported.") model = super(Model2Model, cls).from_pretrained( encoder_pretrained_model_name_or_path=pretrained_model_name_or_path, decoder_pretrained_model_name_or_path=pretrained_model_name_or_path, *args, **kwargs ) return model class Model2LSTM(PreTrainedEncoderDecoder): @classmethod def from_pretrained(cls, *args, **kwargs): if kwargs.get("decoder_model", None) is None: # We will create a randomly initilized LSTM model as decoder if "decoder_config" not in kwargs: raise ValueError( "To load an LSTM in Encoder-Decoder model, please supply either: " " - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or" " - a dictionary of configuration parameters that will be used to initialize a" " torch.nn.LSTM model as `decoder_config` keyword argument. " " E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`" ) kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config")) model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs) return model
15,827
49.893891
472
py
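A minimal usage sketch for the encoder-decoder wrapper defined above, following its own docstring example; the token-id tensors are placeholders, and the keyword-routing comment restates the behaviour of `forward` in the record:

import torch
from transformers.modeling_encoder_decoder import PreTrainedEncoderDecoder

# Initialize a Bert2Bert model, as in the class docstring.
model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased')
model.eval()

# Placeholder token-id tensors of shape (batch_size, sequence_length).
encoder_input_ids = torch.tensor([[101, 7592, 2088, 102]])
decoder_input_ids = torch.tensor([[101, 7592, 102]])

with torch.no_grad():
    # Keywords prefixed with encoder_/decoder_ are routed to the matching sub-model;
    # unprefixed keywords are shared by both (see the kwargs handling in forward above).
    outputs = model(encoder_input_ids,
                    decoder_input_ids,
                    encoder_attention_mask=torch.ones_like(encoder_input_ids))

decoder_scores = outputs[0]  # first element comes from the decoder, per forward's return value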
DeeBERT
DeeBERT-master/transformers/tokenization_roberta.py
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for RoBERTa.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import regex as re from io import open from .tokenization_gpt2 import GPT2Tokenizer try: from functools import lru_cache except ImportError: # Just a dummy decorator to get the checks to run on python2 # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now. def lru_cache(): return lambda func: func logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-vocab.json", 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", }, 'merges_file': { 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-merges.txt", 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'roberta-base': 512, 'roberta-large': 512, 'roberta-large-mnli': 512, 'distilroberta-base': 512, 'roberta-base-openai-detector': 512, 'roberta-large-openai-detector': 512, } class RobertaTokenizer(GPT2Tokenizer): """ RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer. Peculiarities: - Byte-level Byte-Pair-Encoding - Requires a space to start the input string => the encoding methods should be called with the ``add_prefix_space`` flag set to ``True``. 
Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"` """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, merges_file, errors='replace', bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', **kwargs): super(RobertaTokenizer, self).__init__(vocab_file=vocab_file, merges_file=merges_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs) self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: single sequence: <s> X </s> pair of sequences: <s> A </s></s> B </s> """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError("You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model.") return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RoBERTa sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]
7,300
47.673333
120
py
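A short sketch exercising the special-token helpers defined on the tokenizer class above; the token-id lists are arbitrary placeholders, and the comments restate the formats given in the docstrings:

from transformers import RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

ids_a = [31414, 232]     # placeholder token ids for sequence A (no special tokens)
ids_b = [9178, 32, 47]   # placeholder token ids for sequence B (no special tokens)

single = tokenizer.build_inputs_with_special_tokens(ids_a)          # <s> A </s>
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)     # <s> A </s></s> B </s>

# 1 at the positions that <s>/</s> will occupy in the built pair, 0 for sequence tokens.
special_mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)

# 0 for "<s> A </s></s>", 1 for "B </s>", matching the mask layout in the docstring.
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)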
DeeBERT
DeeBERT-master/transformers/configuration_xlm.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLM configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-config.json", 'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-config.json", 'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-config.json", 'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-config.json", 'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-config.json", 'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-config.json", 'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-config.json", 'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-config.json", 'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-config.json", 'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-config.json", } class XLMConfig(PretrainedConfig): """Configuration class to store the configuration of a `XLMModel`. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XLMModel`. d_model: Size of the encoder layers and the pooler layer. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. d_inner: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. ff_activation: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. untie_r: untie relative position biases attn_type: 'bi' for XLM, 'uni' for Transformer-XL dropout: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. dropout: float, dropout rate. init: str, the initialization scheme, either "normal" or "uniform". init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range]. Only effective when init="uniform". init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std. Only effective when init="normal". 
mem_len: int, the number of tokens to cache. reuse_len: int, the number of tokens in the currect batch to be cached and reused in the future. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping. same_length: bool, whether to use the same attention length for each token. """ pretrained_config_archive_map = XLM_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size_or_config_json_file=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** -0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, finetuning_task=None, num_labels=2, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, **kwargs): """Constructs XLMConfig. """ super(XLMConfig, self).__init__(**kwargs) if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.n_words = vocab_size_or_config_json_file self.emb_dim = emb_dim self.n_layers = n_layers self.n_heads = n_heads self.dropout = dropout self.attention_dropout = attention_dropout self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.use_lang_emb = use_lang_emb self.layer_norm_eps = layer_norm_eps self.bos_index = bos_index self.eos_index = eos_index self.pad_index = pad_index self.unk_index = unk_index self.mask_index = mask_index self.is_encoder = is_encoder self.max_position_embeddings = max_position_embeddings self.embed_init_std = embed_init_std self.init_std = init_std self.finetuning_task = finetuning_task self.num_labels = num_labels self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_proj_to_labels = summary_proj_to_labels self.summary_first_dropout = summary_first_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top else: raise ValueError("First argument must be either a vocabulary size (int)" " or the path to a pretrained model config file (str)") @property def vocab_size(self): return self.n_words @vocab_size.setter def vocab_size(self, value): self.n_words = value @property def hidden_size(self): return self.emb_dim @property def num_attention_heads(self): return self.n_heads @property def num_hidden_layers(self): return self.n_layers
8,112
43.576923
121
py
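A small sketch of the XLMConfig defined above, showing that the model-specific attribute names are mirrored by the generic properties at the bottom of the class; the sizes are arbitrary placeholders:

from transformers import XLMConfig

# Build a config from an integer vocabulary size (the alternative is a path to a JSON config file).
config = XLMConfig(vocab_size_or_config_json_file=30145, emb_dim=1024, n_layers=6, n_heads=8)

# Generic names used elsewhere in the library are exposed as aliases of the XLM-specific fields.
assert config.vocab_size == config.n_words == 30145
assert config.hidden_size == config.emb_dim == 1024
assert config.num_hidden_layers == config.n_layers == 6
assert config.num_attention_heads == config.n_heads == 8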
DeeBERT
DeeBERT-master/transformers/optimization.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import logging import math import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR logger = logging.getLogger(__name__) def get_constant_schedule(optimizer, last_epoch=-1): """ Create a schedule with a constant learning rate. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1. return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=.5, last_epoch=-1): """ Create a schedule with a learning rate that decreases following the values of the cosine function between 0 and `pi * cycles` after a warmup period during which it increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0., 0.5 * (1. + math.cos(math.pi * float(num_cycles) * 2. * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1., last_epoch=-1): """ Create a schedule with a learning rate that decreases following the values of the cosine function with several hard restarts, after a warmup period during which it increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.: return 0. return max(0., 0.5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.)))) return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix. Parameters: lr (float): learning rate. Default 1e-3. 
betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999) eps (float): Adams epsilon. Default: 1e-6 weight_decay (float): Weight decay. Default: 0.0 correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True. """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super(AdamW, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(1.0 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad) denom = exp_avg_sq.sqrt().add_(group['eps']) step_size = group['lr'] if group['correct_bias']: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state['step'] bias_correction2 = 1.0 - beta2 ** state['step'] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(-step_size, exp_avg, denom) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group['weight_decay'] > 0.0: p.data.add_(-group['lr'] * group['weight_decay'], p.data) return loss
7,658
44.052941
134
py
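A minimal sketch wiring the AdamW optimizer and get_linear_schedule_with_warmup from the record above into a training loop, mirroring how the fine-tuning script earlier in this section groups parameters for weight decay; the model, loss, and hyper-parameter values are placeholders:

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

# Placeholder model and schedule lengths.
model = torch.nn.Linear(10, 2)
num_training_steps, num_warmup_steps = 1000, 100

# Exclude biases and LayerNorm weights from weight decay, as in the fine-tuning script.
no_decay = ['bias', 'LayerNorm.weight']
grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]

optimizer = AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps=num_warmup_steps,
                                            num_training_steps=num_training_steps)

for step in range(num_training_steps):
    loss = model(torch.randn(4, 10)).sum()  # placeholder loss
    loss.backward()
    optimizer.step()
    scheduler.step()   # update the learning rate after each optimizer step
    model.zero_grad()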
DeeBERT
DeeBERT-master/transformers/configuration_xlnet.py
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XLNet configuration """
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import sys
from io import open

from .configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json",
    'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ``XLNetModel``.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of ``inputs_ids`` in ``XLNetModel``.
        d_model: Size of the encoder layers and the pooler layer.
        n_layer: Number of hidden layers in the Transformer encoder.
        n_head: Number of attention heads for each attention layer in the Transformer encoder.
        d_inner: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        ff_activation: The non-linear activation function (function or string) in the encoder and pooler.
            If string, "gelu", "relu" and "swish" are supported.
        untie_r: untie relative position biases
        attn_type: 'bi' for XLNet, 'uni' for Transformer-XL
        dropout: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        initializer_range: The stddev of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.

        dropout: float, dropout rate.
        init: str, the initialization scheme, either "normal" or "uniform".
        init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range].
            Only effective when init="uniform".
        init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std.
            Only effective when init="normal".
        mem_len: int, the number of tokens to cache.
        reuse_len: int, the number of tokens in the current batch to be cached and reused in the future.
        bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and
            False during finetuning.
        clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping.
        same_length: bool, whether to use the same attention length for each token.
        finetuning_task: name of the glue task on which the model was fine-tuned if any
    """
    pretrained_config_archive_map = XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=32000,
                 d_model=1024,
                 n_layer=24,
                 n_head=16,
                 d_inner=4096,
                 max_position_embeddings=512,
                 ff_activation="gelu",
                 untie_r=True,
                 attn_type="bi",

                 initializer_range=0.02,
                 layer_norm_eps=1e-12,

                 dropout=0.1,
                 mem_len=None,
                 reuse_len=None,
                 bi_data=False,
                 clamp_len=-1,
                 same_length=False,

                 finetuning_task=None,
                 num_labels=2,
                 summary_type='last',
                 summary_use_proj=True,
                 summary_activation='tanh',
                 summary_last_dropout=0.1,
                 start_n_top=5,
                 end_n_top=5,
                 **kwargs):
        """Constructs XLNetConfig.
        """
        super(XLNetConfig, self).__init__(**kwargs)

        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                setattr(self, key, value)
        elif isinstance(vocab_size_or_config_json_file, int):
            self.n_token = vocab_size_or_config_json_file
            self.d_model = d_model
            self.n_layer = n_layer
            self.n_head = n_head
            assert d_model % n_head == 0
            self.d_head = d_model // n_head
            self.ff_activation = ff_activation
            self.d_inner = d_inner
            self.untie_r = untie_r
            self.attn_type = attn_type

            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps

            self.dropout = dropout
            self.mem_len = mem_len
            self.reuse_len = reuse_len
            self.bi_data = bi_data
            self.clamp_len = clamp_len
            self.same_length = same_length

            self.finetuning_task = finetuning_task
            self.num_labels = num_labels
            self.summary_type = summary_type
            self.summary_use_proj = summary_use_proj
            self.summary_activation = summary_activation
            self.summary_last_dropout = summary_last_dropout
            self.start_n_top = start_n_top
            self.end_n_top = end_n_top
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")

    @property
    def max_position_embeddings(self):
        return -1

    @property
    def vocab_size(self):
        return self.n_token

    @vocab_size.setter
    def vocab_size(self, value):
        self.n_token = value

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
6,805
38.80117
110
py
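A short sketch of instantiating the configuration class above from keyword arguments (the int branch of __init__); the attribute and property names follow the source shown, the specific dimension values are illustrative, and to_json_string is inherited from PretrainedConfig (listed in a later record).

from transformers import XLNetConfig  # assumes the config class above is exported from the package root

# build a small config from keyword arguments
config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768, n_layer=12, n_head=12, d_inner=3072)

# derived and aliased attributes documented in the class above
assert config.d_head == config.d_model // config.n_head   # 64
assert config.hidden_size == config.d_model                # property alias
assert config.num_hidden_layers == config.n_layer

print(config.to_json_string())  # serialize via the inherited PretrainedConfig helpers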
DeeBERT
DeeBERT-master/transformers/__main__.py
# coding: utf8 def main(): import sys if (len(sys.argv) < 4 or len(sys.argv) > 6) or sys.argv[1] not in ["bert", "gpt", "transfo_xl", "gpt2", "xlnet", "xlm"]: print( "This command line utility let you convert original (author released) model checkpoint to pytorch.\n" "It should be used as one of: \n" ">> transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT, \n" ">> transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG], \n" ">> transformers transfo_xl TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG] or \n" ">> transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG] or \n" ">> transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME] or \n" ">> transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT") else: if sys.argv[1] == "bert": try: from .convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) != 5: # pylint: disable=line-too-long print("Should be used as `transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`") else: PYTORCH_DUMP_OUTPUT = sys.argv.pop() TF_CONFIG = sys.argv.pop() TF_CHECKPOINT = sys.argv.pop() convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "gpt": from .convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`") else: OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: OPENAI_GPT_CONFIG = sys.argv[4] else: OPENAI_GPT_CONFIG = "" convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH, OPENAI_GPT_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "transfo_xl": try: from .convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers transfo_xl TF_CHECKPOINT/TF_DATASET_FILE PYTORCH_DUMP_OUTPUT [TF_CONFIG]`") else: if 'ckpt' in sys.argv[2].lower(): TF_CHECKPOINT = sys.argv[2] TF_DATASET_FILE = "" else: TF_DATASET_FILE = sys.argv[2] TF_CHECKPOINT = "" PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE) elif sys.argv[1] == "gpt2": try: from .convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [TF_CONFIG]`") else: TF_CHECKPOINT = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "xlnet": try: from .convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 5 or len(sys.argv) > 6: # pylint: disable=line-too-long print("Should be used as `transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`") else: TF_CHECKPOINT = sys.argv[2] TF_CONFIG = sys.argv[3] PYTORCH_DUMP_OUTPUT = sys.argv[4] if len(sys.argv) == 6: FINETUNING_TASK = sys.argv[5] else: FINETUNING_TASK = None convert_xlnet_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, FINETUNING_TASK) elif sys.argv[1] == "xlm": from .convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch if len(sys.argv) != 4: # pylint: disable=line-too-long print("Should be used as `transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT`") else: XLM_CHECKPOINT_PATH = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] convert_xlm_checkpoint_to_pytorch(XLM_CHECKPOINT_PATH, PYTORCH_DUMP_OUTPUT) if __name__ == '__main__': main()
7,085
53.507692
135
py
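The __main__ module above wires checkpoint conversion to positional command-line arguments. A hedged sketch of one invocation follows; the paths are placeholders, and both TensorFlow and PyTorch must be installed for the underlying converter to import, as the module's own error messages note.

# illustrative invocation of the conversion CLI defined above (paths are placeholders)
import subprocess

subprocess.run([
    "python", "-m", "transformers", "bert",
    "/path/to/bert_model.ckpt",    # TF_CHECKPOINT
    "/path/to/bert_config.json",   # TF_CONFIG
    "/path/to/pytorch_model.bin",  # PYTORCH_DUMP_OUTPUT
], check=True)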
DeeBERT
DeeBERT-master/transformers/configuration_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import copy import json import logging import os from io import open from .file_utils import cached_path, CONFIG_NAME logger = logging.getLogger(__name__) class PretrainedConfig(object): r""" Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. Note: A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. It only affects the model's configuration. Class attributes (overridden by derived classes): - ``pretrained_config_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained model configurations as values. Parameters: ``finetuning_task``: string, default `None`. Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. ``num_labels``: integer, default `2`. Number of classes to use when the model is a classification model (sequences/tokens) ``output_attentions``: boolean, default `False`. Should the model returns attentions weights. ``output_hidden_states``: string, default `False`. Should the model returns all hidden-states. ``torchscript``: string, default `False`. Is the model used with Torchscript. """ pretrained_config_archive_map = {} def __init__(self, **kwargs): self.finetuning_task = kwargs.pop('finetuning_task', None) self.num_labels = kwargs.pop('num_labels', 2) self.output_attentions = kwargs.pop('output_attentions', False) self.output_hidden_states = kwargs.pop('output_hidden_states', False) self.output_past = kwargs.pop('output_past', True) # Not used by all models self.torchscript = kwargs.pop('torchscript', False) # Only used by PyTorch models self.use_bfloat16 = kwargs.pop('use_bfloat16', False) self.pruned_heads = kwargs.pop('pruned_heads', {}) self.is_decoder = kwargs.pop('is_decoder', False) def save_pretrained(self, save_directory): """ Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method. 
""" assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" # If we save using the predefined names, we can load using `from_pretrained` output_config_file = os.path.join(save_directory, CONFIG_NAME) self.to_json_file(output_config_file) logger.info("Configuration saved in {}".format(output_config_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. - a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. kwargs: (`optional`) dict: key/value pairs with which to update the configuration object after loading. - The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (`optional`) bool: - If False, then this function returns just the final configuration object. - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored. Examples:: # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. config = BertConfig.from_pretrained('./test/saved_model/') # E.g. 
config (or model) was saved using `save_pretrained('./test/saved_model/')` config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) if pretrained_model_name_or_path in cls.pretrained_config_archive_map: config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) else: config_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_config_archive_map: msg = "Couldn't reach server at '{}' to download pretrained model configuration file.".format( config_file) else: msg = "Model name '{}' was not found in model name list ({}). " \ "We assumed '{}' was a path or url to a configuration file named {} or " \ "a directory containing such a file but couldn't find any such file at this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_config_archive_map.keys()), config_file, CONFIG_NAME) raise EnvironmentError(msg) if resolved_config_file == config_file: logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = cls.from_json_file(resolved_config_file) if hasattr(config, 'pruned_heads'): config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items()) # Update config with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info("Model config %s", str(config)) if return_unused_kwargs: return config, kwargs else: return config @classmethod def from_dict(cls, json_object): """Constructs a `Config` from a Python dictionary of parameters.""" config = cls(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): setattr(config, key, value) return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string())
10,830
50.822967
296
py
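The save/load contract documented in the PretrainedConfig docstrings above can be exercised end to end. A sketch under assumptions: BertConfig stands in for any derived config class, and the temporary directory is illustrative.

import tempfile
from transformers import BertConfig  # any PretrainedConfig subclass works; BertConfig is illustrative

config = BertConfig(num_labels=3, output_attentions=True)

with tempfile.TemporaryDirectory() as save_dir:
    # save_pretrained writes CONFIG_NAME (config.json) into the directory...
    config.save_pretrained(save_dir)
    # ...and from_pretrained reads it back, consuming kwargs that match config attributes
    reloaded, unused = BertConfig.from_pretrained(save_dir, output_hidden_states=True,
                                                  foo=False, return_unused_kwargs=True)

assert reloaded.num_labels == 3
assert reloaded.output_hidden_states is True
assert unused == {'foo': False}   # 'foo' is not a config attribute, so it is returned unused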
DeeBERT
DeeBERT-master/transformers/modeling_tf_pytorch_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch - TF 2.0 general utilities.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import os import re import numpy logger = logging.getLogger(__name__) def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=''): """ Convert a TF 2.0 model variable name in a pytorch model weight name. Conventions for TF2.0 scopes -> PyTorch attribute names conversions: - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) return tuple with: - pytorch model weight name - transpose: boolean indicating weither TF2.0 and PyTorch weights matrices are transposed with regards to each other """ tf_name = tf_name.replace(':0', '') # device ids tf_name = re.sub(r'/[^/]*___([^/]*)/', r'/\1/', tf_name) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch) tf_name = tf_name.replace('_._', '/') # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList) tf_name = re.sub(r'//+', '/', tf_name) # Remove empty levels at the end tf_name = tf_name.split('/') # Convert from TF2.0 '/' separators to PyTorch '.' separators tf_name = tf_name[1:] # Remove level zero # When should we transpose the weights transpose = bool(tf_name[-1] == 'kernel' or 'emb_projs' in tf_name or 'out_projs' in tf_name) # Convert standard TF2.0 names in PyTorch names if tf_name[-1] == 'kernel' or tf_name[-1] == 'embeddings' or tf_name[-1] == 'gamma': tf_name[-1] = 'weight' if tf_name[-1] == 'beta': tf_name[-1] = 'bias' # Remove prefix if needed tf_name = '.'.join(tf_name) if start_prefix_to_remove: tf_name = tf_name.replace(start_prefix_to_remove, '', 1) return tf_name, transpose ##################### ### PyTorch => TF 2.0 def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False): """ Load pytorch checkpoints in a TF 2.0 model """ try: import tensorflow as tf import torch except ImportError as e: logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. 
Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e pt_path = os.path.abspath(pytorch_checkpoint_path) logger.info("Loading PyTorch weights from {}".format(pt_path)) pt_state_dict = torch.load(pt_path, map_location='cpu') return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys) def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): """ Load pytorch checkpoints in a TF 2.0 model """ pt_state_dict = pt_model.state_dict() return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys) def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False): """ Load pytorch state_dict in a TF 2.0 model. """ try: import torch import tensorflow as tf from tensorflow.python.keras import backend as K except ImportError as e: logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tfo = tf_model(tf_inputs, training=False) # Make sure model is built # Adapt state dict - TODO remove this and update the AWS weights files instead # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in pt_state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): pt_state_dict[new_key] = pt_state_dict.pop(old_key) # Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove = '' if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()): start_prefix_to_remove = tf_model.base_model_prefix + '.' 
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights weight_value_tuples = [] all_pytorch_weights = set(list(pt_state_dict.keys())) for symbolic_weight in symbolic_weights: sw_name = symbolic_weight.name name, transpose = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove) # Find associated numpy array in pytorch model state dict assert name in pt_state_dict, "{} not found in PyTorch model".format(name) array = pt_state_dict[name].numpy() if transpose: array = numpy.transpose(array) if len(symbolic_weight.shape) < len(array.shape): array = numpy.squeeze(array) elif len(symbolic_weight.shape) > len(array.shape): array = numpy.expand_dims(array, axis=0) try: assert list(symbolic_weight.shape) == list(array.shape) except AssertionError as e: e.args += (symbolic_weight.shape, array.shape) raise e logger.info("Initialize TF weight {}".format(symbolic_weight.name)) weight_value_tuples.append((symbolic_weight, array)) all_pytorch_weights.discard(name) K.batch_set_value(weight_value_tuples) if tf_inputs is not None: tfo = tf_model(tf_inputs, training=False) # Make sure restore ops are run logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights)) return tf_model ##################### ### TF 2.0 => PyTorch def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False): """ Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). """ try: import tensorflow as tf import torch except ImportError as e: logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e import transformers tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path)) # Instantiate and load the associated TF 2.0 model tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beggining tf_model_class = getattr(transformers, tf_model_class_name) tf_model = tf_model_class(pt_model.config) if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tfo = tf_model(tf_inputs, training=False) # Make sure model is built tf_model.load_weights(tf_checkpoint_path, by_name=True) return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys) def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False): """ Load TF 2.0 model in a pytorch model """ weights = tf_model.weights return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys) def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False): """ Load TF2.0 symbolic weights in a PyTorch model """ try: import tensorflow as tf import torch except ImportError as e: logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e new_pt_params_dict = {} current_pt_params_dict = dict(pt_model.named_parameters()) # Make sure we are able to load PyTorch base models as well as derived models (with heads) # TF models always have a prefix, some of PyTorch models (base ones) don't start_prefix_to_remove = '' if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()): start_prefix_to_remove = pt_model.base_model_prefix + '.' # Build a map from potential PyTorch weight names to TF 2.0 Variables tf_weights_map = {} for tf_weight in tf_weights: pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(tf_weight.name, start_prefix_to_remove=start_prefix_to_remove) tf_weights_map[pt_name] = (tf_weight.numpy(), transpose) all_tf_weights = set(list(tf_weights_map.keys())) loaded_pt_weights_data_ptr = {} for pt_weight_name, pt_weight in current_pt_params_dict.items(): # Handle PyTorch shared weight ()not duplicated in TF 2.0 if pt_weight.data_ptr() in loaded_pt_weights_data_ptr: new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()] continue # Find associated numpy array in pytorch model state dict if pt_weight_name not in tf_weights_map: raise ValueError("{} not found in TF 2.0 model".format(pt_weight_name)) array, transpose = tf_weights_map[pt_weight_name] if transpose: array = numpy.transpose(array) if len(pt_weight.shape) < len(array.shape): array = numpy.squeeze(array) elif len(pt_weight.shape) > len(array.shape): array = numpy.expand_dims(array, axis=0) try: assert list(pt_weight.shape) == list(array.shape) except AssertionError as e: e.args += (pt_weight.shape, array.shape) raise e logger.info("Initialize PyTorch weight {}".format(pt_weight_name)) new_pt_params_dict[pt_weight_name] = torch.from_numpy(array) loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array) all_tf_weights.discard(pt_weight_name) missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from TF 2.0 model: {}".format( pt_model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from TF 2.0 model not used in {}: {}".format( pt_model.__class__.__name__, unexpected_keys)) logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights)) return pt_model
12,356
41.610345
166
py
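The naming conventions documented in convert_tf_weight_name_to_pt_weight_name above can be checked with a couple of representative variable names; the example names below are made up for illustration.

from transformers.modeling_tf_pytorch_utils import convert_tf_weight_name_to_pt_weight_name

# a TF 2.0 'kernel' becomes a transposed PyTorch 'weight', '_._' becomes a level separator,
# and the device suffix plus the optional prefix are stripped
name, transpose = convert_tf_weight_name_to_pt_weight_name(
    'tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0',
    start_prefix_to_remove='bert.')
print(name, transpose)   # encoder.layer.0.attention.self.query.weight True

# LayerNorm 'beta' maps to 'bias' and is not transposed
name, transpose = convert_tf_weight_name_to_pt_weight_name(
    'tf_bert_model/bert/embeddings/LayerNorm/beta:0')
print(name, transpose)   # bert.embeddings.LayerNorm.bias False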
DeeBERT
DeeBERT-master/transformers/modeling_distilbert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert) """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import copy import sys from io import open import itertools import numpy as np import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_distilbert import DistilBertConfig from .file_utils import add_start_docstrings import logging logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin" } ### UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE ### def gelu(x): return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0))) def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([ [pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos) ]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False class Embeddings(nn.Module): def __init__(self, config): super(Embeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings(n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, input_ids): """ Parameters ---------- input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. 
Outputs ------- embeddings: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config): super(MultiHeadSelfAttention, self).__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, query, key, value, mask, head_mask = None): """ Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Outputs ------- weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` """ bs, q_length, dim = query.size() k_length = key.size(1) # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x): """ separate heads """ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): """ group heads """ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) scores = torch.matmul(q, k.transpose(2,3)) # (bs, n_heads, q_length, k_length) mask = (mask==0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length) scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length) weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length) weights = self.dropout(weights) # (bs, n_heads, q_length, k_length) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if self.output_attentions: return (context, weights) else: return (context,) class FFN(nn.Module): def __init__(self, config): super(FFN, self).__init__() self.dropout = nn.Dropout(p=config.dropout) self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim) self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim) assert config.activation in ['relu', 'gelu'], "activation ({}) must be in ['relu', 'gelu']".format(config.activation) self.activation = gelu if config.activation == 'gelu' else nn.ReLU() def forward(self, input): x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerBlock(nn.Module): def __init__(self, config): super(TransformerBlock, self).__init__() self.n_heads = config.n_heads self.dim = config.dim self.hidden_dim = config.hidden_dim self.dropout = nn.Dropout(p=config.dropout) self.activation = config.activation self.output_attentions = config.output_attentions assert config.dim % config.n_heads == 0 self.attention = MultiHeadSelfAttention(config) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) def forward(self, x, attn_mask=None, head_mask=None): """ Parameters ---------- x: torch.tensor(bs, seq_length, dim) attn_mask: torch.tensor(bs, seq_length) Outputs ------- sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization. 
""" # Self-Attention sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask) if self.output_attentions: sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples assert type(sa_output) == tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) # Feed Forward Network ffn_output = self.ffn(sa_output) # (bs, seq_length, dim) ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) output = (ffn_output,) if self.output_attentions: output = (sa_weights,) + output return output class Transformer(nn.Module): def __init__(self, config): super(Transformer, self).__init__() self.n_layers = config.n_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states layer = TransformerBlock(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)]) def forward(self, x, attn_mask=None, head_mask=None): """ Parameters ---------- x: torch.tensor(bs, seq_length, dim) Input sequence embedded. attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence. Outputs ------- hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hiddens states in the last (top) layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)] Tuple of length n_layers with the hidden states from each layer. Optional: only if output_hidden_states=True all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)] Tuple of length n_layers with the attention weights from each layer Optional: only if output_attentions=True """ all_hidden_states = () all_attentions = () hidden_state = x for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) layer_outputs = layer_module(x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[i]) hidden_state = layer_outputs[-1] if self.output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1 # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) outputs = (hidden_state,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) ### INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL ### class DistilBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = None base_model_prefix = "distilbert" def _init_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() DISTILBERT_START_DOCSTRING = r""" DistilBERT is a small, fast, cheap and light Transformer model trained by distilling Bert base. It has 40% less parameters than `bert-base-uncased`, runs 60% faster while preserving over 95% of Bert's performances as measured on the GLUE language understanding benchmark. Here are the differences between the interface of Bert and DistilBert: - DistilBert doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `[SEP]`) - DistilBert doesn't have options to select the input positions (`position_ids` input). This could be added if necessary though, just let's us know if you need this option. For more information on DistilBERT, please refer to our `detailed blog post`_ .. _`detailed blog post`: https://medium.com/huggingface/distilbert-8cf3380435b5 Parameters: config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ DISTILBERT_INPUTS_DOCSTRING = r""" Inputs: **input_ids** ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. The input sequences should start with `[CLS]` and end with `[SEP]` tokens. For now, ONLY BertTokenizer(`bert-base-uncased`) is supported and you should use this tokenizer when using DistilBERT. **attention_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) class DistilBertModel(DistilBertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') model = DistilBertModel.from_pretrained('distilbert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(DistilBertModel, self).__init__(config) self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim) tfmr_output = self.transformer(x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask) hidden_state = tfmr_output[0] output = (hidden_state, ) + tfmr_output[1:] return output # last-layer hidden-state, (all hidden_states), (all attentions) @add_start_docstrings("""DistilBert Model with a `masked language modeling` head on top. 
""", DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) class DistilBertForMaskedLM(DistilBertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') model = DistilBertForMaskedLM.from_pretrained('distilbert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(DistilBertForMaskedLM, self).__init__(config) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.distilbert = DistilBertModel(config) self.vocab_transform = nn.Linear(config.dim, config.dim) self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) self.init_weights() self.mlm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1) def get_output_embeddings(self): return self.vocab_projector def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None): dlbrt_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds) hidden_states = dlbrt_output[0] # (bs, seq_length, dim) prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim) prediction_logits = gelu(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size) outputs = (prediction_logits, ) + dlbrt_output[1:] if masked_lm_labels is not None: mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), masked_lm_labels.view(-1)) outputs = (mlm_loss,) + outputs return outputs # (mlm_loss), prediction_logits, (all hidden_states), (all attentions) @add_start_docstrings("""DistilBert Model transformer with a sequence classification/regression head on top (a 
linear layer on top of the pooled output) e.g. for GLUE tasks. """, DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) class DistilBertForSequenceClassification(DistilBertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(DistilBertForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.distilbert = DistilBertModel(config) self.pre_classifier = nn.Linear(config.dim, config.dim) self.classifier = nn.Linear(config.dim, config.num_labels) self.dropout = nn.Dropout(config.seq_classif_dropout) self.init_weights() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None): distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds) hidden_state = distilbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) pooled_output = nn.ReLU()(pooled_output) # (bs, dim) pooled_output = self.dropout(pooled_output) # (bs, dim) logits = self.classifier(pooled_output) # (bs, dim) outputs = (logits,) + distilbert_output[1:] if labels is not None: if self.num_labels == 1: loss_fct = nn.MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) class DistilBertForQuestionAnswering(DistilBertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss, start_scores, end_scores = outputs[:3] """ def __init__(self, config): super(DistilBertForQuestionAnswering, self).__init__(config) self.distilbert = DistilBertModel(config) self.qa_outputs = nn.Linear(config.dim, config.num_labels) assert config.num_labels == 2 self.dropout = nn.Dropout(config.qa_dropout) self.init_weights() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds) hidden_states = distilbert_output[0] # (bs, max_query_len, dim) hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim) logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) # (bs, max_query_len) end_logits = end_logits.squeeze(-1) # (bs, max_query_len) outputs = (start_logits, end_logits,) + distilbert_output[1:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) @add_start_docstrings("""DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) class DistilBertForTokenClassification(DistilBertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') model = DistilBertForTokenClassification.from_pretrained('distilbert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] """ def __init__(self, config): super(DistilBertForTokenClassification, self).__init__(config) self.num_labels = config.num_labels self.distilbert = DistilBertModel(config) self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions)
39,452
49.710797
201
py
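A minimal sketch (not part of the DeeBERT record above; toy shapes and random inputs are stand-ins) of how the `qa_outputs` head in `DistilBertForQuestionAnswering` splits its `(batch, seq_len, 2)` output into start/end scores and reduces them to a greedy answer span:

import torch

bs, seq_len, dim = 2, 16, 8                       # toy sizes standing in for config.dim
hidden_states = torch.randn(bs, seq_len, dim)     # stand-in for the DistilBERT output
qa_outputs = torch.nn.Linear(dim, 2)              # mirrors nn.Linear(config.dim, 2)

logits = qa_outputs(hidden_states)                # (bs, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)           # (bs, seq_len)
end_logits = end_logits.squeeze(-1)               # (bs, seq_len)

# Greedy span selection: one (start, end) index pair per example.
start_idx = start_logits.argmax(dim=-1)
end_idx = end_logits.argmax(dim=-1)
print(list(zip(start_idx.tolist(), end_idx.tolist())))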
DeeBERT
DeeBERT-master/transformers/configuration_camembert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CamemBERT configuration """

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import logging

from .configuration_roberta import RobertaConfig

logger = logging.getLogger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-config.json",
}


class CamembertConfig(RobertaConfig):
    pretrained_config_archive_map = CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
1,222
34.970588
103
py
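A small usage sketch, assuming this repo's `transformers` package is importable from the working directory; it only illustrates that `CamembertConfig` inherits the RoBERTa/BERT-style defaults and merely swaps in the CamemBERT archive map:

from transformers.configuration_camembert import (CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
                                                   CamembertConfig)

config = CamembertConfig()                         # RoBERTa-style defaults, nothing overridden
print(config.hidden_size, config.num_hidden_layers)
print(CamembertConfig.pretrained_config_archive_map is CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP)  # True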
DeeBERT
DeeBERT-master/transformers/tokenization_xlm.py
# coding=utf-8 # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import json import logging import os import re import sys import unicodedata from io import open import sacremoses as sm from .tokenization_utils import PreTrainedTokenizer from .tokenization_bert import BasicTokenizer logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-vocab.json", 'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-vocab.json", 'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-vocab.json", 'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-vocab.json", 'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-vocab.json", 'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-vocab.json", 'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-vocab.json", 'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-vocab.json", 'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-vocab.json", 'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-vocab.json", }, 'merges_file': { 'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-merges.txt", 'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt", 'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt", 'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-merges.txt", 'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-merges.txt", 'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-merges.txt", 'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-merges.txt", 'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-merges.txt", 'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-merges.txt", 'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'xlm-mlm-en-2048': 512, 'xlm-mlm-ende-1024': 512, 'xlm-mlm-enfr-1024': 512, 'xlm-mlm-enro-1024': 512, 'xlm-mlm-tlm-xnli15-1024': 512, 'xlm-mlm-xnli15-1024': 512, 'xlm-clm-enfr-1024': 512, 'xlm-clm-ende-1024': 512, 'xlm-mlm-17-1280': 
512, 'xlm-mlm-100-1280': 512, } PRETRAINED_INIT_CONFIGURATION = { 'xlm-mlm-en-2048': {"do_lowercase_and_remove_accent": True}, 'xlm-mlm-ende-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "de", "1": "en"}, "lang2id": { "de": 0, "en": 1 }}, 'xlm-mlm-enfr-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "en", "1": "fr"}, "lang2id": { "en": 0, "fr": 1 }}, 'xlm-mlm-enro-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "en", "1": "ro"}, "lang2id": { "en": 0, "ro": 1 }}, 'xlm-mlm-tlm-xnli15-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "ar", "1": "bg", "2": "de", "3": "el", "4": "en", "5": "es", "6": "fr", "7": "hi", "8": "ru", "9": "sw", "10": "th", "11": "tr", "12": "ur", "13": "vi", "14": "zh"}, "lang2id": { "ar": 0, "bg": 1, "de": 2, "el": 3, "en": 4, "es": 5, "fr": 6, "hi": 7, "ru": 8, "sw": 9, "th": 10, "tr": 11, "ur": 12, "vi": 13, "zh": 14 }}, 'xlm-mlm-xnli15-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "ar", "1": "bg", "2": "de", "3": "el", "4": "en", "5": "es", "6": "fr", "7": "hi", "8": "ru", "9": "sw", "10": "th", "11": "tr", "12": "ur", "13": "vi", "14": "zh"}, "lang2id": { "ar": 0, "bg": 1, "de": 2, "el": 3, "en": 4, "es": 5, "fr": 6, "hi": 7, "ru": 8, "sw": 9, "th": 10, "tr": 11, "ur": 12, "vi": 13, "zh": 14 }}, 'xlm-clm-enfr-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "en", "1": "fr"}, "lang2id": { "en": 0, "fr": 1 }}, 'xlm-clm-ende-1024': { "do_lowercase_and_remove_accent": True, "id2lang": { "0": "de", "1": "en"}, "lang2id": { "de": 0, "en": 1 }}, 'xlm-mlm-17-1280': {"do_lowercase_and_remove_accent": False, "id2lang": { "0": "ar", "1": "de", "2": "en", "3": "es", "4": "fr", "5": "hi", "6": "it", "7": "ja", "8": "ko", "9": "nl", "10": "pl", "11": "pt", "12": "ru", "13": "sv", "14": "tr", "15": "vi", "16": "zh" }, "lang2id": { "ar": 0, "de": 1, "en": 2, "es": 3, "fr": 4, "hi": 5, "it": 6, "ja": 7, "ko": 8, "nl": 9, "pl": 10, "pt": 11, "ru": 12, "sv": 13, "tr": 14, "vi": 15, "zh": 16}}, 'xlm-mlm-100-1280': {"do_lowercase_and_remove_accent": False, "id2lang": { "0": "af", "1": "als", "2": "am", "3": "an", "4": "ang", "5": "ar", "6": "arz", "7": "ast", "8": "az", "9": "bar", "10": "be", "11": "bg", "12": "bn", "13": "br", "14": "bs", "15": "ca", "16": "ceb", "17": "ckb", "18": "cs", "19": "cy", "20": "da", "21": "de", "22": "el", "23": "en", "24": "eo", "25": "es", "26": "et", "27": "eu", "28": "fa", "29": "fi", "30": "fr", "31": "fy", "32": "ga", "33": "gan", "34": "gl", "35": "gu", "36": "he", "37": "hi", "38": "hr", "39": "hu", "40": "hy", "41": "ia", "42": "id", "43": "is", "44": "it", "45": "ja", "46": "jv", "47": "ka", "48": "kk", "49": "kn", "50": "ko", "51": "ku", "52": "la", "53": "lb", "54": "lt", "55": "lv", "56": "mk", "57": "ml", "58": "mn", "59": "mr", "60": "ms", "61": "my", "62": "nds", "63": "ne", "64": "nl", "65": "nn", "66": "no", "67": "oc", "68": "pl", "69": "pt", "70": "ro", "71": "ru", "72": "scn", "73": "sco", "74": "sh", "75": "si", "76": "simple", "77": "sk", "78": "sl", "79": "sq", "80": "sr", "81": "sv", "82": "sw", "83": "ta", "84": "te", "85": "th", "86": "tl", "87": "tr", "88": "tt", "89": "uk", "90": "ur", "91": "uz", "92": "vi", "93": "war", "94": "wuu", "95": "yi", "96": "zh", "97": "zh_classical", "98": "zh_min_nan", "99": "zh_yue" }, "lang2id": { "af": 0, "als": 1, "am": 2, "an": 3, "ang": 4, "ar": 5, "arz": 6, "ast": 7, "az": 8, "bar": 9, "be": 10, "bg": 11, "bn": 12, "br": 13, "bs": 14, "ca": 15, "ceb": 16, "ckb": 17, "cs": 
18, "cy": 19, "da": 20, "de": 21, "el": 22, "en": 23, "eo": 24, "es": 25, "et": 26, "eu": 27, "fa": 28, "fi": 29, "fr": 30, "fy": 31, "ga": 32, "gan": 33, "gl": 34, "gu": 35, "he": 36, "hi": 37, "hr": 38, "hu": 39, "hy": 40, "ia": 41, "id": 42, "is": 43, "it": 44, "ja": 45, "jv": 46, "ka": 47, "kk": 48, "kn": 49, "ko": 50, "ku": 51, "la": 52, "lb": 53, "lt": 54, "lv": 55, "mk": 56, "ml": 57, "mn": 58, "mr": 59, "ms": 60, "my": 61, "nds": 62, "ne": 63, "nl": 64, "nn": 65, "no": 66, "oc": 67, "pl": 68, "pt": 69, "ro": 70, "ru": 71, "scn": 72, "sco": 73, "sh": 74, "si": 75, "simple": 76, "sk": 77, "sl": 78, "sq": 79, "sr": 80, "sv": 81, "sw": 82, "ta": 83, "te": 84, "th": 85, "tl": 86, "tr": 87, "tt": 88, "uk": 89, "ur": 90, "uz": 91, "vi": 92, "war": 93, "wuu": 94, "yi": 95, "zh": 96, "zh_classical": 97, "zh_min_nan": 98, "zh_yue": 99 }}, } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def lowercase_and_remove_accent(text): """ Lowercase and strips accents from a piece of text based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py """ text = ' '.join(text) text = text.lower() text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output).lower().split(' ') def replace_unicode_punct(text): ''' Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl ''' text = text.replace(',', ',') text = re.sub(r'。\s*', '. ', text) text = text.replace('、', ',') text = text.replace('”', '"') text = text.replace('“', '"') text = text.replace('∶', ':') text = text.replace(':', ':') text = text.replace('?', '?') text = text.replace('《', '"') text = text.replace('》', '"') text = text.replace(')', ')') text = text.replace('!', '!') text = text.replace('(', '(') text = text.replace(';', ';') text = text.replace('1', '"') text = text.replace('」', '"') text = text.replace('「', '"') text = text.replace('0', '0') text = text.replace('3', '3') text = text.replace('2', '2') text = text.replace('5', '5') text = text.replace('6', '6') text = text.replace('9', '9') text = text.replace('7', '7') text = text.replace('8', '8') text = text.replace('4', '4') text = re.sub(r'.\s*', '. 
', text) text = text.replace('~', '~') text = text.replace('’', '\'') text = text.replace('…', '...') text = text.replace('━', '-') text = text.replace('〈', '<') text = text.replace('〉', '>') text = text.replace('【', '[') text = text.replace('】', ']') text = text.replace('%', '%') return text def remove_non_printing_char(text): ''' Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl ''' output = [] for char in text: cat = unicodedata.category(char) if cat.startswith('C'): continue output.append(char) return "".join(output) def romanian_preprocessing(text): '''Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`''' # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219") text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b") # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py text = text.replace("\u0218", "S").replace("\u0219", "s") #s-comma text = text.replace("\u021a", "T").replace("\u021b", "t") #t-comma text = text.replace("\u0102", "A").replace("\u0103", "a") text = text.replace("\u00C2", "A").replace("\u00E2", "a") text = text.replace("\u00CE", "I").replace("\u00EE", "i") return text class XLMTokenizer(PreTrainedTokenizer): """ BPE tokenizer for XLM - Moses preprocessing & tokenization for most supported languages - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP) - (optionally) lower case & normalize all inputs text - argument ``special_tokens`` and function ``set_special_tokens``, can be used to add additional symbols \ (ex: "__classify__") to a vocabulary - `lang2id` attribute maps the languages supported by the model with their ids if provided (automatically set for pretrained vocabularies) - `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies) - `do_lowercase_and_remove_accent` controle lower casing and accent (automatically set for pretrained vocabularies) """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, merges_file, unk_token="<unk>", bos_token="<s>", sep_token="</s>", pad_token="<pad>", cls_token="</s>", mask_token="<special1>", additional_special_tokens=["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs): super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs) # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = dict() # cache of sm.MosesTokenizer instance self.cache_moses_tokenizer = dict() self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja']) # True for current supported model (v1.2.0), False for XLM-17 & 100 self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None self.encoder = 
json.load(open(vocab_file, encoding="utf-8")) self.decoder = {v:k for k,v in self.encoder.items()} merges = open(merges_file, encoding='utf-8').read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text def ja_tokenize(self, text): if self.ja_word_tokenizer is None: try: import Mykytea self.ja_word_tokenizer = Mykytea.Mykytea('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~')) except (AttributeError, ImportError) as e: logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps") logger.error("1. git clone [email protected]:neubig/kytea.git && cd kytea") logger.error("2. autoreconf -i") logger.error("3. ./configure --prefix=$HOME/local") logger.error("4. make && make install") logger.error("5. pip install kytea") raise e return list(self.ja_word_tokenizer.getWS(text)) @property def vocab_size(self): return len(self.encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token+'</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n </w>': word = '\n</w>' self.cache[token] = word return word def _tokenize(self, text, lang='en', bypass_tokenizer=False): """ Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizerself. Otherwise, we use Moses. 
Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer - Install with `pip install pythainlp` - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of [KyTea](https://github.com/neubig/kytea) - Install with the following steps: ``` git clone [email protected]:neubig/kytea.git && cd kytea autoreconf -i ./configure --prefix=$HOME/local make && make install pip install kytea ``` - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer * - Install with `pip install jieba` \* The original XLM used [Stanford Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you fine-tune the model with Chinese supervisionself. If you want the same exact behaviour, use the original XLM [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence externally, and set `bypass_tokenizer=True` to bypass the tokenizer. Args: - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ if lang and self.lang2id and lang not in self.lang2id: logger.error("Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.") if bypass_tokenizer: text = text.split() elif lang not in self.lang_with_custom_tokenizer: text = self.moses_pipeline(text, lang=lang) # TODO: make sure we are using `xlm-mlm-enro-1024`, since XLM-100 doesn't have this step if lang == 'ro': text = romanian_preprocessing(text) text = self.moses_tokenize(text, lang=lang) elif lang == 'th': text = self.moses_pipeline(text, lang=lang) try: if 'pythainlp' not in sys.modules: from pythainlp.tokenize import word_tokenize as th_word_tokenize else: th_word_tokenize = sys.modules['pythainlp'].word_tokenize except (AttributeError, ImportError) as e: logger.error("Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps") logger.error("1. pip install pythainlp") raise e text = th_word_tokenize(text) elif lang == 'zh': try: if 'jieba' not in sys.modules: import jieba else: jieba = sys.modules['jieba'] except (AttributeError, ImportError) as e: logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps") logger.error("1. pip install jieba") raise e text = ' '.join(jieba.cut(text)) text = self.moses_pipeline(text, lang=lang) text = text.split() elif lang == 'ja': text = self.moses_pipeline(text, lang=lang) text = self.ja_tokenize(text) else: raise ValueError('It should not reach here') if self.do_lowercase_and_remove_accent and not bypass_tokenizer: text = lowercase_and_remove_accent(text) split_tokens = [] for token in text: if token: split_tokens.extend([t for t in self.bpe(token).split(' ')]) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. 
""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ''.join(tokens).replace('</w>', ' ').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: single sequence: <s> X </s> pair of sequences: <s> A </s></s> B </s> """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] cls = [self.cls_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError("You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model.") return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." 
" Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 return vocab_file, merge_file
37,149
43.544365
185
py
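A toy walk-through of the BPE loop used by `XLMTokenizer.bpe` above: the pair with the lowest rank in `bpe_ranks` is merged repeatedly until no known pair remains. The merge table below is invented for illustration, and the inner merge step is a simplified rewrite of the original index-based loop:

def get_pairs(word):
    # Same helper as in the file above: adjacent symbol pairs of a word tuple.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

word = ('l', 'o', 'w', 'e', 'r</w>')
bpe_ranks = {('l', 'o'): 0, ('lo', 'w'): 1}        # toy merge table
while True:
    pairs = get_pairs(word)
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float('inf')))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)          # apply the highest-priority merge
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)                                         # ('low', 'e', 'r</w>')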
DeeBERT
DeeBERT-master/transformers/tokenization_ctrl.py
# coding=utf-8 # Copyright 2018 Salesforce and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Salesforce CTRL.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import json import logging import os import regex as re from io import open from .tokenization_utils import PreTrainedTokenizer logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'ctrl': "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json", }, 'merges_file': { 'ctrl': "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'ctrl': 256, } CONTROL_CODES = { "Pregnancy": 168629, "Christianity": 7675, "Explain": 106423, "Fitness": 63440, "Saving": 63163, "Ask": 27171, "Ass": 95985, "Joke": 163509, "Questions": 45622, "Thoughts": 49605, "Retail": 52342, "Feminism": 164338, "Writing": 11992, "Atheism": 192263, "Netflix": 48616, "Computing": 39639, "Opinion": 43213, "Alone": 44967, "Funny": 58917, "Gaming": 40358, "Human": 4088, "India": 1331, "Joker": 77138, "Diet": 36206, "Legal": 11859, "Norman": 4939, "Tip": 72689, "Weight": 52343, "Movies": 46273, "Running": 23425, "Science": 2090, "Horror": 37793, "Confession": 60572, "Finance": 12250, "Politics": 16360, "Scary": 191985, "Support": 12654, "Technologies": 32516, "Teenage": 66160, "Event": 32769, "Learned": 67460, "Notion": 182770, "Wikipedia": 37583, "Books": 6665, "Extract": 76050, "Confessions": 102701, "Conspiracy": 75932, "Links": 63674, "Narcissus": 150425, "Relationship": 54766, "Relationships": 134796, "Reviews": 41671, "News": 4256, "Translation": 26820, "multilingual": 128406, } def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = set(pairs) return pairs class CTRLTokenizer(PreTrainedTokenizer): """ CTRL BPE tokenizer. 
Peculiarities: - Byte-Pair-Encoding """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES control_codes = CONTROL_CODES def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs): super(CTRLTokenizer, self).__init__(unk_token=unk_token, **kwargs) self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens self.encoder = json.load(open(vocab_file, encoding="utf-8")) self.decoder = {v:k for k,v in self.encoder.items()} merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} @property def vocab_size(self): return len(self.encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) word = tuple(list(word[:-1]) + [word[-1]+'</w>']) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = '@@ '.join(word) word = word[:-4] self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ split_tokens = [] text = text.split(' ') for token in text: split_tokens.extend([t for t in self.bpe(token).split(' ')]) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ' '.join(tokens).replace('@@ ', '').strip() return out_string def save_vocabulary(self, save_directory): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write(u'#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." 
" Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
7,964
31.777778
133
py
DeeBERT
DeeBERT-master/transformers/modeling_tf_gpt2.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT-2 model. """ from __future__ import absolute_import, division, print_function, unicode_literals import collections import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings, TFSequenceSummary, shape_list, get_initializer) from .configuration_gpt2 import GPT2Config from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-tf_model.h5", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-tf_model.h5", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-tf_model.h5", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-tf_model.h5",} def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf class TFAttention(tf.keras.layers.Layer): def __init__(self, nx, n_ctx, config, scale=False, **kwargs): super(TFAttention, self).__init__(**kwargs) self.output_attentions = config.output_attentions n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.n_ctx = n_ctx self.n_head = config.n_head self.split_size = n_state self.scale = scale self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn') self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj') self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. """ i = tf.range(nd)[:,None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, inputs, training=False): q, k, v, attention_mask, head_mask = inputs # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
_, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask w = w + attention_mask w = tf.nn.softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if self.output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, inputs, training=False): x, layer_past, attention_mask, head_mask = inputs x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=1) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) present = tf.stack([key, value], axis=1) attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super(TFMLP, self).__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc') self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj') self.act = gelu self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, n_ctx, config, scale=False, **kwargs): super(TFBlock, self).__init__(**kwargs) nx = config.n_embd self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.attn = TFAttention(nx, n_ctx, config, scale, name='attn') self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2') self.mlp = TFMLP(4 * nx, config, name='mlp') def call(self, inputs, training=False): x, layer_past, attention_mask, head_mask = inputs a = self.ln_1(x) output_attn = self.attn([a, layer_past, attention_mask, head_mask], training=training) a = output_attn[0] # output_attn: a, present, (attentions) x = x + a m = self.ln_2(x) m = self.mlp(m, training=training) x = x + m outputs = [x] + output_attn[1:] return outputs # x, present, (attentions) class TFGPT2MainLayer(tf.keras.layers.Layer): def __init__(self, config, *inputs, **kwargs): super(TFGPT2MainLayer, self).__init__(config, *inputs, **kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.num_hidden_layers = config.n_layer self.vocab_size = config.vocab_size self.n_embd = config.n_embd self.wte = TFSharedEmbeddings(config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name='wte') self.wpe = tf.keras.layers.Embedding(config.n_positions, 
config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name='wpe') self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config.n_ctx, config, scale=True, name='h_._{}'.format(i)) for i in range(config.n_layer)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f') def get_input_embeddings(self): return self.wte def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past attention_mask = inputs[2] if len(inputs) > 2 else attention_mask token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) assert len(inputs) <= 7, "Too many inputs." else: input_ids = inputs if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 else: attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode='embedding') else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () all_attentions = [] all_hidden_states = () for i, (block, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, layer_past, attention_mask, head_mask[i]], training=training) hidden_states, present = outputs[:2] presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states, presents) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs # last hidden state, presents, (all hidden_states), (attentions) class TFGPT2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = GPT2Config pretrained_model_archive_map = TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in `Language Models are Unsupervised Multitask Learners`_ by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~40 GB of text data. This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`Language Models are Unsupervised Multitask Learners`: https://openai.com/blog/better-language-models/ .. 
_`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: `model(inputs_ids) - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associaed to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.BPT2Tokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **past**: list of ``Numpy array`` or ``tf.Tensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past` output below). Can be used to speed up sequential decoding. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). **position_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
**inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class TFGPT2Model(TFGPT2PreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, *inputs, **kwargs): super(TFGPT2Model, self).__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: `tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of `tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = TFGPT2LMHeadModel.from_pretrained('gpt2')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        logits = outputs[0]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFGPT2LMHeadModel, self).__init__(config, *inputs, **kwargs)
        self.transformer = TFGPT2MainLayer(config, name='transformer')

    def get_output_embeddings(self):
        return self.transformer.wte

    def call(self, inputs, **kwargs):
        transformer_outputs = self.transformer(inputs, **kwargs)
        hidden_states = transformer_outputs[0]

        lm_logits = self.transformer.wte(hidden_states, mode="linear")

        outputs = (lm_logits,) + transformer_outputs[1:]

        return outputs  # lm_logits, presents, (all hidden_states), (attentions)


@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top
e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
        **past**: list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2')

        # Add a [CLS] to the vocabulary (we should train it also!)
        # This option is currently not implemented in TF 2.0
        raise NotImplementedError
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly token the last token of the vocabulary

        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        input_ids = tf.constant(encoded_choices)[None, :]  # Batch size: 1, number of choices: 2
        mc_token_ids = tf.constant([cls_token_location])  # Batch size: 1

        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFGPT2DoubleHeadsModel, self).__init__(config, *inputs, **kwargs)
        self.transformer = TFGPT2MainLayer(config, name='transformer')
        self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head')

    def get_output_embeddings(self):
        return self.transformer.wte

    def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
             head_mask=None, inputs_embeds=None, mc_token_ids=None, training=False):
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            past = inputs[1] if len(inputs) > 1 else past
            attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
            token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
            position_ids = inputs[4] if len(inputs) > 4 else position_ids
            head_mask = inputs[5] if len(inputs) > 5 else head_mask
            inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
            mc_token_ids = inputs[7] if len(inputs) > 7 else mc_token_ids
            assert len(inputs) <= 8, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get('input_ids')
            past = inputs.get('past', past)
            attention_mask = inputs.get('attention_mask', attention_mask)
            token_type_ids = inputs.get('token_type_ids', token_type_ids)
            position_ids = inputs.get('position_ids', position_ids)
            head_mask = inputs.get('head_mask', head_mask)
            inputs_embeds = inputs.get('inputs_embeds', inputs_embeds)
            mc_token_ids = inputs.get('mc_token_ids', mc_token_ids)
            assert len(inputs) <= 8, "Too many inputs."
        else:
            input_ids = inputs

        if input_ids is not None:
            input_shapes = shape_list(input_ids)
        else:
            input_shapes = shape_list(inputs_embeds)[:-1]

        seq_length = input_shapes[-1]

        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None

        flat_inputs = [flat_input_ids, past, flat_attention_mask, flat_token_type_ids, flat_position_ids,
                       head_mask, inputs_embeds]

        transformer_outputs = self.transformer(flat_inputs, training=training)
        hidden_states = transformer_outputs[0]

        hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])

        lm_logits = self.transformer.wte(hidden_states, mode="linear")
        mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)

        mc_logits = tf.squeeze(mc_logits, axis=-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]

        return outputs  # lm logits, mc logits, presents, (all hidden_states), (attentions)
32,228
49.834385
193
py
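TFGPT2LMHeadModel above returns raw logits only, so any decoding loop lives in caller code. A minimal greedy-decoding sketch, assuming the 'gpt2' checkpoint named in the docstrings; the loop, step count, and variable names are illustrative and not part of the repository:

import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2')

generated = tf.constant([tokenizer.encode("Hello, my dog is")])  # shape (1, seq_len)
for _ in range(10):  # illustrative number of generation steps
    logits = model(generated)[0]  # (1, seq_len, vocab_size)
    next_id = tf.argmax(logits[:, -1, :], axis=-1, output_type=tf.int32)
    generated = tf.concat([generated, next_id[:, None]], axis=1)

print(tokenizer.decode(generated[0].numpy().tolist()))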
DeeBERT
DeeBERT-master/transformers/modeling_tf_transfo_xl.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 Transformer XL model. """ from __future__ import absolute_import, division, print_function, unicode_literals import os import json import math import logging import collections import sys from io import open import numpy as np import tensorflow as tf from .configuration_transfo_xl import TransfoXLConfig from .modeling_tf_utils import TFPreTrainedModel, TFConv1D, TFSequenceSummary, shape_list, get_initializer from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-tf_model.h5", } class TFPositionalEmbedding(tf.keras.layers.Layer): def __init__(self, demb, **kwargs): super(TFPositionalEmbedding, self).__init__(**kwargs) self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb)) def call(self, pos_seq, bsz=None): sinusoid_inp = tf.einsum('i,j->ij', pos_seq, self.inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) if bsz is not None: return tf.tile(pos_emb[:, None, :], [1, bsz, 1]) else: return pos_emb[:, None, :] class TFPositionwiseFF(tf.keras.layers.Layer): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs): super(TFPositionwiseFF, self).__init__(**kwargs) self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.layer_1 = tf.keras.layers.Dense(d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name='CoreNet_._0') self.drop_1 = tf.keras.layers.Dropout(dropout) self.layer_2 = tf.keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name='CoreNet_._3') self.drop_2 = tf.keras.layers.Dropout(dropout) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layer_norm') self.pre_lnorm = pre_lnorm def call(self, inp, training=False): if self.pre_lnorm: ##### layer normalization + positionwise feed-forward core_out = self.layer_norm(inp) core_out = self.layer_1(core_out) core_out = self.drop_1(core_out, training=training) core_out = self.layer_2(core_out) core_out = self.drop_2(core_out, training=training) ##### residual connection output = core_out + inp else: ##### positionwise feed-forward core_out = self.layer_1(inp) core_out = self.drop_1(core_out, training=training) core_out = self.layer_2(core_out) core_out = self.drop_2(core_out, training=training) ##### residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class TFRelPartialLearnableMultiHeadAttn(tf.keras.layers.Layer): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False, r_r_bias=None, 
r_w_bias=None, output_attentions=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs): super(TFRelPartialLearnableMultiHeadAttn, self).__init__(**kwargs) self.output_attentions = output_attentions self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = tf.keras.layers.Dense(3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name='qkv_net') self.drop = tf.keras.layers.Dropout(dropout) self.dropatt = tf.keras.layers.Dropout(dropatt) self.o_net = tf.keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name='o_net') self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layer_norm') self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm if r_r_bias is not None and r_w_bias is not None: # Biases are shared self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias else: self.r_r_bias = None self.r_w_bias = None self.r_net = tf.keras.layers.Dense(self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name='r_net') def build(self, input_shape): if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared self.r_r_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_r_bias') self.r_w_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_w_bias') super(TFRelPartialLearnableMultiHeadAttn, self).build(input_shape) def _rel_shift(self, x): x_size = shape_list(x) x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]]) x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]]) x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) x = tf.reshape(x, x_size) return x def call(self, inputs, training=False): w, r, attn_mask, mems, head_mask = inputs qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1] if mems is not None: cat = tf.concat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1) klen = shape_list(w_head_k)[0] w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # qlen x n_head x d_head #### compute attention score rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head AC = tf.einsum('ibnd,jbnd->ijbn', rw_head_q, w_head_k) # qlen x klen x bsz x n_head rr_head_q = w_head_q + self.r_r_bias BD = tf.einsum('ibnd,jnd->ijbn', rr_head_q, r_head_k) # qlen x klen x bsz x n_head BD = self._rel_shift(BD) # [qlen x klen x bsz x n_head] attn_score = AC + BD attn_score = attn_score * self.scale #### compute attention probability if attn_mask is not None: attn_mask_t = attn_mask[:, :, None, None] attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t # [qlen x klen x bsz x n_head] attn_prob = tf.nn.softmax(attn_score, axis=1) attn_prob = self.dropatt(attn_prob, training=training) # Mask heads if we want to if head_mask is not 
None: attn_prob = attn_prob * head_mask #### compute attention vector attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec_sizes = shape_list(attn_vec) attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head)) ##### linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out, training=training) if self.pre_lnorm: ##### residual connection outputs = [w + attn_out] else: ##### residual connection + layer normalization outputs = [self.layer_norm(w + attn_out)] if self.output_attentions: outputs.append(attn_prob) return outputs class TFRelPartialLearnableDecoderLayer(tf.keras.layers.Layer): def __init__(self, n_head, d_model, d_head, d_inner, dropout, tgt_len=None, ext_len=None, mem_len=None, dropatt=0., pre_lnorm=False, r_w_bias=None, r_r_bias=None, output_attentions=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs): super(TFRelPartialLearnableDecoderLayer, self).__init__(**kwargs) self.dec_attn = TFRelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm, r_w_bias=r_w_bias, r_r_bias=r_r_bias, init_std=init_std, output_attentions=output_attentions, layer_norm_epsilon=layer_norm_epsilon, name='dec_attn') self.pos_ff = TFPositionwiseFF(d_model, d_inner, dropout, pre_lnorm=pre_lnorm, init_std=init_std, layer_norm_epsilon=layer_norm_epsilon, name='pos_ff') def call(self, inputs, training=False): dec_inp, r, dec_attn_mask, mems, head_mask = inputs attn_outputs = self.dec_attn([dec_inp, r, dec_attn_mask, mems, head_mask], training=training) ff_output = self.pos_ff(attn_outputs[0], training=training) outputs = [ff_output] + attn_outputs[1:] return outputs class TFAdaptiveEmbedding(tf.keras.layers.Layer): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs): super(TFAdaptiveEmbedding, self).__init__(**kwargs) self.n_token = n_token self.d_embed = d_embed self.init_std = init_std self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = [] self.emb_projs = [] if div_val == 1: raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(tf.keras.layers.Embedding(r_idx-l_idx, d_emb_i, embeddings_initializer=get_initializer(init_std), name='emb_layers_._{}'.format(i))) def build(self, input_shape): for i in range(len(self.cutoffs)): d_emb_i = self.d_embed // (self.div_val ** i) self.emb_projs.append(self.add_weight(shape=(d_emb_i, self.d_proj), initializer=get_initializer(self.init_std), trainable=True, name='emb_projs_._{}'.format(i))) super(TFAdaptiveEmbedding, self).build(input_shape) def call(self, inp): if self.div_val == 1: raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint else: inp_flat = tf.reshape(inp, (-1,)) emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj]) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = tf.einsum('id,de->ie', emb_i, 
self.emb_projs[i]) mask_idx = tf.cast(tf.where(mask_i), dtype=tf.int64) emb_flat += tf.scatter_nd(mask_idx, emb_i, tf.cast(tf.shape(emb_flat), dtype=tf.int64)) embed_shape = shape_list(inp) + [self.d_proj] embed = tf.reshape(emb_flat, embed_shape) embed *= self.emb_scale return embed class TFTransfoXLMainLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFTransfoXLMainLayer, self).__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.n_token = config.n_token self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.untie_r = config.untie_r self.word_emb = TFAdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, init_std=config.init_std, name='word_emb') self.drop = tf.keras.layers.Dropout(config.dropout) self.n_layer = config.n_layer self.tgt_len = config.tgt_len self.mem_len = config.mem_len self.ext_len = config.ext_len self.max_klen = config.tgt_len + config.ext_len + config.mem_len self.attn_type = config.attn_type self.layers = [] if config.attn_type == 0: # the default attention for i in range(config.n_layer): self.layers.append( TFRelPartialLearnableDecoderLayer( config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if self.untie_r else self.r_w_bias, r_r_bias=None if self.untie_r else self.r_r_bias, output_attentions=self.output_attentions, layer_norm_epsilon=config.layer_norm_epsilon, init_std=config.init_std, name='layers_._{}'.format(i)) ) else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: # default attention self.pos_emb = TFPositionalEmbedding(self.d_model, name='pos_emb') else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint def build(self, input_shape): if not self.untie_r: self.r_w_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_w_bias') self.r_r_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_r_bias') super(TFTransfoXLMainLayer, self).build(input_shape) def get_input_embeddings(self): return self.word_emb def _resize_token_embeddings(self, new_num_tokens): return self.word_emb def backward_compatible(self): self.sample_softmax = -1 def reset_length(self, tgt_len, ext_len, mem_len): self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len def _prune_heads(self, heads): raise NotImplementedError def init_mems(self, bsz): if self.mem_len > 0: mems = [] for i in range(self.n_layer): empty = tf.zeros([self.mem_len, bsz, self.d_model]) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, qlen, mlen): # does not deal with None if mems is None: return None # mems is not None assert len(hids) == len(mems), 'len(hids) != len(mems)' # There are `mlen + qlen` steps that can be cached into mems # For the next step, the last `ext_len` of the `qlen` tokens # will be used as the extended context. 
Hence, we only cache # the tokens from `mlen + qlen - self.ext_len - self.mem_len` # to `mlen + qlen - self.ext_len`. new_mems = [] end_idx = mlen + max(0, qlen - 0 - self.ext_len) beg_idx = max(0, end_idx - self.mem_len) for i in range(len(hids)): cat = tf.concat([mems[i], hids[i]], axis=0) tf.stop_gradient(cat) new_mems.append(cat[beg_idx:end_idx]) return new_mems def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] mems = inputs[1] if len(inputs) > 1 else mems head_mask = inputs[2] if len(inputs) > 2 else head_mask inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds assert len(inputs) <= 4, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') mems = inputs.get('mems', mems) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) assert len(inputs) <= 4, "Too many inputs." else: input_ids = inputs # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library # so we transpose here from shape [bsz, len] to shape [len, bsz] if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = tf.transpose(input_ids, perm=(1, 0)) qlen, bsz = shape_list(input_ids) elif inputs_embeds is not None: inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) qlen, bsz = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if mems is None: mems = self.init_mems(bsz) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.n_layer if inputs_embeds is not None: word_emb = inputs_embeds else: word_emb = self.word_emb(input_ids) mlen = shape_list(mems[0])[0] if mems is not None else 0 klen = mlen + qlen attn_mask = tf.ones([qlen, qlen]) mask_u = tf.linalg.band_part(attn_mask, 0, -1) mask_dia = tf.linalg.band_part(attn_mask, 0, 0) attn_mask_pad = tf.zeros([qlen, mlen]) dec_attn_mask = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) if self.same_length: mask_l = tf.linalg.band_part(attn_mask, -1, 0) dec_attn_mask = tf.concat([dec_attn_mask[:, :qlen] + mask_l - mask_dia, dec_attn_mask[:, qlen:]], 1) # ::: PyTorch masking code for reference ::: # if self.same_length: # all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8) # mask_len = klen - self.mem_len # if mask_len > 0: # mask_shift_len = qlen - mask_len # else: # mask_shift_len = qlen # dec_attn_mask = (torch.triu(all_ones, 1+mlen) # + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1 # else: # dec_attn_mask = torch.triu( # word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1+mlen)[:,:,None] hids = [] attentions = [] if self.attn_type == 0: # default pos_seq = tf.range(klen-1, -1, -1.0) if self.clamp_len > 0: pos_seq = tf.minimum(pos_seq, self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb, training=training) pos_emb = self.drop(pos_emb, training=training) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] 
layer_outputs = layer([core_out, pos_emb, dec_attn_mask, mems_i, head_mask[i]], training=training) core_out = layer_outputs[0] if self.output_attentions: attentions.append(layer_outputs[1]) else: # learnable embeddings and absolute embeddings raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint core_out = self.drop(core_out, training=training) new_mems = self._update_mems(hids, mems, mlen, qlen) # We transpose back here to shape [bsz, len, hidden_dim] outputs = [tf.transpose(core_out, perm=(1, 0, 2)), new_mems] if self.output_hidden_states: # Add last layer and transpose to library standard shape [bsz, len, hidden_dim] hids.append(core_out) hids = list(tf.transpose(t, perm=(1, 0, 2)) for t in hids) outputs.append(hids) if self.output_attentions: # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len] attentions = list(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions) outputs.append(attentions) return outputs # last hidden state, new_mems, (all hidden states), (all attentions) class TFTransfoXLPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = TransfoXLConfig pretrained_model_archive_map = TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" TRANSFO_XL_START_DOCSTRING = r""" The Transformer-XL model was proposed in `Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context`_ by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. It's a causal (uni-directional) transformer with relative positioning (sinusoïdal) embeddings which can reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax inputs and outputs (tied). This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context`: https://arxiv.org/abs/1901.02860 .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: `model(inputs_ids) - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associaed to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ TRANSFO_XL_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. Transformer-XL is a model with relative position embeddings so you can either pad the inputs on the right or on the left. Indices can be obtained using :class:`transformers.TransfoXLTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **mems**: (`optional`) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", TRANSFO_XL_START_DOCSTRING, TRANSFO_XL_INPUTS_DOCSTRING) class TFTransfoXLModel(TFTransfoXLPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **mems**: list of ``tf.Tensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import TransfoXLTokenizer, TFTransfoXLModel tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TFTransfoXLModel.from_pretrained('transfo-xl-wt103') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states, mems = outputs[:2] """ def __init__(self, config, *inputs, **kwargs): super(TFTransfoXLModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFTransfoXLMainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive input embeddings)""", TRANSFO_XL_START_DOCSTRING, TRANSFO_XL_INPUTS_DOCSTRING) class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: ``None`` if ``lm_labels`` is provided else ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). We don't output them when the loss is computed to speedup adaptive softmax decoding. **mems**: list of ``tf.Tensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103') model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores, mems = outputs[:2] """ def __init__(self, config): super(TFTransfoXLLMHeadModel, self).__init__(config) self.transformer = TFTransfoXLMainLayer(config, name='transformer') self.sample_softmax = config.sample_softmax # use sampled softmax if config.sample_softmax > 0: raise NotImplementedError # use adaptive softmax (including standard softmax) else: self.crit = TFAdaptiveSoftmaxMask(config.n_token, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name='crit') def reset_length(self, tgt_len, ext_len, mem_len): self.transformer.reset_length(tgt_len, ext_len, mem_len) def init_mems(self, bsz): return self.transformer.init_mems(bsz) def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, labels=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] mems = inputs[1] if len(inputs) > 1 else mems head_mask = inputs[2] if len(inputs) > 2 else head_mask inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds labels = inputs[4] if len(inputs) > 4 else labels assert len(inputs) <= 5, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') mems = inputs.get('mems', mems) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) labels = inputs.get('labels', labels) assert len(inputs) <= 5, "Too many inputs." else: input_ids = inputs if input_ids is not None: bsz, tgt_len = shape_list(input_ids)[:2] else: bsz, tgt_len = shape_list(inputs_embeds)[:2] transformer_outputs = self.transformer([input_ids, mems, head_mask, inputs_embeds], training=training) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] outputs = transformer_outputs[1:] if self.sample_softmax > 0 and training: raise NotImplementedError else: # pred_hid = tf.reshape(pred_hid, (-1, shape_list(pred_hid)[-1])) softmax_output = self.crit([pred_hid, labels], training=training) # softmax_output = tf.reshape(softmax_output, (bsz, tgt_len, -1)) outputs = [softmax_output] + outputs return outputs # logits, new_mems, (all hidden states), (all attentions)
36,425
45.820051
193
py
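A rough sketch of how the `mems` returned by TFTransfoXLLMHeadModel can be fed back in to extend the attention context across segments; the sentence, the segment split, and the variable names are illustrative assumptions, only the 'transfo-xl-wt103' checkpoint name comes from the docstrings above:

import tensorflow as tf
from transformers import TransfoXLTokenizer, TFTransfoXLLMHeadModel

tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')

ids = tokenizer.encode("the cache lets later segments attend to earlier ones without recomputing them")
half = len(ids) // 2
segment_1 = tf.constant([ids[:half]])
segment_2 = tf.constant([ids[half:]])

scores_1, mems = model(segment_1)[:2]          # first segment: fresh memory is created and returned
scores_2, mems = model([segment_2, mems])[:2]  # second segment: cached states let it attend to segment 1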
DeeBERT
DeeBERT-master/transformers/tokenization_camembert.py
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for Camembert model."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import logging
import os
from shutil import copyfile

import sentencepiece as spm

from transformers.tokenization_utils import PreTrainedTokenizer

logger = logging.getLogger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file':
    {
        'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': None,
}


class CamembertTokenizer(PreTrainedTokenizer):
    """
        Adapted from RobertaTokenizer and XLNetTokenizer
        SentencePiece based tokenizer. Peculiarities:

            - requires `SentencePiece <https://github.com/google/sentencepiece>`_
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>',
                 additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
        super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token,
                                                 unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
                                                 pad_token=pad_token, mask_token=mask_token,
                                                 additional_special_tokens=additional_special_tokens, **kwargs)
        self.max_len_single_sentence = self.max_len - 2  # take into account special tokens
        self.max_len_sentences_pair = self.max_len - 4  # take into account special tokens
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        A RoBERTa sequence has the following format:
            single sequence: <s> X </s>
            pair of sequences: <s> A </s></s> B </s>
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formated with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError("You should not supply a second sequence if the provided sequence of "
                                 "ids is already formated with special tokens for the model.")
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A RoBERTa sequence pair mask has the following format:
        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence

        if token_ids_1 is None, only returns the first portion of the mask (0's).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]

    @property
    def vocab_size(self):
        return self.fairseq_offset + len(self.sp_model)

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def save_vocabulary(self, save_directory):
        """ Save the sentencepiece vocabulary (copy original file) and special tokens file
            to a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
7,409
45.898734
124
py
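A short usage sketch for the CamembertTokenizer defined above, assuming the sentencepiece package and the 'camembert-base' vocabulary file are available; the French sentences are made-up examples:

from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained('camembert-base')

ids_a = tokenizer.encode("J'aime le camembert", add_special_tokens=False)
ids_b = tokenizer.encode("Le camembert est délicieux", add_special_tokens=False)

single = tokenizer.build_inputs_with_special_tokens(ids_a)       # <s> A </s>
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)  # <s> A </s></s> B </s>
mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)           # 1 marks special token positions
token_types = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)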
DeeBERT
DeeBERT-master/transformers/modeling_tf_auto.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ from __future__ import absolute_import, division, print_function, unicode_literals import logging from .modeling_tf_bert import TFBertModel, TFBertForMaskedLM, TFBertForSequenceClassification, TFBertForQuestionAnswering from .modeling_tf_openai import TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel from .modeling_tf_gpt2 import TFGPT2Model, TFGPT2LMHeadModel from .modeling_tf_transfo_xl import TFTransfoXLModel, TFTransfoXLLMHeadModel from .modeling_tf_xlnet import TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, TFXLNetForQuestionAnsweringSimple from .modeling_tf_xlm import TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple from .modeling_tf_roberta import TFRobertaModel, TFRobertaForMaskedLM, TFRobertaForSequenceClassification from .modeling_tf_distilbert import TFDistilBertModel, TFDistilBertForQuestionAnswering, TFDistilBertForMaskedLM, TFDistilBertForSequenceClassification from .modeling_tf_ctrl import TFCTRLModel, TFCTRLLMHeadModel from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) class TFAutoModel(object): r""" :class:`~transformers.TFAutoModel` is a generic model class that will be instantiated as one of the base model classes of the library when created with the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)` class method. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. The base model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertModel (DistilBERT model) - contains `roberta`: TFRobertaModel (RoBERTa model) - contains `bert`: TFBertModel (Bert model) - contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model) - contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model) - contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model) - contains `xlnet`: TFXLNetModel (XLNet model) - contains `xlm`: TFXLMModel (XLM model) - contains `ctrl`: TFCTRLModel (CTRL model) This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError("TFAutoModel is designed to be instantiated " "using the `TFAutoModel.from_pretrained(pretrained_model_name_or_path)` method.") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the base model classes of the library from a pre-trained model configuration. 
The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertModel (DistilBERT model) - contains `roberta`: TFRobertaModel (RoBERTa model) - contains `bert`: TFTFBertModel (Bert model) - contains `openai-gpt`: TFOpenAIGPTModel (OpenAI GPT model) - contains `gpt2`: TFGPT2Model (OpenAI GPT-2 model) - contains `transfo-xl`: TFTransfoXLModel (Transformer-XL model) - contains `xlnet`: TFXLNetModel (XLNet model) - contains `ctrl`: TFCTRLModel (CTRL model) Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. from_pt: (`Optional`) Boolean Set to True if the Checkpoint is a PyTorch checkpoint. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). 
Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = TFAutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = TFAutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = TFAutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) """ if 'distilbert' in pretrained_model_name_or_path: return TFDistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return TFRobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: return TFBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'openai-gpt' in pretrained_model_name_or_path: return TFOpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'gpt2' in pretrained_model_name_or_path: return TFGPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'transfo-xl' in pretrained_model_name_or_path: return TFTransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlnet' in pretrained_model_name_or_path: return TFXLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlm' in pretrained_model_name_or_path: return TFXLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'ctrl' in pretrained_model_name_or_path: return TFCTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)) class TFAutoModelWithLMHead(object): r""" :class:`~transformers.TFAutoModelWithLMHead` is a generic model class that will be instantiated as one of the language modeling model classes of the library when created with the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` class method. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. 
The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model) - contains `roberta`: TFRobertaForMaskedLM (RoBERTa model) - contains `bert`: TFBertForMaskedLM (Bert model) - contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model) - contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model) - contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model) - contains `xlnet`: TFXLNetLMHeadModel (XLNet model) - contains `xlm`: TFXLMWithLMHeadModel (XLM model) - contains `ctrl`: TFCTRLLMHeadModel (CTRL model) This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError("TFAutoModelWithLMHead is designed to be instantiated " "using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the language modeling model classes of the library from a pre-trained model configuration. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForMaskedLM (DistilBERT model) - contains `roberta`: TFRobertaForMaskedLM (RoBERTa model) - contains `bert`: TFBertForMaskedLM (Bert model) - contains `openai-gpt`: TFOpenAIGPTLMHeadModel (OpenAI GPT model) - contains `gpt2`: TFGPT2LMHeadModel (OpenAI GPT-2 model) - contains `transfo-xl`: TFTransfoXLLMHeadModel (Transformer-XL model) - contains `xlnet`: TFXLNetLMHeadModel (XLNet model) - contains `xlm`: TFXLMWithLMHeadModel (XLM model) - contains `ctrl`: TFCTRLLMHeadModel (CTRL model) Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. from_pt: (`Optional`) Boolean Set to True if the Checkpoint is a PyTorch checkpoint. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. 
state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = TFAutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` model = TFAutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) """ if 'distilbert' in pretrained_model_name_or_path: return TFDistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return TFRobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: return TFBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'openai-gpt' in pretrained_model_name_or_path: return TFOpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'gpt2' in pretrained_model_name_or_path: return TFGPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'transfo-xl' in pretrained_model_name_or_path: return TFTransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlnet' in pretrained_model_name_or_path: return TFXLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlm' in pretrained_model_name_or_path: return TFXLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'ctrl' in pretrained_model_name_or_path: return TFCTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)) class TFAutoModelForSequenceClassification(object): r""" :class:`~transformers.TFAutoModelForSequenceClassification` is a generic model class that will be instantiated as one of the sequence classification model classes of the library when created with the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` class method. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model) - contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model) - contains `bert`: TFBertForSequenceClassification (Bert model) - contains `xlnet`: TFXLNetForSequenceClassification (XLNet model) - contains `xlm`: TFXLMForSequenceClassification (XLM model) This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError("TFAutoModelWithLMHead is designed to be instantiated " "using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the sequence classification model classes of the library from a pre-trained model configuration. 
The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForSequenceClassification (DistilBERT model) - contains `roberta`: TFRobertaForSequenceClassification (RoBERTa model) - contains `bert`: TFBertForSequenceClassification (Bert model) - contains `xlnet`: TFXLNetForSequenceClassification (XLNet model) - contains `xlm`: TFXLMForSequenceClassification (XLM model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. from_pt: (`Optional`) Boolean Set to True if the Checkpoint is a PyTorch checkpoint. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. 
kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = TFAutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = TFAutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) """ if 'distilbert' in pretrained_model_name_or_path: return TFDistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return TFRobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: return TFBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlnet' in pretrained_model_name_or_path: return TFXLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlm' in pretrained_model_name_or_path: return TFXLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path)) class TFAutoModelForQuestionAnswering(object): r""" :class:`~transformers.TFAutoModelForQuestionAnswering` is a generic model class that will be instantiated as one of the question answering model classes of the library when created with the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` class method. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. 
The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model) - contains `bert`: TFBertForQuestionAnswering (Bert model) - contains `xlnet`: TFXLNetForQuestionAnswering (XLNet model) - contains `xlm`: TFXLMForQuestionAnswering (XLM model) This class cannot be instantiated using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError("TFAutoModelWithLMHead is designed to be instantiated " "using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates one of the question answering model classes of the library from a pre-trained model configuration. The `from_pretrained()` method takes care of returning the correct model class instance using pattern matching on the `pretrained_model_name_or_path` string. The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: TFDistilBertForQuestionAnswering (DistilBERT model) - contains `bert`: TFBertForQuestionAnswering (Bert model) - contains `xlnet`: TFXLNetForQuestionAnswering (XLNet model) - contains `xlm`: TFXLMForQuestionAnswering (XLM model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. from_pt: (`Optional`) Boolean Set to True if the Checkpoint is a PyTorch checkpoint. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. 
cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = TFAutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) """ if 'distilbert' in pretrained_model_name_or_path: return TFDistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: return TFBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlnet' in pretrained_model_name_or_path: return TFXLNetForQuestionAnsweringSimple.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlm' in pretrained_model_name_or_path: return TFXLMForQuestionAnsweringSimple.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'xlnet', 'xlm'".format(pretrained_model_name_or_path))
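# --- Editor's note: illustrative sketch, not part of the original DeeBERT/transformers source. ---
# The TFAutoModel* dispatchers above select a concrete class by substring matching
# on `pretrained_model_name_or_path`, so branch order matters: both 'distilbert'
# and 'roberta' contain the substring 'bert' and must be tested before the plain
# 'bert' branch.  The toy function below reproduces only that control flow with
# placeholder family names; every identifier in it is hypothetical.

def _toy_auto_dispatch(name):
    ordered_patterns = [
        ("distilbert", "DistilBERT"),
        ("roberta", "RoBERTa"),
        ("bert", "BERT"),
        ("xlnet", "XLNet"),
        ("xlm", "XLM"),
    ]
    # First pattern that appears in the name wins, mirroring the elif chains above.
    for pattern, family in ordered_patterns:
        if pattern in name:
            return family
    raise ValueError("Unrecognized model identifier in {}".format(name))


assert _toy_auto_dispatch("distilbert-base-uncased") == "DistilBERT"
assert _toy_auto_dispatch("roberta-base") == "RoBERTa"
assert _toy_auto_dispatch("bert-base-uncased") == "BERT"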
36,704
70.970588
472
py
DeeBERT
DeeBERT-master/transformers/configuration_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BERT model configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json", 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json", 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json", 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json", 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json", 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json", 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json", 'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json", 'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json", } class BertConfig(PretrainedConfig): r""" :class:`~transformers.BertConfig` is the configuration class to store the configuration of a `BertModel`. Arguments: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. """ pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size_or_config_json_file=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, **kwargs): super(BertConfig, self).__init__(**kwargs) if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps else: raise ValueError("First argument must be either a vocabulary size (int)" " or the path to a pretrained model config file (str)")
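# --- Editor's note: illustrative sketch, not part of the original DeeBERT/transformers source. ---
# BertConfig's first argument is overloaded: an int is taken as the vocabulary size,
# while a string is taken as a path to a JSON file whose keys are copied verbatim
# onto the config object.  A minimal exercise of both branches, assuming this
# `transformers` package is importable:
import json
import tempfile

from transformers import BertConfig

# 1) Integer branch: first argument is the vocab size, remaining settings are kwargs.
kwarg_config = BertConfig(30522, hidden_size=384, num_hidden_layers=6)
assert kwarg_config.hidden_size == 384

# 2) JSON-path branch: only the keys present in the file are set on the object.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"vocab_size": 30522, "hidden_size": 384, "num_hidden_layers": 6}, f)
    json_path = f.name

json_config = BertConfig(json_path)
assert json_config.num_hidden_layers == 6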
6,684
56.62931
181
py
DeeBERT
DeeBERT-master/transformers/modeling_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import copy import json import logging import os from io import open import six import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .configuration_utils import PretrainedConfig from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME, TF2_WEIGHTS_NAME logger = logging.getLogger(__name__) try: from torch.nn import Identity except ImportError: # Older PyTorch compatibility class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive. """ def __init__(self, *args, **kwargs): super(Identity, self).__init__() def forward(self, input): return input class PreTrainedModel(nn.Module): r""" Base class for all models. :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. Class attributes (overridden by derived classes): - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values. - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`, - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`, - ``path``: a path (string) to the TensorFlow checkpoint. - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. """ config_class = None pretrained_model_archive_map = {} load_tf_weights = lambda model, config, path: None base_model_prefix = "" def __init__(self, config, *inputs, **kwargs): super(PreTrainedModel, self).__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. 
" "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) # Save config in model self.config = config @property def base_model(self): return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self): """ Get model's input embeddings """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): """ Set model's input embeddings """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self): """ Get model's output embeddings Return None if the model doesn't have output embeddings """ return None # Overwrite for models with output embeddings def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """ Tie or clone module weights depending of weither we are using TorchScript or not """ if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if hasattr(output_embeddings, 'bias') and output_embeddings.bias is not None: output_embeddings.bias.data = torch.nn.functional.pad( output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), 'constant', 0 ) if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): output_embeddings.out_features = input_embeddings.num_embeddings def resize_token_embeddings(self, new_num_tokens=None): """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens: (`optional`) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model. Return: ``torch.nn.Embeddings`` Pointer to the input tokens Embeddings Module of the model """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed model_embeds = base_model._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens # Tie weights again if needed if hasattr(self, 'tie_weights'): self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Module from a provided token Embedding Module. 
Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end Args: new_num_tokens: (`optional`) int New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end If not provided or None: return the provided token Embedding Module. Return: ``torch.nn.Embeddings`` Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy word embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) # Tie weights if needed self.tie_weights() def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON self.base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method. """ assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, 'module') else self # Save configuration file model_to_save.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r"""Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with ``model.train()`` The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. 
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``) model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). 
Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop('config', None) state_dict = kwargs.pop('state_dict', None) cache_dir = kwargs.pop('cache_dir', None) from_tf = kwargs.pop('from_tf', False) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) # Load config if config is None: config, model_kwargs = cls.config_class.from_pretrained( pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, **kwargs ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): # Load from a TF 1.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError("Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path)) elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path else: assert from_tf, "Error finding file {}, no file or TF 1.X checkpoint found".format(pretrained_model_name_or_path) archive_file = pretrained_model_name_or_path + ".index" # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError: if 
pretrained_model_name_or_path in cls.pretrained_model_archive_map: msg = "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file) else: msg = "Model name '{}' was not found in model name list ({}). " \ "We assumed '{}' was a path or url to model weight files named one of {} but " \ "couldn't find any such file at this path or url.".format( pretrained_model_name_or_path, ', '.join(cls.pretrained_model_archive_map.keys()), archive_file, [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME]) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] if from_tf: if resolved_archive_file.endswith('.index'): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from transformers import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError as e: logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.") raise e else: # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') # Make sure we are able to load base models as well as derived models (with heads) start_prefix = '' model_to_load = model if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): start_prefix = cls.base_model_prefix + '.' 
if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): model_to_load = getattr(model, cls.base_model_prefix) load(model_to_load, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) if hasattr(model, 'tie_weights'): model.tie_weights() # make sure word embedding weights are still tied # Set model in evaluation mode to desactivate DropOut modules by default model.eval() if output_loading_info: loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} return model, loading_info return model class Conv1D(nn.Module): def __init__(self, nf, nx): """ Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2) Basically works like a Linear layer but the weights are transposed """ super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class PoolerStartLogits(nn.Module): """ Compute SQuAD start_logits from sequence hidden states. """ def __init__(self, config): super(PoolerStartLogits, self).__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states, p_mask=None): """ Args: **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)` invalid position mask such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerEndLogits(nn.Module): """ Compute SQuAD end_logits from sequence hidden states and start token hidden state. """ def __init__(self, config): super(PoolerEndLogits, self).__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None): """ Args: One of ``start_states``, ``start_positions`` should be not None. If both are set, ``start_positions`` overrides ``start_states``. **start_states**: ``torch.LongTensor`` of shape identical to hidden_states hidden states of the first tokens for the labeled span. **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span: **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` Mask of invalid position such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. 
""" assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """ def __init__(self, config): super(PoolerAnswerClass, self).__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None): """ Args: One of ``start_states``, ``start_positions`` should be not None. If both are set, ``start_positions`` overrides ``start_states``. **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``. hidden states of the first tokens for the labeled span. **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span. **cls_index**: torch.LongTensor of shape ``(batch_size,)`` position of the CLS token. If None, take the last token. note(Original repo): no dependency on end_feature so that we can obtain one single `cls_logits` for each sample """ hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x class SQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Parameters: config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Inputs: **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)`` hidden states of sequence tokens **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the first token for the labeled span. **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` position of the last token for the labeled span. **cls_index**: torch.LongTensor of shape ``(batch_size,)`` position of the CLS token. If None, take the last token. **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)`` Whether the question has a possible answer in the paragraph or not. 
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` Mask of invalid position such as query and special symbols (PAD, SEP, CLS) 1.0 means token should be masked. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)`` Log probabilities for the top config.start_n_top start token possibilities (beam-search). **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)`` Indices for the top config.start_n_top start token possibilities (beam-search). **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) ``torch.FloatTensor`` of shape ``(batch_size,)`` Log probabilities for the ``is_impossible`` label of the answers. 
""" def __init__(self, config): super(SQuADHead, self).__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) def forward(self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None): outputs = () start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 outputs = (total_loss,) + outputs else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits # or (if labels are provided) (total_loss,) return outputs class SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states according to various possibilities: Args of the config class: summary_type: - 'last' => [default] take the last token hidden state (like XLNet) - 'first' => take the first token hidden state (like Bert) - 'mean' => take the mean 
of all tokens hidden states - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2) - 'attn' => Not implemented now, use multi-head attention summary_use_proj: Add a projection after the vector extraction summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False. summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default summary_first_dropout: Add a dropout before the projection and activation summary_last_dropout: Add a dropout after the projection and activation """ def __init__(self, config): super(SequenceSummary, self).__init__() self.summary_type = config.summary_type if hasattr(config, 'summary_use_proj') else 'last' if self.summary_type == 'attn': # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = Identity() if hasattr(config, 'summary_use_proj') and config.summary_use_proj: if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) self.activation = Identity() if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh': self.activation = nn.Tanh() self.first_dropout = Identity() if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward(self, hidden_states, cls_index=None): """ hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer. cls_index: [optional] position of the classification token if summary_type == 'cls_index', shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states. if summary_type == 'cls_index' and cls_index is None: we take the last token of the sequence as classification token """ if self.summary_type == 'last': output = hidden_states[:, -1] elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': output = hidden_states.mean(dim=1) elif self.summary_type == 'cls_index': if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == 'attn': raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def prune_linear_layer(layer, index, dim=0): """ Prune a linear layer (a model parameters) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. 
""" index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_conv1d_layer(layer, index, dim=1): """ Prune a Conv1D layer (a model parameters) to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer(layer, index, dim=None): """ Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. """ if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError("Can't prune layer of class {}".format(layer.__class__))
44,706
51.596471
472
py
DeeBERT
DeeBERT-master/transformers/modeling_highway_bert.py
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_bert import BertLayer, BertLayerNorm, BertPreTrainedModel def entropy(x): # x: torch.Tensor, logits BEFORE softmax x = torch.softmax(x, dim=-1) # softmax normalized prob distribution return -torch.sum(x*torch.log(x), dim=-1) # entropy calculation on probs: -\sum(p \ln(p)) class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): print(x) if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): all_hidden_states = () all_attentions = () all_highway_exits = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if 
self.output_attentions: current_outputs = current_outputs + (all_attentions,) highway_exit = self.highway[i](current_outputs) # logits, pooled_output if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy all_highway_exits = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: # weight_func = lambda x: torch.exp(-3 * x) - 0.5**3 # weight_func = lambda x: 2 - torch.exp(x) # weighted_logits = \ # sum([weight_func(x[2]) * x[0] for x in all_highway_exits]) /\ # sum([weight_func(x[2]) for x in all_highway_exits]) # new_output = (weighted_logits,) + current_outputs[1:] + (all_highway_exits,) new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) else: all_highway_exits = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() # Pooler weights also needs to be loaded, especially in Highway! def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertModel(BertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(BertModel, self).__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None): """ Forward pass on the Model. The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`; an `encoder_hidden_states` is expected as an input to the forward pass. .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762 """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if attention_mask.dim() == 2: if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer # start from 1! 
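# --- Editor's note: an illustrative sketch, not part of the original DeeBERT source. ---
# HighwayException above is used purely for control flow: BertEncoder raises it from inside
# its layer loop as soon as an off-ramp classifier is confident enough, and
# BertForSequenceClassification (further below) catches it to recover the partial outputs and
# the exit layer. The toy code below shows just that raise/catch pattern with made-up names;
# it is plain Python and has nothing BERT-specific in it.
#
# class _EarlyExit(Exception):
#     def __init__(self, payload, exit_layer):
#         self.payload = payload
#         self.exit_layer = exit_layer  # counted from 1, like HighwayException
#
# def _run_layers(confidences, threshold):
#     for i, c in enumerate(confidences):
#         if c < threshold:                 # "confident enough" at layer i
#             raise _EarlyExit(c, i + 1)    # short-circuit the remaining layers
#     return confidences[-1]
#
# try:
#     result = _run_layers([0.9, 0.4, 0.1], threshold=0.5)
# except _EarlyExit as e:
#     result, exit_layer = e.payload, e.exit_layer  # -> 0.4, exit at layer 2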
class BertHighway(nn.Module): r"""A module to provide a shortcut from the output of one non-final BertLayer in BertEncoder to cross-entropy computation in BertForSequenceClassification """ def __init__(self, config): super(BertHighway, self).__init__() self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): # Pooler pooler_input = encoder_outputs[0] pooler_output = self.pooler(pooler_input) # "return" pooler_output # BertModel bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output class BertForSequenceClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(BertForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False): exit_layer = self.num_layers try: outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = (outputs[0],) +\ (highway_logits_all[output_layer],) +\ outputs[2:] ## use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (entropies), (exit_layer)
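Editor's note: the entropy() helper at the top of this file is what drives DeeBERT's early exit. Each highway classifier turns its logits into a probability distribution and the model exits as soon as that distribution's entropy drops below the per-layer threshold set with set_early_exit_entropy(). A minimal, runnable sketch of that decision rule, using only PyTorch and made-up logits and a hypothetical threshold value:

import torch

def entropy(x):
    # same formula as in the file above: softmax, then -sum(p * log p) over the labels
    p = torch.softmax(x, dim=-1)
    return -torch.sum(p * torch.log(p), dim=-1)

confident_logits = torch.tensor([[8.0, -4.0]])  # peaked distribution -> entropy near 0
uncertain_logits = torch.tensor([[0.1, 0.0]])   # near-uniform -> entropy close to ln(2)
threshold = 0.3                                 # hypothetical early_exit_entropy value

print(entropy(confident_logits) < threshold)    # tensor([True])  -> take the off-ramp
print(entropy(uncertain_logits) < threshold)    # tensor([False]) -> continue to deeper layers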
22,285
48.414634
150
py
DeeBERT
DeeBERT-master/transformers/modeling_tf_openai.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT model.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings, TFSequenceSummary, shape_list, get_initializer) from .configuration_openai import OpenAIGPTConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"} def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def swish(x): return x * tf.math.sigmoid(x) ACT_FNS = {"gelu": tf.keras.layers.Activation(gelu), "relu": tf.keras.activations.relu, "swish": tf.keras.layers.Activation(swish)} class TFAttention(tf.keras.layers.Layer): def __init__(self, nx, n_ctx, config, scale=False, **kwargs): super(TFAttention, self).__init__(**kwargs) self.output_attentions = config.output_attentions n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.n_ctx = n_ctx self.n_head = config.n_head self.split_size = n_state self.scale = scale self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn') self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj') self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. """ i = tf.range(nd)[:,None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, inputs, training=False): q, k, v, attention_mask, head_mask = inputs # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
_, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask w = w + attention_mask w = tf.nn.softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if self.output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, inputs, training=False): x, attention_mask, head_mask = inputs x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a] + attn_outputs[1:] return outputs # a, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super(TFMLP, self).__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc') self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj') self.act = gelu self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, n_ctx, config, scale=False, **kwargs): super(TFBlock, self).__init__(**kwargs) nx = config.n_embd self.attn = TFAttention(nx, n_ctx, config, scale, name='attn') self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.mlp = TFMLP(4 * nx, config, name='mlp') self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2') def call(self, inputs, training=False): x, attention_mask, head_mask = inputs output_attn = self.attn([x, attention_mask, head_mask], training=training) a = output_attn[0] # output_attn: a, (attentions) n = self.ln_1(x + a) m = self.mlp(n, training=training) h = self.ln_2(n + m) outputs = [h] + output_attn[1:] return outputs # x, (attentions) class TFOpenAIGPTMainLayer(tf.keras.layers.Layer): def __init__(self, config, *inputs, **kwargs): super(TFOpenAIGPTMainLayer, self).__init__(config, *inputs, **kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.num_hidden_layers = config.n_layer self.vocab_size = config.vocab_size self.n_embd = config.n_embd self.tokens_embed = TFSharedEmbeddings(config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name='tokens_embed') self.positions_embed = tf.keras.layers.Embedding(config.n_positions, config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name='positions_embed') self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config.n_ctx, config, scale=True, name='h_._{}'.format(i)) for i in range(config.n_layer)] def 
get_input_embeddings(self): return self.tokens_embed def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) assert len(inputs) <= 6, "Too many inputs." else: input_ids = inputs if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if position_ids is None: position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 else: attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: inputs_embeds = self.tokens_embed(input_ids, mode='embedding') position_embeds = self.positions_embed(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.tokens_embed(token_type_ids, mode='embedding') else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] all_attentions = [] all_hidden_states = () for i, block in enumerate(self.h): if self.output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block([hidden_states, attention_mask, head_mask[i]], training=training) hidden_states = outputs[0] if self.output_attentions: all_attentions.append(outputs[1]) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs # last hidden state, (all hidden_states), (attentions) class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = OpenAIGPTConfig pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" OPENAI_GPT_START_DOCSTRING = r""" OpenAI GPT model was proposed in `Improving Language Understanding by Generative Pre-Training`_ by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. It's a causal (unidirectional) transformer pre-trained using language modeling on a large corpus with long range dependencies, the Toronto Book Corpus. This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`Improving Language Understanding by Generative Pre-Training`: https://openai.com/blog/language-unsupervised/ .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accept two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. 
This second option is useful when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with input_ids only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated with the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices) **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" @add_start_docstrings("The bare OpenAI GPT transformer model outputing raw hidden-states without any specific head on top.", OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING) class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTModel.from_pretrained('openai-gpt') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, *inputs, **kwargs): super(TFOpenAIGPTModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING) class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) logits = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFOpenAIGPTLMHeadModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name='transformer') def get_output_embeddings(self): return self.transformer.tokens_embed def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear") outputs = (lm_logits,) + transformer_outputs[1:] return outputs # lm_logits, (all hidden_states), (attentions) @add_start_docstrings("""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING) class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel): r""" **mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``: Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) - 1[``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') # Add a [CLS] to the vocabulary (we should train it also!) 
# This option is currently not implemented in TF 2.0 raise NotImplementedError tokenizer.add_special_tokens({'cls_token': '[CLS]'}) model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size print(tokenizer.cls_token_id, len(tokenizer)) # The newly token the last token of the vocabulary choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices mc_token_ids = tf.constant([input_ids.size(-1), input_ids.size(-1)])[None, :] # Batch size 1 outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_prediction_scores, mc_prediction_scores = outputs[:2] """ def __init__(self, config, *inputs, **kwargs): super(TFOpenAIGPTDoubleHeadsModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name='transformer') self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head') def get_output_embeddings(self): return self.transformer.tokens_embed def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) mc_token_ids = inputs.get('mc_token_ids', mc_token_ids) assert len(inputs) <= 7, "Too many inputs." else: input_ids = inputs if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds] transformer_outputs = self.transformer(flat_inputs, training=training) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear") mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) outputs = (lm_logits, mc_logits) + transformer_outputs[1:] return outputs # lm logits, mc logits, (all hidden_states), (attentions)
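Editor's note: both the TF code above and the PyTorch BERT code below rely on the same additive masking trick: a 0/1 padding mask is mapped to 0 / -10000 and simply added to the raw attention scores, so masked positions receive (almost) zero weight after the softmax. A small self-contained NumPy illustration with made-up scores and mask values:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

scores = np.array([1.2, 0.3, -0.5, 0.8])   # raw attention scores for one query position
mask = np.array([1.0, 1.0, 1.0, 0.0])      # 1 = real token, 0 = padding
additive = (1.0 - mask) * -10000.0         # 0 for real tokens, -10000 for padding

probs = softmax(scores + additive)
print(probs.round(4))                      # the padded position gets ~0 attention weight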
30,118
49.535235
193
py
DeeBERT
DeeBERT-master/transformers/modeling_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model. """ from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import sys import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_bert import BertConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", 'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin", 'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin", } def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): """ Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def gelu_new(x): """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new} BertLayerNorm = torch.nn.LayerNorm class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.intermediate_act_fn = 
ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" The BERT model was proposed in `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. 
It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. **encoder_hidden_states**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``: Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder. **encoder_attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ @add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertModel(BertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(BertModel, self).__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None): """ Forward pass on the Model. 
        The model can behave as an encoder (with only self-attention) as well
        as a decoder, in which case a layer of cross-attention is added between
        the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
        Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

        To behave as a decoder, the model needs to be initialized with the
        `is_decoder` argument of the configuration set to `True`; in that case,
        `encoder_hidden_states` is expected as an input to the forward pass.

        .. _`Attention is all you need`:
            https://arxiv.org/abs/1706.03762

        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]

        # Provided a padding mask of dimensions [batch_size, seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if attention_mask.dim() == 2:
            if self.config.is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
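        # For illustration: a padding mask of [1, 1, 0] for one sequence becomes
        # (1.0 - [1, 1, 0]) * -10000.0 = [0.0, 0.0, -10000.0], which is added to the raw
        # attention scores so the padded position receives an (almost) zero attention weight.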
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(embedding_output,
                                       attention_mask=extended_attention_mask,
                                       head_mask=head_mask,
                                       encoder_hidden_states=encoder_hidden_states,
                                       encoder_attention_mask=encoder_extended_attention_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
                      BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForPreTraining.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) prediction_scores, seq_relationship_scores = outputs[:2] """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss outputs = (total_loss,) + outputs return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForMaskedLM(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. 
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **masked_lm_loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **ltr_lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Next token prediction loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMaskedLM.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, encoder_hidden_states=None, encoder_attention_mask=None, lm_labels=None, ): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here # Although this may seem awkward, BertForMaskedLM supports two scenarios: # 1. If a tensor that contains the indices of masked labels is provided, # the cross-entropy is the MLM cross-entropy that measures the likelihood # of predictions for masked words. # 2. If `lm_labels` is provided we are in a causal scenario where we # try to predict the next token for each input in the decoder. 
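        # For illustration: with input tokens [t0, t1, t2, t3] and `lm_labels` equal to the
        # input ids, the scores at positions 0..2 are compared against labels [t1, t2, t3]
        # after the shift performed below.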
if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) # -1 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs if lm_labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one prediction_scores = prediction_scores[:, :-1, :].contiguous() lm_labels = lm_labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss(ignore_index=-1) ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1)) outputs = (ltr_lm_loss,) + outputs return outputs # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForNextSentencePrediction(BertPreTrainedModel): r""" **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) Indices should be in ``[0, 1]``. ``0`` indicates sequence B is a continuation of sequence A, ``1`` indicates sequence B is a random sequence. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Next sequence prediction (classification) loss. **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) seq_relationship_scores = outputs[0] """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) outputs = (next_sentence_loss,) + outputs return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions) @add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForSequenceClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(BertForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForMultipleChoice(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMultipleChoice.from_pretrained('bert-base-uncased') choices = ["Hello, my dog is cute", "Hello, my cat is amazing"] input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices labels = torch.tensor(1).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, classification_scores = outputs[:2] """ def __init__(self, config): super(BertForMultipleChoice, self).__init__(config) self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): num_choices = input_ids.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForTokenClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForTokenClassification.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] """ def __init__(self, config): super(BertForTokenClassification, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) class BertForQuestionAnswering(BertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" input_ids = tokenizer.encode(input_text) token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) all_tokens = tokenizer.convert_ids_to_tokens(input_ids) print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) # a nice puppet """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
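# ---------------------------------------------------------------------------
# Editor's note: the block below is a hedged usage sketch, not part of the
# original library code. It shows how the classes defined above fit together
# using a small, randomly initialised configuration (no pretrained weights,
# no network access). The shrunken hyper-parameters are arbitrary values
# chosen only for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = BertConfig()            # library defaults
    config.num_hidden_layers = 2     # shrink the model so the sketch runs quickly
    config.num_attention_heads = 4
    config.hidden_size = 64
    config.intermediate_size = 128

    model = BertModel(config)
    model.eval()

    input_ids = torch.randint(0, config.vocab_size, (1, 8))  # batch of 1, sequence length 8
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        sequence_output, pooled_output = model(input_ids, attention_mask=attention_mask)[:2]

    # sequence_output: (1, 8, 64) per-token states; pooled_output: (1, 64) [CLS] summary
    print(sequence_output.shape, pooled_output.shape)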
67,826
52.239403
187
py