repo (string, lengths 2–99) | file (string, lengths 13–225) | code (string, lengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/losses.py |
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two Gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0 + logvar2 - logvar1 + th.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
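# Example (illustrative sketch, not part of the original module): thanks to
# broadcasting, a batch of Gaussians can be compared against scalar parameters.
def _example_normal_kl():
    mean1, logvar1 = th.zeros(4), th.zeros(4)
    # KL between N(0, 1) per element and a scalar N(1, exp(0.5)), in nats.
    return normal_kl(mean1, logvar1, 1.0, 0.5)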
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed these were uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
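# Example (illustrative sketch, not part of the original module): the inputs
# below are placeholders with the expected shapes and value range.
def _example_discretized_log_likelihood():
    # Images as uint8 values rescaled to [-1, 1].
    x = th.randint(0, 256, (2, 3, 8, 8)).float() / 127.5 - 1.0
    means = th.zeros_like(x)
    log_scales = th.zeros_like(x)
    # Returns per-pixel log probabilities (in nats) with the same shape as x.
    return discretized_gaussian_log_likelihood(x, means=means, log_scales=log_scales)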
| 2,502 | 32.824324 | 109 | py
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/logger.py |
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic.
Call this once for each diagnostic quantity, each iteration.
If called many times, the last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, the values are averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get the directory that log files are being written to.
Will be None if there is no output directory (i.e., if you didn't call configure()).
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
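# Example (illustrative sketch, not part of the original module): typical use of
# the free-function API above. The directory and format list are placeholders;
# calling configure() is optional, since logkv() falls back to a default logger.
def _example_logging_loop():
    configure(dir="/tmp/logger_example", format_strs=["stdout", "csv"])
    for step in range(3):
        logkv("step", step)
        logkv_mean("loss", 1.0 / (step + 1))
        dumpkvs()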
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| 13,964 | 27.269231 | 132 | py
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/nn.py |
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
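# Example (illustrative sketch, not part of the original module): keep an EMA
# copy of a model's parameters and pull it closer after each optimizer step.
def _example_update_ema():
    model = nn.Linear(4, 4)
    ema_params = [p.clone().detach() for p in model.parameters()]
    # ... run an optimizer step on `model`, then update the EMA copy ...
    update_ema(ema_params, model.parameters(), rate=0.999)
    return ema_params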
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
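# Example (illustrative sketch, not part of the original module): embed a batch
# of timesteps into the width expected by a time-embedding MLP.
def _example_timestep_embedding():
    timesteps = th.tensor([0, 250, 999])
    return timestep_embedding(timesteps, dim=128)  # shape [3, 128]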
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
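# Example (illustrative sketch, not part of the original module): recompute a
# layer's activations in the backward pass instead of caching them.
def _example_checkpoint():
    layer = nn.Linear(8, 8)
    x = th.randn(2, 8, requires_grad=True)
    # With flag=False this reduces to a plain call: layer(x).
    return checkpoint(layer, (x,), layer.parameters(), flag=True)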
class CheckpointFunction(th.autograd.Function):
@staticmethod
@th.cuda.amp.custom_fwd
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_length = length
ctx.save_for_backward(*args)
with th.no_grad():
output_tensors = ctx.run_function(*args[:length])
return output_tensors
@staticmethod
@th.cuda.amp.custom_bwd
def backward(ctx, *output_grads):
args = list(ctx.saved_tensors)
# Filter for inputs that require grad. If none, exit early.
input_indices = [i for (i, x) in enumerate(args) if x.requires_grad]
if not input_indices:
return (None, None) + tuple(None for _ in args)
with th.enable_grad():
for i in input_indices:
if i < ctx.input_length:
# Not sure why the OAI code does this little
# dance. It might not be necessary.
args[i] = args[i].detach().requires_grad_()
args[i] = args[i].view_as(args[i])
output_tensors = ctx.run_function(*args[:ctx.input_length])
if isinstance(output_tensors, th.Tensor):
output_tensors = [output_tensors]
# Filter for outputs that require grad. If none, exit early.
out_and_grads = [(o, g) for (o, g) in zip(output_tensors, output_grads) if o.requires_grad]
if not out_and_grads:
return (None, None) + tuple(None for _ in args)
# Compute gradients on the filtered tensors.
computed_grads = th.autograd.grad(
[o for (o, g) in out_and_grads],
[args[i] for i in input_indices],
[g for (o, g) in out_and_grads]
)
# Reassemble the complete gradient tuple.
input_grads = [None for _ in args]
for (i, g) in zip(input_indices, computed_grads):
input_grads[i] = g
return (None, None) + tuple(input_grads)
| 5,835 | 29.554974 | 99 | py
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/fp16_util.py |
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(ll):
"""
Convert primitive modules to float16.
"""
if isinstance(ll, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
ll.weight.data = ll.weight.data.half()
if ll.bias is not None:
ll.bias.data = ll.bias.data.half()
def convert_module_to_f32(ll):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(ll, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
ll.weight.data = ll.weight.data.float()
if ll.bias is not None:
ll.bias.data = ll.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
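# Example (illustrative sketch, not part of the original module): for a model
# that has both 1-D (bias/norm) and matrix parameters, split them into the two
# groups above and build flattened float32 master copies from them.
def _example_master_params(model):
    param_groups_and_shapes = get_param_groups_and_shapes(model.named_parameters())
    master_params = make_master_params(param_groups_and_shapes)
    return param_groups_and_shapes, master_params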
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2 ** self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
| 7,955 | 32.56962 | 114 | py
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/unet.py |
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from transformers import PreTrainedModel, PretrainedConfig
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(1, channels, channels * 3, 1)
if use_new_attention_order:
# split qkv before split heads
self.attention = QKVAttention(self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
def forward(self, x):
return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)
def _forward(self, x):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
h = self.attention(qkv)
h = self.proj_out(h)
return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
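# Example (illustrative shape sketch, not part of the original module): queries,
# keys and values are packed along the channel dimension of a single tensor.
def _example_qkv_attention():
    n_heads, ch, length, bs = 4, 16, 64, 2
    qkv = th.randn(bs, 3 * n_heads * ch, length)  # [N x (3 * H * C) x T]
    attn = QKVAttention(n_heads)
    return attn(qkv)  # [N x (H * C) x T], i.e. [2, 64, 64]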
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
:param num_head_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
return self.out(h)
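# Example (illustrative sketch, not part of the original module): a deliberately
# tiny, unconditional UNet; the hyperparameters are placeholders for a smoke test.
def _example_unet_forward():
    model = UNetModel(
        image_size=32,
        in_channels=3,
        model_channels=32,
        out_channels=3,
        num_res_blocks=1,
        attention_resolutions=(2,),  # attend at 2x downsampling
        channel_mult=(1, 2),
    )
    x = th.randn(2, 3, 32, 32)
    timesteps = th.randint(0, 1000, (2,))
    return model(x, timesteps)  # same spatial size as x, with out_channels channels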
class SuperResModel(UNetModel):
"""
A UNetModel that performs super-resolution.
Expects an extra kwarg `low_res` to condition on a low-resolution image.
"""
def __init__(self, image_size, in_channels, *args, **kwargs):
super().__init__(image_size, in_channels * 2, *args, **kwargs)
def forward(self, x, timesteps, low_res=None, **kwargs):
_, _, new_height, new_width = x.shape
upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
x = th.cat([x, upsampled], dim=1)
return super().forward(x, timesteps, **kwargs)
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
For usage, see UNet.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.pool = pool
if pool == "adaptive":
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
nn.AdaptiveAvgPool2d((1, 1)),
zero_module(conv_nd(dims, ch, out_channels, 1)),
nn.Flatten(),
)
elif pool == "attention":
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, out_channels
),
)
elif pool == "spatial":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
nn.ReLU(),
nn.Linear(2048, self.out_channels),
)
elif pool == "spatial_v2":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
normalization(2048),
nn.SiLU(),
nn.Linear(2048, self.out_channels),
)
else:
raise NotImplementedError(f"Unexpected {pool} pooling")
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = th.cat(results, axis=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
class UNetConfig(PretrainedConfig):
def __init__(
self,
image_size=512,
in_channels=3,
model_channels=256,
out_channels=6,
num_res_blocks=2,
attention_resolutions=[16, 32, 64],
dropout=0.0,
channel_mult=(0.5, 1, 1, 2, 2, 4, 4),
num_classes=None,
use_checkpoint=False,
use_fp16=True,
num_heads=4,
num_head_channels=64,
num_heads_upsample=-1,
use_scale_shift_norm=True,
resblock_updown=True,
use_new_attention_order=False,
**kwargs
):
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.use_fp16 = use_fp16
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.use_scale_shift_norm = use_scale_shift_norm
self.resblock_updown = resblock_updown
self.use_new_attention_order = use_new_attention_order
super().__init__(**kwargs)
class HFUNetModel(PreTrainedModel):
config_class = UNetConfig
def __init__(self, config):
super().__init__(config)
self.model = UNetModel(
image_size=config.image_size,
in_channels=config.in_channels,
model_channels=config.model_channels,
out_channels=config.out_channels,
num_res_blocks=config.num_res_blocks,
attention_resolutions=config.attention_resolutions,
dropout=config.dropout,
channel_mult=config.channel_mult,
num_classes=config.num_classes,
use_checkpoint=config.use_checkpoint,
use_fp16=config.use_fp16,
num_heads=config.num_heads,
num_head_channels=config.num_head_channels,
num_heads_upsample=config.num_heads_upsample,
use_scale_shift_norm=config.use_scale_shift_norm,
resblock_updown=config.resblock_updown,
use_new_attention_order=config.use_new_attention_order,
)
def forward(self, x, timesteps, y=None):
return self.model.forward(x, timesteps, y)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.model.input_blocks.apply(convert_module_to_f16)
self.model.middle_block.apply(convert_module_to_f16)
self.model.output_blocks.apply(convert_module_to_f16)
| 34,109 | 33.94877 | 124 | py
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/script_util.py |
import argparse
import inspect
from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
from .unet import SuperResModel, EncoderUNetModel
NUM_CLASSES = 1000
def diffusion_defaults():
"""
Defaults for image and classifier training.
"""
return dict(
learn_sigma=False,
diffusion_steps=1000,
noise_schedule="linear",
timestep_respacing="",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
)
def classifier_defaults():
"""
Defaults for classifier models.
"""
return dict(
image_size=64,
classifier_use_fp16=False,
classifier_width=128,
classifier_depth=2,
classifier_attention_resolutions="32,16,8", # 16
classifier_use_scale_shift_norm=True, # False
classifier_resblock_updown=True, # False
classifier_pool="attention",
)
def model_and_diffusion_defaults():
"""
Defaults for image training.
"""
res = dict(
image_size=64,
num_channels=128,
num_res_blocks=2,
num_heads=4,
num_heads_upsample=-1,
num_head_channels=-1,
attention_resolutions="16,8",
channel_mult="",
dropout=0.0,
class_cond=False,
use_checkpoint=False,
use_scale_shift_norm=True,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
)
res.update(diffusion_defaults())
return res
def classifier_and_diffusion_defaults():
res = classifier_defaults()
res.update(diffusion_defaults())
return res
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult="",
learn_sigma=False,
class_cond=False,
use_checkpoint=False,
attention_resolutions="16",
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
dropout=0,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
):
if channel_mult == "":
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
else:
channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
# config = UNetConfig()
# return HFUNetModel(config=config)
return None
# return UNetModel(
# image_size=image_size,
# in_channels=3,
# model_channels=num_channels,
# out_channels=(3 if not learn_sigma else 6),
# num_res_blocks=num_res_blocks,
# attention_resolutions=tuple(attention_ds),
# dropout=dropout,
# channel_mult=channel_mult,
# num_classes=(NUM_CLASSES if class_cond else None),
# use_checkpoint=use_checkpoint,
# use_fp16=use_fp16,
# num_heads=num_heads,
# num_head_channels=num_head_channels,
# num_heads_upsample=num_heads_upsample,
# use_scale_shift_norm=use_scale_shift_norm,
# resblock_updown=resblock_updown,
# use_new_attention_order=use_new_attention_order,
# )
def create_classifier_and_diffusion(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
learn_sigma,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
):
classifier = create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return classifier, diffusion
def create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
):
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
attention_ds = []
for res in classifier_attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return EncoderUNetModel(
image_size=image_size,
in_channels=3,
model_channels=classifier_width,
out_channels=1000,
num_res_blocks=classifier_depth,
attention_resolutions=tuple(attention_ds),
channel_mult=channel_mult,
use_fp16=classifier_use_fp16,
num_head_channels=64,
use_scale_shift_norm=classifier_use_scale_shift_norm,
resblock_updown=classifier_resblock_updown,
pool=classifier_pool,
)
def sr_model_and_diffusion_defaults():
res = model_and_diffusion_defaults()
res["large_size"] = 256
res["small_size"] = 64
arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
for k in res.copy().keys():
if k not in arg_names:
del res[k]
return res
def sr_create_model_and_diffusion(
large_size,
small_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
):
model = sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma,
class_cond,
use_checkpoint,
attention_resolutions,
num_heads,
num_head_channels,
num_heads_upsample,
use_scale_shift_norm,
dropout,
resblock_updown,
use_fp16,
):
_ = small_size # hack to prevent unused variable
if large_size == 512:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported large size: {large_size}")
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(large_size // int(res))
return SuperResModel(
image_size=large_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
def create_gaussian_diffusion(
*,
steps=1000,
learn_sigma=False,
sigma_small=False,
noise_schedule="linear",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
timestep_respacing="",
):
betas = gd.get_named_beta_schedule(noise_schedule, steps)
if use_kl:
loss_type = gd.LossType.RESCALED_KL
elif rescale_learned_sigmas:
loss_type = gd.LossType.RESCALED_MSE
else:
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
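# Added note (illustrative values): a typical call for DDIM sampling over a
# learned-variance model looks like
#   create_gaussian_diffusion(steps=1000, learn_sigma=True, timestep_respacing="ddim50")
# which returns a SpacedDiffusion that runs 50 DDIM steps of the 1000-step schedule.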
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
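if __name__ == "__main__":
    # Minimal usage sketch (added; run as part of the package since this module uses
    # relative imports). The defaults dict below is illustrative only.
    demo_parser = argparse.ArgumentParser()
    add_dict_to_argparser(demo_parser, dict(use_fp16=False, timestep_respacing=""))
    demo_args = demo_parser.parse_args(["--use_fp16", "true"])
    print(args_to_dict(demo_args, ["use_fp16", "timestep_respacing"]))
    # -> {'use_fp16': True, 'timestep_respacing': ''}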
| 12,461 | 26.269147 | 88 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/gaussian_diffusion.py
|
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
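# Added note (illustrative): for either named schedule the implied cumulative product
# of (1 - beta) decays monotonically from ~1 toward 0, e.g.
#   betas = get_named_beta_schedule("cosine", 1000)
#   alpha_bar = np.cumprod(1.0 - betas)  # alpha_bar[0] close to 1, alpha_bar[-1] close to 0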
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
        return (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )
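    # Added note: the expression above is the closed-form forward sample
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.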
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
        posterior_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
        )
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
        return (  # (xprev - coef2*x_t) / coef1
            _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
            - _extract_into_tensor(
                self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
            ) * x_t
        )
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - pred_xstart
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, p_mean_var, **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
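    # Added note: the update above is classifier guidance on the score,
    # eps_hat = eps - sqrt(1 - alpha_bar_t) * grad_x log p(y | x_t),
    # after which the posterior mean is re-derived from the adjusted pred_xstart.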
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, t, p_mean_var, **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean_with_grad(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = self.p_sample_with_grad if cond_fn_with_grad else self.p_sample
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
inpainting_mode=False,
orig_img=None,
mask_inpaint=None,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
if inpainting_mode:
noised_orig_img = th.sqrt(alpha_bar) * orig_img + \
th.sqrt(1 - alpha_bar) * th.randn_like(x)
# noised_orig_img_pil = TF.to_pil_image(noised_orig_img[0].add(1).div(2).clamp(0, 1))
# noised_orig_img_pil.save(f'/content/drive/MyDrive/AI/Disco_Diffusion/images_out/InpaintingTest/inpainting_dump/noised_orig_{t[0].item()}.png')
x = (1 - mask_inpaint) * noised_orig_img + mask_inpaint * x
# mixed_x = TF.to_pil_image(x[0].add(1).div(2).clamp(0, 1))
# mixed_x.save(f'/content/drive/MyDrive/AI/Disco_Diffusion/images_out/InpaintingTest/inpainting_dump/mixed_x_{t[0].item()}.png')
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"]}
def ddim_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t,
model_kwargs=model_kwargs)
else:
out = out_orig
out["pred_xstart"] = out["pred_xstart"].detach()
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
        eps = (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
            - out["pred_xstart"]
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
transformation_fn=None,
transformation_percent=[],
inpainting_mode=False,
mask_inpaint=None,
skip_timesteps_orig=None
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
transformation_steps = [int(len(indices) * (1 - i)) for i in transformation_percent]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
if inpainting_mode and skip_timesteps_orig is None:
skip_timesteps_orig = self.num_timesteps
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
if i in transformation_steps and transformation_fn is not None:
img = transformation_fn(img)
sample_fn = self.ddim_sample_with_grad if cond_fn_with_grad else self.ddim_sample
if inpainting_mode \
and i >= self.num_timesteps - skip_timesteps_orig \
and not cond_fn_with_grad:
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
inpainting_mode=inpainting_mode,
orig_img=init_image,
mask_inpaint=mask_inpaint,
)
else:
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def plms_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
cond_fn_with_grad=False,
order=2,
old_out=None,
):
"""
Sample x_{t-1} from the model using Pseudo Linear Multistep.
Same usage as p_sample().
"""
if not int(order) or not 1 <= order <= 4:
raise ValueError('order is invalid (should be int from 1-4).')
def get_model_output(x, t):
with th.set_grad_enabled(cond_fn_with_grad and cond_fn is not None):
x = x.detach().requires_grad_() if cond_fn_with_grad else x
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
if cond_fn_with_grad:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
x = x.detach()
else:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
return eps, out, out_orig
# alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
eps, out, out_orig = get_model_output(x, t)
if order > 1 and old_out is None:
# Pseudo Improved Euler
old_eps = [eps]
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps
eps_2, _, _ = get_model_output(mean_pred, t - 1)
eps_prime = (eps + eps_2) / 2
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime
else:
# Pseudo Linear Multistep (Adams-Bashforth)
old_eps = old_out["old_eps"]
old_eps.append(eps)
cur_order = min(order, len(old_eps))
if cur_order == 1:
eps_prime = old_eps[-1]
elif cur_order == 2:
eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2
elif cur_order == 3:
eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12
elif cur_order == 4:
eps_prime = (55 * old_eps[-1] - 59 * old_eps[-2] + 37 * old_eps[-3] - 9 * old_eps[-4]) / 24
else:
raise RuntimeError('cur_order is invalid.')
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime
if len(old_eps) >= order:
old_eps.pop(0)
nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
sample = mean_pred * nonzero_mask + out["pred_xstart"] * (1 - nonzero_mask)
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"], "old_eps": old_eps}
def plms_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Generate samples from the model using Pseudo Linear Multistep.
Same usage as p_sample_loop().
"""
final = None
for sample in self.plms_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
):
final = sample
return final["sample"]
def plms_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Use PLMS to sample from the model and yield intermediate samples from each
timestep of PLMS.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
old_out = None
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
out = self.plms_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
old_out=old_out,
)
yield out
old_out = out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
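if __name__ == "__main__":
    # Minimal smoke-test sketch (added; run via `python -m guided_diffusion.gaussian_diffusion`
    # because this module uses relative imports). A dummy model that always predicts zero
    # noise stands in for the UNet; the 100-step schedule and tiny shape are illustrative.
    demo_betas = get_named_beta_schedule("linear", 100)
    demo_diffusion = GaussianDiffusion(
        betas=demo_betas,
        model_mean_type=ModelMeanType.EPSILON,
        model_var_type=ModelVarType.FIXED_SMALL,
        loss_type=LossType.MSE,
    )

    def _dummy_model(x, t, **kwargs):
        return th.zeros_like(x)  # pretend the predicted epsilon is zero everywhere

    demo_sample = demo_diffusion.p_sample_loop(_dummy_model, (1, 3, 8, 8), device="cpu")
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])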
| 50,680 | 37.482156 | 185 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/__init__.py
|
"""
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
| 74 | 17.75 | 65 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/respace.py
|
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there's 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim"):])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
f"cannot create exactly {num_timesteps} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
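# Minimal usage sketch (added; values are illustrative): respace a 1000-step linear
# schedule down to 50 DDIM steps. The names used below come from .gaussian_diffusion.
#   diffusion = SpacedDiffusion(
#       use_timesteps=space_timesteps(1000, "ddim50"),
#       betas=get_named_beta_schedule("linear", 1000),
#       model_mean_type=ModelMeanType.EPSILON,
#       model_var_type=ModelVarType.LEARNED_RANGE,
#       loss_type=LossType.MSE,
#   )
#   len(diffusion.timestep_map)  # -> 50 retained timesteps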
| 5,192 | 39.255814 | 85 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_erlangshen_deberta_v2/pretrain_deberta.py
|
from dataclasses import dataclass
from transformers import (
DebertaV2Config,
DebertaV2ForMaskedLM,
AutoTokenizer,
)
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
import argparse
import torch
import os
import numpy as np
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.data_utils.truncate_utils import truncate_segments
from fengshen.data.data_utils.token_type_utils import create_tokens_and_tokentypes
from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from torch.utils.data._utils.collate import default_collate
SHOW_DATA = False
@dataclass
class DeBERTaV2Collator:
'''
    Turns raw inputs into samples, i.e. the final model inputs.
    The main processing logic lives in __call__.
    Includes the masking task, using Whole Word Mask.
'''
    tokenizer: None  # tokenizer used for word segmentation
max_seq_length: 512
masked_lm_prob: 0.15
content_key: str = 'text'
    # one-off preprocessing setup
def setup(self):
self.np_rng = np.random.RandomState(seed=42)
inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
import jieba_fast
self.zh_tokenizer = jieba_fast.lcut
def __call__(self, samples):
'''
        samples: a single sample looks like {"text": "hello world"}
'''
model_inputs = []
for s in samples:
tokenized_sentences = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.tokenize(s[self.content_key]))
if len(tokenized_sentences) == 0:
print('find empty sentence')
continue
tokens_a = tokenized_sentences
            # max_seq_length - 3 because [CLS] [SEP] [SEP] still need to be appended
if len(tokens_a) == 0:
continue
_ = truncate_segments(tokens_a, [], len(tokens_a),
0, self.max_seq_length-3, self.np_rng)
# Build tokens and toketypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, [],
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id)
# Masking.
max_predictions_per_seq = self.masked_lm_prob * len(tokens)
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, self.vocab_id_list, self.vocab_id_to_token_dict, self.masked_lm_prob,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id,
max_predictions_per_seq, self.np_rng,
masking_style='bert',
zh_tokenizer=self.zh_tokenizer)
# Some checks.
num_tokens = len(tokens)
padding_length = self.max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [self.tokenizer.pad_token_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
# Lables and loss mask.
labels = [-100] * self.max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
labels_np = np.array(labels, dtype=np.int64)
model_inputs.append(
{
'input_ids': tokens_np,
'attention_mask': padding_mask_np,
'token_type_ids': tokentypes_np,
'labels': labels_np,
}
)
return default_collate(model_inputs)
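# Minimal usage sketch (added; the model path is an assumption, not pinned by this file):
# driving the collator outside the Lightning data module.
#   tokenizer = AutoTokenizer.from_pretrained("IDEA-CCNL/Erlangshen-DeBERTa-v2-97M-Chinese")
#   collator = DeBERTaV2Collator(tokenizer=tokenizer, max_seq_length=512,
#                                masked_lm_prob=0.15, content_key="text")
#   collator.setup()  # builds the vocab maps and loads jieba_fast
#   batch = collator([{"text": "hello world"}])
#   batch["input_ids"].shape  # torch.Size([1, 512])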
class ErlangshenDeBERTaV2(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Erlangshen Bert')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--max_seq_length', type=int, default=512)
parser.add_argument('--sample_content_key', type=str, default='text')
return parent_parser
def __init__(self, args, tokenizer, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = DebertaV2Config.from_pretrained(args.model_path)
self.config = config
self.tokenizer = tokenizer
self.model = DebertaV2ForMaskedLM(config)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, **batch):
return self.model(**batch)
def detokenize(self, token_ids):
toks = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(toks)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
print(self.config)
print(self.model)
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
output = self(**batch)
self.log('train_loss', output.loss, sync_dist=True)
label_idx = batch['labels'] != -100
acc = self.comput_metrix(
output.logits[label_idx].view(-1, output.logits.size(-1)), batch['labels'][label_idx])
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self(**batch)
self.log('val_loss', output.loss, sync_dist=True)
return output.loss
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older lightning versions, which reset the step count to 0 when resuming from a ckpt
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = ErlangshenDeBERTaV2.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collate_fn = DeBERTaV2Collator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
masked_lm_prob=args.masked_lm_prob,
content_key=args.sample_content_key,
)
collate_fn.setup()
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = ErlangshenDeBERTaV2(args, tokenizer=tokenizer)
print('model load complete')
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
    # Compatibility: if the directory does not exist, drop this argument, otherwise it raises an error
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path)
| 8,886 | 37.97807 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/tcbert/example.py
|
import argparse
from fengshen.pipelines.tcbert import TCBertPipelines
from pytorch_lightning import seed_everything
def main():
seed_everything(123)
total_parser = argparse.ArgumentParser("Topic Classification")
total_parser = TCBertPipelines.piplines_args(total_parser)
args = total_parser.parse_args()
pretrained_model_path = 'IDEA-CCNL/Erlangshen-TCBert-110M-Classification-Chinese'
args.learning_rate = 2e-5
args.max_length = 512
args.max_epochs = 5
args.batchsize = 4
args.train = 'train'
args.default_root_dir = './'
    # args.gpus = 1  # Note: training currently runs on the CPU; uncommenting enables the GPU, which needs a matching GPU environment
    args.fixed_lablen = 2  # Note: a fixed label length can be set; since label lengths may differ across samples, pick a moderate value
    train_data = [  # training data
{"content": "真正的放养教育,放的是孩子的思维,养的是孩子的习惯", "label": "故事"},
{"content": "《唐人街探案》捧红了王宝强跟刘昊然,唯独戏份不少的他发展最差", "label": "娱乐"},
{"content": "油价攀升 阿曼经济加速增长", "label": "财经"},
{"content": "日本男篮近期动作频频,中国队的未来劲敌会是他们吗?", "label": "体育"},
{"content": "教育部:坚决防止因撤并乡村小规模学校导致学生上学困难", "label": "教育"},
{"content": "LOL设计最完美的三个英雄,玩家们都很认可!", "label": "电竞"},
{"content": "上联:浅看红楼终是梦,怎么对下联?", "label": "文化"},
{"content": "楼市再出新政!北京部分限房价项目或转为共有产权房", "label": "房产"},
{"content": "企业怎样选云服务器?云服务器哪家比较好?", "label": "科技"},
{"content": "贝纳利的三缸车TRE899K、TRE1130K华丽转身", "label": "汽车"},
{"content": "如何评价:刘姝威的《严惩做空中国股市者》?", "label": "股票"},
{"content": "宁夏邀深圳市民共赴“寻找穿越”之旅", "label": "旅游"},
{"content": "日本自民党又一派系力挺安倍 称会竭尽全力", "label": "国际"},
{"content": "农村养老保险每年交5000,交满15年退休后能每月领多少钱?", "label": "农业"},
{"content": "国产舰载机首次现身,进度超过预期,将率先在滑跃航母测试", "label": "军事"}
]
    dev_data = [  # validation data
{"content": "西游记后传中,灵儿最爱的女人是谁?不是碧游!", "label": "故事"},
{"content": "小李子莱奥纳多有特别的提袋子技能,这些年他还有过哪些神奇的造型?", "label": "娱乐"},
{"content": "现在手上有钱是投资买房还是存钱,为什么?", "label": "财经"},
{"content": "迪卡侬的衣服值得购买吗?", "label": "体育"},
{"content": "黑龙江省旅游委在齐齐哈尔组织举办导游培训班", "label": "教育"},
{"content": "《王者荣耀》中,哪些英雄的大招最“废柴”?", "label": "电竞"},
{"content": "上交演绎马勒《复活》,用音乐带来抚慰和希望", "label": "文化"},
{"content": "All in服务业,58集团在租房、住房市场的全力以赋", "label": "房产"},
{"content": "为什么有的人宁愿选择骁龙660的X21,也不买骁龙845的小米MIX2S?", "label": "科技"},
{"content": "众泰大型SUV来袭,售13.98万,2.0T榨出231马力,汉兰达要危险了", "label": "汽车"},
{"content": "股票放量下趺,大资金出逃谁在接盘?", "label": "股票"},
{"content": "广西博白最大的特色是什么?", "label": "旅游"},
{"content": "特朗普退出《伊朗核协议》,对此你怎么看?", "label": "国际"},
{"content": "卖水果利润怎么样?", "label": "农业"},
{"content": "特种兵都是身材高大的猛男么?别再被电视骗了,超过1米8都不合格", "label": "军事"}
]
    test_data = [  # test data
{"content": "廖凡重出“江湖”再争影帝 亮相戛纳红毯霸气有型"},
{"content": "《绝地求生: 刺激战场》越玩越卡?竟是手机厂商没交“保护费”!"},
{"content": "买涡轮增压还是自然吸气车?今天终于有答案了!"},
]
    # label mapping: maps the true labels to labels that fit the prompt better
prompt_label = {
"体育":"体育", "军事":"军事", "农业":"农业", "国际":"国际",
"娱乐":"娱乐", "房产":"房产", "故事":"故事", "教育":"教育",
"文化":"文化", "旅游":"旅游", "汽车":"汽车", "电竞":"电竞",
"科技":"科技", "股票":"股票", "财经":"财经"
}
    # different prompts affect model performance
    # prompt = "这一句描述{}的内容如下:"
prompt = "下面是一则关于{}的新闻:"
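    # Illustration (an assumption about how the pipeline uses the template, not part of the
    # original script): each candidate label is substituted into the prompt, e.g.
    #   prompt.format("体育")  ->  "下面是一则关于体育的新闻:"
    # so the wording of the template directly changes the text the model scores for each label.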
model = TCBertPipelines(args, model_path=pretrained_model_path, nlabels=len(prompt_label))
if args.train:
model.train(train_data, dev_data, prompt, prompt_label)
result = model.predict(test_data, prompt, prompt_label)
for i, line in enumerate(result):
print({"content":test_data[i]["content"], "label":list(prompt_label.keys())[line]})
if __name__ == "__main__":
main()
| 3,693 | 41.45977 | 94 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/tcbert/__init__.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ziya_inference/llama_cpp_quantizatin_inference.py
|
"""
dependencies
llama.cpp (https://github.com/ggerganov/llama.cpp)
llama-cpp-python (https://github.com/abetlen/llama-cpp-python)
llama.cpp
1. 通过llama.cpp将模型转换为ggml格式
2. 参考llama.cpp对转换后的模型量化到 (q4_0, q4_1, q5_0, q5_1, q8_0)
- 在转换过程中会遇到tokenizer对不齐的问题,自行在词表中添加相应个数token即可
3. 依据自身环境执行MAKE或CMAKE命令 (若使用GPU则应有相应的cuda-toolkit)
4. ./main -m $(model_path) 运行
llama-cpp-python
1. 参考 https://abetlen.github.io/llama-cpp-python/#llama_cpp.Llama
2. 若要使用gpu, 需要在conda环境内安装合适的cuda-toolkit
3. 执行 CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python 安装命令使用GPU
"""
from llama_cpp import Llama
import numpy as np
"""
以下提供一个基于llama-cpp-python较为简单的脚本
即llama-cpp-python中的high-level-api
以及
"""
# load llama.cpp ggml format model
# MODEL_PATH = "/models/ggml/ggml-model-q5_0.bin"
MODEL_PATH = "/data0/zhangyuhan/llama_cpp/ggml-model-q5_0.bin"
llm = Llama(model_path=MODEL_PATH, verbose=True, n_gpu_layers=40)
#infer
output = llm("<human>: 林黛玉葬花这一情节出自哪个国家的著作? <bot>: ", max_tokens=32, stop=["<human>:"], echo=True)
print(output)
# streaming generation with the low-level API
def stop_criteria(inputid, logits):
    # Ziya's end-of-sequence token id is 2
return np.argmax(logits) == 2
query = "<human>: 林黛玉葬花这一情节出自哪个国家的著作? <bot>: ".encode("utf-8")
tokens = llm.tokenize(query)
for token in llm.generate(tokens, top_k=40, top_p=0.95, temp=1.0, repeat_penalty=1.1, stopping_criteria=stop_criteria):
print(llm.detokenize([token]).decode("utf-8"))
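# A minimal sketch (not from the original script) wrapping the low-level loop above into a helper
# that returns the full reply as one string. It reuses only calls already shown here
# (llm.tokenize / llm.generate / llm.detokenize); raw bytes are buffered and decoded once at the
# end so multi-byte UTF-8 characters are not split across tokens. The name and max_tokens cap are
# illustrative assumptions.
def generate_reply(prompt: str, max_tokens: int = 256) -> str:
    out = b""
    for i, token in enumerate(llm.generate(llm.tokenize(prompt.encode("utf-8")),
                                           top_k=40, top_p=0.95, temp=1.0,
                                           repeat_penalty=1.1,
                                           stopping_criteria=stop_criteria)):
        if i >= max_tokens:
            break
        out += llm.detokenize([token])
    return out.decode("utf-8", errors="ignore")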
| 1,505 | 31.73913 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ziya_inference/hf_quantizatin_inference.py
|
"""
这是基于hugging face社区开源的框架accelerate制定的基础量化推理方案
该框架主要实现了int8、int4量化,以及cpu或者disk offload
实现了用低存储,小设备运行大模型
具体可以见wiki:http://wiki.team.idea.edu.cn/pages/viewpage.action?pageId=31464125
"""
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
import bitsandbytes as bnb
from bitsandbytes.nn import Linear8bitLt
import torch
# The quantization options are integrated into the from_pretrained method
# device_map must be set when loading a quantized model
# the main quantization args are load_in_8bit and load_in_4bit (documented on the latest main branch; transformers 4.29.2 does not support 4-bit yet)
# more references: https://huggingface.co/docs/accelerate/usage_guides/big_modeling
def load_model_source(model_path, load_in_8bit=True):
if load_in_8bit:
lm = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', load_in_8bit=load_in_8bit).eval()
else:
        lm = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.float16).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path)
    # check the memory footprint of the loaded model
print(f'模型所占显存: {lm.get_memory_footprint()/1024/1024/1024} GB')
    # check how the model is distributed across devices
print('模型在设备上的分布:\n', lm.hf_device_map)
return lm, tokenizer
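# A hedged sketch of 4-bit loading (an assumption, not part of the original script): it requires a
# transformers release that ships BitsAndBytesConfig with 4-bit support (newer than the 4.29.2
# mentioned above) plus a recent bitsandbytes. The function name and parameter values are
# illustrative only.
def load_model_4bit(model_path):
    from transformers import BitsAndBytesConfig
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,                      # store weights in 4 bit
        bnb_4bit_quant_type='nf4',              # normal-float 4-bit quantization
        bnb_4bit_compute_dtype=torch.float16,   # run the matmuls in fp16
    )
    lm = AutoModelForCausalLM.from_pretrained(
        model_path, device_map='auto', quantization_config=bnb_config).eval()
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return lm, tokenizer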
def decode_speed_test(lm, tokenizer, batch_size=1, generate_length=100, test_round=5):
    """
    Measure decoding speed.
    """
st = time.time()
text = ['中国的首都是'] * batch_size
input_ids = tokenizer(text, return_tensors='pt').input_ids.to(0)
    for _ in range(test_round):
        out = lm.generate(input_ids, max_new_tokens=generate_length)
    time_cost = time.time() - st
    total_token_gen = batch_size * generate_length * test_round
    token_gen_speed = total_token_gen / time_cost
    per_token_time_cost = time_cost / total_token_gen * 1000
    info = f"""
    bs:{batch_size} max_new_tokens:{generate_length} test_round:{test_round}
    generated tokens in total: {total_token_gen}
    speed: {token_gen_speed:.2f} token/sec
    token_time_cost: {per_token_time_cost:.2f} ms
    """
print(info)
return out, info
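# Worked example of the arithmetic above (illustrative numbers): with batch_size=1,
# generate_length=100 and test_round=5 the run produces 500 tokens; if it takes 10 s in total
# that is 50 token/sec and 10 / 500 * 1000 = 20 ms per token.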
def generate(text, max_new_tokens=128, do_sample=True, top_p=0.9, return_n=5):
text = f'<human>:{text.strip()}\n<bot>:'
input_ids = tokenizer(text, return_tensors='pt').input_ids.to(0)
out = lm.generate(input_ids,
max_new_tokens=max_new_tokens,
do_sample=do_sample,
top_p=top_p,
num_return_sequences=return_n)
seq = tokenizer.batch_decode(out)
return out, seq
if __name__ == '__main__':
model_path = '/cognitive_comp/common_data/Huggingface-Models/IDEA-CCNL/Ziya-LLaMA-13B-RLHF-V1'
lm, tokenizer = load_model_source(model_path)
# _, _ = decode_speed_test(lm, tokenizer)
_,seq = generate('中国的首都是哪里?')
| 2,638 | 35.652778 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/qa_t5/finetune_t5_cmrc.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : finetune_t5_cmrc.py
@Time : 2022/10/28 19:57
@Author : He Junqing
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
import pytorch_lightning as pl
import os
import sys
import time
import torch
import argparse
from collections import Counter
from fengshen.utils.utils import chinese_char_tokenize
from fengshen.data.universal_datamodule import UniversalDataModule
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import LearningRateMonitor
from transformers import MT5ForConditionalGeneration, T5Tokenizer, MT5Config
from torchmetrics.text.rouge import ROUGEScore
from nltk.translate.bleu_score import corpus_bleu
torch.cuda.empty_cache()
class QAFinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group("BaseModel")
parser.add_argument("--prediction_res_path", default=None, type=str)
parser.add_argument(
"--decode_strategy",
default="greedy",
choices=["beamsearch", "sampling", "greedy"],
)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.formator = args.formator
self.max_target_length = args.max_target_length
self.decode_strategy = args.decode_strategy
self.rouge_metric = ROUGEScore(
rouge_keys=("rougeL", "rouge1", "rouge2"), normalizer=lambda x: x
)
self.loss_func = torch.nn.CrossEntropyLoss(reduction="none")
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
print("using MT5 model")
if args.tokenizer_type == "t5_tokenizer":
self.tokenizer = T5Tokenizer.from_pretrained(args.pretrained_model_path)
print("vocab_size:", len(self.tokenizer))
# self.tokenizer.add_special_tokens(special_token_dict)
# print('add special tokens to tokenizer,vocab size:',len(self.tokenizer))
else:
print("now only the t5_tokenizer is supported")
self.bleu_val = []
def setup(self, stage=None) -> None:
if stage == "fit":
train_loader = (
self.trainer._data_connector._train_dataloader_source.dataloader()
)
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(
self.trainer.max_epochs
)
self.total_steps = (
len(train_loader.dataset) * self.trainer.max_epochs // tb_size
) // ab_size
else:
self.total_steps = (
self.trainer.max_steps // self.trainer.accumulate_grad_batches
)
print("Total steps: {}".format(self.total_steps))
# return super().setup(stage)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def on_save_checkpoint(self, checkpoint) -> None:
# Save the current loop info in the mid of epoch
# if you lightning <= 1.6.0 uncomment the line below
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if (
self.trainer.global_rank == 0
and self.trainer.global_step % self.hparams.every_n_train_steps == 0
):
self.model.save_pretrained(
os.path.join(
self.trainer.checkpoint_callback.dirpath,
"hf_pretrained_epoch{}_step{}".format(
self.trainer.current_epoch, self.trainer.global_step
),
)
)
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if "global_samples" in checkpoint:
self.consumed_samples = checkpoint["global_samples"]
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def training_step(self, batch, batch_idx): # todo: change
if self.formator == "t5style":
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
decoder_input_ids=batch["decoder_input_ids"],
)
else:
output = self.model(
input_ids=batch["input_ids"],
input_token_type=batch["token_types"],
labels=batch["labels"],
decoder_input_ids=batch["decoder_input_ids"],
)
# print(output.logits)
acc = self.comput_metrix(output.logits, batch["labels"])
grad = get_gradient_norm(self.model)
self.log("train_loss", output.loss, sync_dist=True)
self.log("train_acc", acc, sync_dist=True)
self.log("train_grad", grad, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
)
pred_ids = self.model.generate(
input_ids=batch["input_ids"], max_new_tokens=self.max_target_length
)
acc = self.comput_metrix(output.logits, batch["labels"])
# print(output.logits.shape)
self.log("val_loss", output.loss, sync_dist=True)
self.log("val_acc", acc, sync_dist=True)
batch_labels = torch.where(
batch["labels"] != -100, batch["labels"], self.tokenizer.pad_token_id
)
ppl = torch.exp(output.loss)
self.log("val_ppl", ppl, sync_dist=True)
pred_tokens = self.tokenizer.batch_decode(
            pred_ids, clean_up_tokenization_spaces=True, skip_special_tokens=True
)
label_tokens = self.tokenizer.batch_decode(
            batch_labels, clean_up_tokenization_spaces=True, skip_special_tokens=True
)
pred_sentences = list(map(remove_pad, pred_tokens))
# print(label_tokens)
self.bleu_val.append(compute_bleu(pred_sentences, [[t] for t in label_tokens]))
candidate = [
chinese_char_tokenize(p).lstrip("<extra_id_0>") for p in pred_tokens
]
target = [
generate_sentence(chinese_char_tokenize(sent)).lstrip("<extra_id_0>")
for sent in label_tokens
]
self.rouge_metric.update(preds=candidate, target=target)
f1 = compute_f1(candidate, label_tokens)
self.log("val_f1", f1, sync_dist=True)
def on_validation_epoch_end(self) -> None:
n = len(self.bleu_val)
avg_bleu = float(sum(self.bleu_val)) / n
print("bleu:", avg_bleu)
self.log("val_bleu", avg_bleu)
self.bleu_val = []
rouge_dict = self.rouge_metric.compute()
# reset the metric after once validation
self.rouge_metric.reset()
for k, v in rouge_dict.items():
self.log("val_{}".format(k), v, sync_dist=True)
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print("rouge:\n", rouge_dict)
return
def predict_step(self, batch, batch_idx):
num_beams = 1
do_sample = False
top_p = None
if self.decode_strategy == "beamsearch":
num_beams = 10
elif self.decode_strategy == "sampling":
num_beams = 4
top_p = 0.9
do_sample = True
prediction_dic = self.model.generate(
input_ids=batch["input_ids"],
max_new_tokens=self.max_target_length,
num_beams=num_beams,
do_sample=do_sample,
top_p=top_p,
no_repeat_ngram_size=3,
return_dict_in_generate=True,
output_scores=True,
)
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
)
prediction_ids = prediction_dic["sequences"]
loss_tensor = self.loss_func(output.logits.transpose(1, 2), batch["labels"])
indexes = torch.where(batch["labels"] == self.tokenizer.eos_token_id)[1]
loss = torch.sum(loss_tensor, dim=1) / indexes
return {
"input_ids": batch["input_ids"],
"predict_ids": prediction_ids,
"labels": batch["labels"],
"decoder_inputs": batch["decoder_input_ids"],
"loss": loss,
}
def save_preditions(self, result, args):
with open(args.prediction_res_path, "w", encoding="utf8") as fw:
preditions = []
labels = []
for batch in result:
print(batch.keys())
batch_labels = torch.where(
batch["labels"] != -100,
batch["labels"],
self.tokenizer.pad_token_id,
)
for i in range(len(batch["input_ids"])):
context = self.tokenizer.decode(
batch["input_ids"][i],
skip_special_tokens=True,
                        clean_up_tokenization_spaces=True,
)
pred = self.tokenizer.decode(
batch["predict_ids"][i],
                        clean_up_tokenization_spaces=True,
skip_special_tokens=True,
)
target = generate_sentence(
self.tokenizer.batch_decode(
                            batch_labels[i], clean_up_tokenization_spaces=True
)
)
pred = pred.lstrip("<extra_id_0>")
target = target.lstrip("<extra_id_0>")
self.rouge_metric.update(
preds=chinese_char_tokenize(pred),
target=chinese_char_tokenize(target),
)
preditions.append(list(pred))
labels.append([list(target)])
fw.write("context:" + "".join(context) + "\n")
fw.write("pred:" + pred + "\n")
fw.write("target" + target + "\n")
fw.write("loss:{:.6f}\n".format(batch["loss"][i].item()))
fw.write("\n")
bleu = compute_bleu(preditions, labels)
fw.write("bleu:{}".format(bleu))
print("finish prediction, saved in {}".format(args.prediction_res_path))
return preditions, labels
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_true = labels.float()
pad_num = torch.sum(torch.eq(labels, -100))
corr = torch.eq(y_pred, y_true)
acc = (torch.sum(corr.float()) - pad_num) / (
y_true.view(size=(-1,)).shape[0] - pad_num
)
return acc
class PredictDataModule(UniversalDataModule):
def predict_dataloader(self):
return self.test_dataloader()
def main():
total_parser = argparse.ArgumentParser("Finetune Dialogue model.")
total_parser.add_argument("--do_eval_only", action="store_true", default=False)
total_parser.add_argument("--pretrained_model_path", default=None, type=str)
total_parser.add_argument("--new_vocab_path", default=None, type=str)
total_parser.add_argument(
"--tokenizer_type",
default="t5_tokenizer",
choices=["t5_tokenizer", "bert_tokenizer"],
)
total_parser.add_argument("--train_split_size", default=0.995, type=int)
total_parser.add_argument("--preprocessing_num_workers", default="10", type=int)
total_parser.add_argument("--ckpt_path", default=None, type=str)
total_parser.add_argument("--use_cache", default=False, type=bool)
total_parser.add_argument(
"--formator", default="dialog", choices=["dialog", "ccqa", "t5style"]
)
sys.path.append("../../../")
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from qa_dataset import T5StyleDataset, TextGenCollator
total_parser = T5StyleDataset.add_data_specific_args(total_parser)
total_parser = UniversalDataModule.add_data_specific_args(
total_parser
) # TaskDataModel
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = QAFinetuneModel.add_model_specific_args(
total_parser
) # todo: check names
args = total_parser.parse_args()
print("Argument parse success.")
print("superviseT5DataModel load start {}".format(get_time_str()))
config = MT5Config.from_pretrained(args.pretrained_model_path)
collate_fn = TextGenCollator(
config=config,
pad_token_id=config.pad_token_id,
decoder_start_token_id=config.decoder_start_token_id,
formator=args.formator)
if not args.do_eval_only:
datasets = {'train': T5StyleDataset(args.train_file, args, load_data_type=0, data="train"),
'validation': T5StyleDataset(args.val_file, args, load_data_type=0, data="dev")}
model = QAFinetuneModel(args)
print("superviseT5DataModel load end {}".format(get_time_str()))
data_model = UniversalDataModule(
tokenizer=None, args=args, collate_fn=collate_fn, datasets=datasets
)
print('data loaded')
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval="step")
logger = loggers.TensorBoardLogger(
save_dir=os.path.join(args.default_root_dir, "logs/") # TOCHANGE
)
trainer = Trainer.from_argparse_args(
args, logger=logger, callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model)
else:
datasets = {'test': T5StyleDataset(args.test_file, args, load_data_type=0, data="test")}
data_model = PredictDataModule(
tokenizer=None, args=args, collate_fn=collate_fn, datasets=datasets
)
tokenizer = T5Tokenizer.from_pretrained(args.pretrained_model_path)
model = QAFinetuneModel(args=args)
trainer = Trainer.from_argparse_args(args)
result = trainer.predict(model, data_model, ckpt_path=args.ckpt_path)
predictions, labels = model.save_preditions(result, args)
sample = result[0] # first_batch
batch_labels = torch.where(
sample["labels"] != -100, sample["labels"], model.tokenizer.pad_token_id
)
for i in range(4):
print(tokenizer.batch_decode(sample["input_ids"][i]))
print(tokenizer.batch_decode(sample["predict_ids"][i]))
print(tokenizer.batch_decode(batch_labels[i]))
def compute_f1(cand, ref):
f1_score = []
for p, t in zip(cand, ref):
p_tokens = p.split()
t_tokens = t.split()
        common = Counter(p_tokens) & Counter(t_tokens)
num_same = sum(common.values())
if len(t_tokens) == 0 or len(p_tokens) == 0:
f1 = int(p == t)
elif num_same == 0:
f1 = 0
else:
precision = 1.0 * num_same / len(p_tokens)
recall = 1.0 * num_same / len(t_tokens)
f1 = (2 * precision * recall) / (precision + recall + 1e-8)
f1_score.append(f1)
f1 = sum(f1_score) / float(len(cand))
return f1
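# Example for compute_f1 above (illustrative): with whitespace-separated character tokens,
#   compute_f1(["北 京 大 学"], ["北 京"])
# gives precision 2/4, recall 2/2 and F1 ≈ 0.67, since two character tokens overlap.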
def generate_sentence(raw_list):
words = []
i = 0
while i < len(raw_list) and raw_list[i] != "</s>":
words.append(raw_list[i])
i += 1
return "".join(words)
def remove_pad(raw_text, ref=False):
if ref:
return [raw_text.lstrip("<pad>")]
else:
return raw_text.lstrip("<pad>")
def compute_bleu(preditions, labels):
score_nltk = corpus_bleu(labels, preditions)
return score_nltk
def get_gradient_norm(model):
total_norm = 0
parameters = [
p for p in model.parameters() if p.grad is not None and p.requires_grad
]
for p in parameters:
param_norm = p.grad.detach().data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm**0.5
return total_norm
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if __name__ == "__main__":
main()
| 17,183 | 37.101996 | 100 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/qa_t5/qa_dataset.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : qa_dataset.py
@Time : 2022/10/28 19:57
@Author : He Junqing
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
from dataclasses import dataclass
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from fengshen.data.t5_dataloader.t5_gen_datasets import DialogDataset
class T5StyleDataset(DialogDataset):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group("Dataset")
parser.add_argument("--max_seq_length", default=512, type=int)
parser.add_argument("--max_knowledge_length", default=128, type=int)
parser.add_argument("--max_target_length", default=128, type=int)
return parent_args
def regular_tokenize(self, sample):
"""
        sample keys: question: str, context: str, answer: list, idx: int, ans_span: list
"""
plain_text = (
"question:"
+ sample["question"]
+ "knowledge:"
+ sample["context"][: self.max_knowledge_length]
)
l_text = len(plain_text)
ctx_len = self.max_seq_length - l_text - 1
if ctx_len > 0 and "history" in sample:
context = "[SEP]".join(sample["history"])
plain_text += "context:" + context
res_prefix = self.tokenizer.encode("answer:", add_special_tokens=False)
# res_prefix.tolist()
l_rp = len(res_prefix)
tokenized = self.tokenizer.encode(
plain_text,
add_special_tokens=False,
truncation=True,
max_length=self.max_seq_length - 2 - l_rp,
)
# tokenized.tolist()
tokenized += res_prefix
# add maskid
mask_id = self.tokenizer.convert_tokens_to_ids("<extra_id_0>")
tokenized.append(mask_id)
tokenized.append(self.eos_token_id)
# print(tokenized)
target_ids = self.tokenizer.encode(
"<extra_id_0>" + sample["answer"][0],
add_special_tokens=True,
truncation=True,
max_length=self.max_target_length,
)
# print(target_ids)
tokenized_sample = {}
tokenized_sample["input_ids"] = np.array(tokenized, dtype=np.int32)
tokenized_sample["attention_mask"] = np.ones(len(tokenized), dtype=np.int8)
tokenized_sample["labels"] = np.array(target_ids, dtype=np.int32)
tokenized_sample["idx"] = sample["idx"]
# print(tokenized_sample)
return tokenized_sample
@dataclass
class TextGenCollator:
'''
'''
config: None
pad_token_id: -100
decoder_start_token_id: 0
formator: str = 't5style'
def setup(self):
pass
def __call__(self, samples):
batch = {
k: [
torch.tensor(samples[i][k], dtype=torch.int64)
for i in range(len(samples))
]
for k in ["input_ids", "attention_mask", "labels"]
}
batch["idx"] = torch.tensor([samples[i]["idx"] for i in range(len(samples))])
# print(batch)
for k, v in batch.items():
if k != "labels" and k != "idx":
batch[k] = pad_sequence(
v, batch_first=True, padding_value=self.pad_token_id
)
elif k == "labels":
batch[k] = pad_sequence(v, batch_first=True, padding_value=-100)
batch["decoder_input_ids"] = torch.tensor(
self.shift_tokens_right(
batch["labels"], self.pad_token_id, self.decoder_start_token_id
),
dtype=torch.long,
)
return batch
def shift_tokens_right(
self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(
shifted_input_ids == -100, pad_token_id, shifted_input_ids
)
return shifted_input_ids
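    # Worked example (illustrative values, pad_token_id=0 and decoder_start_token_id=0):
    #   labels              = [[259, 1188, 1, -100, -100]]
    #   shift_tokens_right -> [[  0,  259, 1188,   1,    0]]
    # i.e. the decoder input is the label sequence shifted right by one, starting with the
    # decoder start token, and any remaining -100 ignore positions are replaced by the pad id.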
if __name__ == "__main__":
# test
import argparse
total_parser = argparse.ArgumentParser("DATASET parser")
total_parser.add_argument(
"--tokenizer_type",
default="t5_tokenizer",
choices=["bert_tokenizer", "t5_tokenizer"],
)
total_parser.add_argument("--preprocessing_num_workers", default="4", type=int)
total_parser.add_argument(
"--new_vocab_path",
default=None,
type=str,
)
total_parser.add_argument(
"--pretrained_model_path",
default="YOUR DOWNLOAD MODEL PATH",
)
total_parser.add_argument("--train_split_size", default=0.995, type=int)
total_parser.add_argument(
"--formator", default="t5style", choices=["t5style", "squad", "dialog"]
)
    total_parser = T5StyleDataset.add_data_specific_args(total_parser)
args = total_parser.parse_args()
args.train_data_path = "cmrc"
    ds = T5StyleDataset("cmrc", args, load_data_type=0, data="dev")
print(len(ds))
for i in range(10):
print(ds[i])
    from torch.utils.data import DataLoader
    from transformers import MT5Config
    config = MT5Config.from_pretrained(args.pretrained_model_path)
    collate_fn = TextGenCollator(
        config=config,
        pad_token_id=config.pad_token_id,
        decoder_start_token_id=config.decoder_start_token_id,
        formator=args.formator)
    dl = DataLoader(ds, batch_size=4, collate_fn=collate_fn)
    for batch in dl:
        print(batch["input_ids"])
        print(batch["decoder_input_ids"])
        print(batch["labels"])
        break
| 6,086 | 31.37766 | 96 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/hubert/pretrain_hubert.py
|
import fengshen.data.hubert.hubert_dataset as datasets
from fengshen.data.universal_datamodule import UniversalDataModule
from transformers import HubertConfig, HubertModel
# from transformers.models.hubert.modeling_hubert import _compute_mask_indices
import argparse
from fairseq.data import Dictionary
from pytorch_lightning import (
LightningModule,
Trainer,
loggers,
)
from pytorch_lightning.callbacks import LearningRateMonitor
import torch
import os
import torch.nn.functional as F
import torch.nn as nn
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary):
self.dictionary = dictionary
def __call__(self, label: str):
return self.dictionary.encode_line(
label,
append_eos=False,
add_if_not_exist=False,
)
class HubertPretrainDataLoader():
def __init__(self, args):
self.cfg = args
self.dictionaries = self.load_dictionaries()
self.load_datasets = {}
    # TODO: switch to a HuggingFace tokenizer
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries
def get_label_dir(self):
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
@property
def datasets(self):
return self.load_datasets
def load_dataset(self, split: str, **kwargs):
manifest = f"{self.cfg.data}/{split}.tsv"
dicts = self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
paths = [f"{self.get_label_dir()}/{split}.{lb}" for lb in self.cfg.labels]
# hubert v1: pad_audio=True, random_crop=False;
self.load_datasets[split] = datasets.HubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
)
def perpare_data(args):
loader = HubertPretrainDataLoader(args)
loader.load_dataset('train')
loader.load_dataset('valid')
return loader
class HubertLightning(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('HuBert Lightning')
parser.add_argument('--pred_masked_weight', type=float, default=1.0)
parser.add_argument('--logit_temp', type=float, default=1.0)
parser.add_argument('--loss_weights', type=float, nargs='+')
# parser.add_argument('--mask_prob', type=float, default=0.65)
# parser.add_argument('--mask_length', type=int, default=10)
# parser.add_argument('--mask_selection', type=str, default='static',
# choice=["static", "uniform", "normal", "poisson"])
# parser.add_argument('--mask_other', type=float, default=0)
# parser.add_argument('--no_mask_overlap', type=bool, default=False)
# parser.add_argument('--mask_min_space', type=int, default=1)
return parent_parser
def __init__(self, args, loader, ** kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = HubertConfig.from_pretrained(args.model_path)
self.config = config
self.model = HubertModel(config=config)
self.num_classes = [len(d) for d in loader.dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), self.config.conv_dim[-1] // 2)
)
self.final_proj = nn.Linear(
self.config.hidden_size, self.config.conv_dim[-1] // 2 * len(loader.dictionaries)
)
nn.init.uniform_(self.label_embs_concat)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits /= self.hparams.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
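    # Shape notes (added for clarity, based on how compute_pred below calls this method):
    #   x    : (S, D)            projected features of the masked frames
    #   pos  : (S, D)            embedding of the true label for each frame
    #   negs : (num_cls, S, D)   every label embedding, broadcast over the frames
    # The returned logits are (S, num_cls + 1) with the positive in column 0, which is why the
    # cross-entropy targets built in forward() are all zeros.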
def forward(self, **batch):
target_list = batch['target_list']
padding_mask = batch['net_input']['padding_mask']
input_values = batch['net_input']['source']
output = self.model(input_values=input_values,
attention_mask=padding_mask,
target_list=target_list,
mask_time_indices=None,
return_dict=False)
def compute_pred(proj_x, target, label_embs):
# compute logits for the i-th label set
y = torch.index_select(label_embs, 0, target.long())
negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
# proj_x: (S, D)
# y: (S, D)
# negs: (Neg, S, D)
return self.compute_nce(proj_x, y, negs)
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
x, extra_losses, target_list, mask_indices, padding_mask = output[
0], output[-4], output[-3], output[-2], output[-1]
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = self.final_proj(x[masked_indices])
proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
logp_m_list = [
compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list))
]
targ_m_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logp_m_list]
loss = 0.0
loss_m_list = []
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m)
loss_m_list.append(loss_m)
self.log(f"loss_m_{i}", loss_m.detach().item())
loss += self.hparams.pred_masked_weight * sum(loss_m_list)
loss_weights = self.hparams.loss_weights
if loss_weights is not None:
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = ['extra']
if len(loss_weights) == 1 and len(extra_losses) != 1:
loss_weights = [loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
loss_weights
), f"{len(extra_losses)}, {len(loss_weights)}"
for p, n, coef in zip(extra_losses, names, loss_weights):
if coef != 0 and p is not None:
p = coef * p.float()
loss += p
self.log(f"loss_{n}", p.item())
return {'loss': loss}
def training_step(self, batch, batch_idx):
output = self(**batch)
self.log('train_loss', output['loss'])
return output
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float()) / y_true.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self(**batch)
# self.log('val_loss', output.loss, sync_dist=True)
# acc = self.comput_metrix(output.logits, batch['labels'])
# self.log('val_acc', acc, sync_dist=True)
return output
def on_save_checkpoint(self, checkpoint) -> None:
# Save the current loop info in the mid of epoch
# if you lightning <= 1.6.0 uncomment the line below
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
from fengshen.utils import UniversalCheckpoint
from fengshen.models.model_utils import add_module_args
args_parser = add_module_args(args_parser)
args_parser = datasets.add_data_specific_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = HubertLightning.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args_parser.add_argument('--ckpt_path', type=str, )
args = args_parser.parse_args()
data_module = UniversalDataModule(args=args, tokenizer=None, collate_fn=None)
data_loader = perpare_data(args)
data_module.datasets = data_loader.datasets
module = HubertLightning(args, loader=data_loader)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'),
name=os.path.basename(os.path.dirname(args.model_path)))
checkpoint_callback = UniversalCheckpoint(args).callbacks
if args.ckpt_path is not None and \
not os.path.exists(args.ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.ckpt_path = None
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(module, data_module, ckpt_path=args.ckpt_path)
| 11,643 | 39.430556 | 109 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_erlangshen_bert/pretrain_erlangshen.py
|
from dataclasses import dataclass
from transformers import (
MegatronBertConfig,
MegatronBertForPreTraining,
AutoTokenizer,
)
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
import argparse
import torch
import os
import numpy as np
import time
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.data_utils.sop_utils import get_a_and_b_segments
from fengshen.data.data_utils.truncate_utils import truncate_segments
from fengshen.data.data_utils.token_type_utils import create_tokens_and_tokentypes
from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from torch.utils.data._utils.collate import default_collate
SHOW_DATA = False
@dataclass
class ErLangShenCollator:
'''
    Turn raw inputs into samples, i.e. the final model inputs.
    The main processing logic lives in __call__,
    covering both the masked-LM task and the SOP task.
'''
    tokenizer: None  # the tokenizer
max_seq_length: 512
masked_lm_prob: 0.15
content_key: str = 'text'
    # some preprocessing setup
def setup(self):
from fengshen.data.data_utils.sentence_split import ChineseSentenceSplitter
self.sentence_split = ChineseSentenceSplitter()
self.np_rng = np.random.RandomState(seed=((int(time.time()) % 2**32)))
inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
def __call__(self, samples):
'''
        samples: each sample looks like {"text": "hello world"}
'''
model_inputs = []
for s in samples:
sentences = self.sentence_split.tokenize(s[self.content_key])
# Divide sample into two segments (A and B).
tokenized_sentences = [self.tokenizer.convert_tokens_to_ids(
self.tokenizer.tokenize(sent)) for sent in sentences]
if len(tokenized_sentences) == 0:
print('find empty sentence')
continue
if len(tokenized_sentences) > 1:
tokens_a, tokens_b, is_next_random = get_a_and_b_segments(tokenized_sentences,
self.np_rng)
else:
tokens_a = tokenized_sentences[0]
tokens_b = []
is_next_random = False
            # max_seq_length - 3 because [CLS] [SEP] [SEP] still need to be appended
if len(tokens_a) == 0:
continue
_ = truncate_segments(tokens_a, tokens_b, len(tokens_a),
len(tokens_b), self.max_seq_length-3, self.np_rng)
# Build tokens and toketypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id)
# Masking.
max_predictions_per_seq = self.masked_lm_prob * len(tokens)
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, self.vocab_id_list, self.vocab_id_to_token_dict, self.masked_lm_prob,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id,
max_predictions_per_seq, self.np_rng,
masking_style='bert')
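            # Note (added for clarity, an assumption about create_masked_lm_predictions): with
            # masking_style='bert' roughly masked_lm_prob of the tokens are selected and handled
            # with the usual BERT recipe (mostly replaced by the mask token, some by random
            # tokens, some kept unchanged).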
# Some checks.
num_tokens = len(tokens)
padding_length = self.max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [self.tokenizer.pad_token_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
# Lables and loss mask.
labels = [-100] * self.max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
labels_np = np.array(labels, dtype=np.int64)
model_inputs.append(
{
'input_ids': tokens_np,
'attention_mask': padding_mask_np,
'token_type_ids': tokentypes_np,
'labels': labels_np,
'next_sentence_label': int(is_next_random)
}
)
return default_collate(model_inputs)
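    # Usage sketch (illustrative, mirroring how __main__ below builds the collator): after
    # setup() it can be called directly on a list of raw samples, e.g.
    #   collate_fn([{'text': '今天天气不错。我们去公园散步。'}])
    # which returns a batch dict with input_ids / attention_mask / token_type_ids / labels /
    # next_sentence_label ready for MegatronBertForPreTraining.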
class ErLangShenBert(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Erlangshen Bert')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--max_seq_length', type=int, default=512)
parser.add_argument('--sample_content_key', type=str, default='text')
return parent_parser
def __init__(self, args, tokenizer, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = MegatronBertConfig.from_pretrained(args.model_path)
self.config = config
self.tokenizer = tokenizer
self.model = MegatronBertForPreTraining(config)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, **batch):
return self.model(**batch)
def detokenize(self, token_ids):
toks = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(toks)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
print(self.config)
print(self.model)
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
output = self(**batch)
self.log('train_loss', output.loss, sync_dist=True)
label_idx = batch['labels'] != -100
acc = self.comput_metrix(
output.prediction_logits[label_idx].view(-1, output.prediction_logits.size(-1)), batch['labels'][label_idx])
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self(**batch)
self.log('val_loss', output.loss, sync_dist=True)
return output.loss
def on_load_checkpoint(self, checkpoint) -> None:
        # compatibility with older lightning versions, where resuming from a ckpt resets the step count to 0
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = ErLangShenBert.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collate_fn = ErLangShenCollator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
masked_lm_prob=args.masked_lm_prob,
content_key=args.sample_content_key,
)
collate_fn.setup()
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = ErLangShenBert(args, tokenizer=tokenizer)
print('model load complete')
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
    # For compatibility: if the checkpoint directory does not exist, drop this argument, otherwise loading will error out
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path)
| 9,575 | 39.235294 | 120 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/wenzhong_qa/finetune_medicalQA.py
|
from transformers import GPT2LMHeadModel
from data.task_dataloader.medicalQADataset import GPT2QADataModel
from transformers.optimization import get_linear_schedule_with_warmup
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import argparse
import torch
import os
import sys
sys.path.insert(0, '/cognitive_comp/wuziwei/codes/fengshen/fengshen')
# sys.path.append('../../')
# sys.path.append('../')
# os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
class GPT2FinetuneMedicalQAModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=1000, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
# every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
class GPT2FinetuneMedicalQA(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
print('num_data:', num_data)
self.model = GPT2LMHeadModel.from_pretrained(
args.pretrained_model_path)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss)
# self.log('val_acc', acc)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def main():
total_parser = argparse.ArgumentParser("Summary Task")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = GPT2QADataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQAModelCheckpoint.add_argparse_args(
total_parser)
total_parser = GPT2FinetuneMedicalQA.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
data_model = GPT2QADataModel(args)
if not args.do_eval_only:
model = GPT2FinetuneMedicalQA(args, len(data_model.train_dataloader()))
checkpoint_callback = GPT2FinetuneMedicalQAModelCheckpoint(
args).callbacks
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'), name='MedicalQA-GPT2')
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback]
)
trainer.fit(model, data_model)
# result = trainer.predict(model, data_model)
# with open('test_results.txt', 'wt', encoding='utf-8') as w:
# for line in result:
# w.writelines(line)
model.model.save_pretrained(
'/cognitive_comp/wuziwei/pretrained_model_hf')
else:
print('save to hf.....')
trainer = Trainer.from_argparse_args(args)
model = GPT2FinetuneMedicalQA(
args, len(data_model.predict_dataloader()))
result = trainer.predict(
model, data_model, ckpt_path='/cognitive_comp/wuziwei/task/fs_medical_qa_finetune/ckpt/last.ckpt')
# with open('test_results.txt','wt',encoding='utf-8') as w:
# for line in result:
# w.writelines(line)
model.model.save_pretrained(
'/cognitive_comp/wuziwei/pretrained_model_hf')
if __name__ == '__main__':
main()
| 7,423 | 40.943503 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/wenzhong_qa/finetune_wenzhong.py
|
# sys.path.append('./')
import os
import torch
import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import Trainer, loggers
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import GPT2LMHeadModel
from fengshen.data.task_dataloader.medicalQADataset import GPT2QADataModel
class GPT2FinetuneMedicalQAModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
class GPT2FinetuneMedicalQA(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
print('num_data:', num_data)
self.model = GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data
/ (max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float()) / labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss)
# self.log('val_acc', acc)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def main():
total_parser = argparse.ArgumentParser("QA Task")
total_parser.add_argument('--do_eval_only', action='store_true', default=False)
total_parser.add_argument('--pretrained_model_path', default='google/mt5-small', type=str)
total_parser.add_argument('--output_save_path', default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = GPT2QADataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQAModelCheckpoint.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQA.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
data_model = GPT2QADataModel(args)
if not args.do_eval_only:
model = GPT2FinetuneMedicalQA(args, len(data_model.train_dataloader()))
checkpoint_callback = GPT2FinetuneMedicalQAModelCheckpoint(args).callbacks
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'), name='WenZhong')
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback]
)
trainer.fit(model, data_model)
if __name__ == '__main__':
main()
# test()
'''
# python examples/mt5_summary.py --gpus=1 --test_data=test_public.jsonl
# --default_root_dir=/cognitive_comp/ganruyi/fengshen/mt5_summary/eval
# --do_eval_only
# --resume_from_checkpoint=/cognitive_comp/ganruyi/fengshen/mt5_summary/ckpt/model-epoch=01-train_loss=1.9166.ckpt
# --strategy=ddp
'''
| 6,611 | 41.935065 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/finetune_bart_qg/utils.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : utils.py
@Time : 2022/10/28 18:27
@Author : Qi Yang
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self, smoothing=0.1):
super(LabelSmoothingCrossEntropy, self).__init__()
self.smoothing = smoothing
self.ignore_index = -100
def forward(self, x, target):
confidence = 1. - self.smoothing
logprobs = F.log_softmax(x, dim=-1)
targets_ignore = torch.where(target != self.ignore_index, target, 0)
nll_loss = -logprobs.gather(dim=-1, index=targets_ignore.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
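    # Usage sketch (illustrative): the returned value is
    #   (1 - smoothing) * NLL(target) + smoothing * mean_k(-log p_k)
    # averaged over the batch, e.g.
    #   criterion = LabelSmoothingCrossEntropy(smoothing=0.1)
    #   loss = criterion(torch.randn(4, 10), torch.tensor([1, 0, 3, 2]))
    # Note that -100 targets are only remapped for the gather; they are not excluded from the mean.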
def truncate_sequence(document: str, max_num_tokens: int, reverse=False):
total_length = len(document)
if total_length <= max_num_tokens:
return document
else:
if reverse:
return document[-1*max_num_tokens:]
else:
return document[:max_num_tokens]
def padding_to_maxlength(ids, max_length, pad_id):
cur_len = len(ids)
len_diff = max_length - len(ids)
return ids + [pad_id] * len_diff, [1] * cur_len + [0] * len_diff
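# Example for padding_to_maxlength above (illustrative ids):
#   padding_to_maxlength([101, 2769, 102], max_length=5, pad_id=0)
#   -> ([101, 2769, 102, 0, 0], [1, 1, 1, 0, 0])   # (padded ids, attention mask)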
def white_space_fix(text):
return "".join(text.split(" "))
def remove_prompt(text):
if ":" in text:
return text.split(":")[1]
return text
| 2,208 | 30.112676 | 96 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/finetune_bart_qg/finetune_bart.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : finetune_bart.py
@Time : 2022/10/28 18:23
@Author : Qi Yang
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from fengshen.models.model_utils import configure_optimizers
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.utils import chinese_char_tokenize
from utils import truncate_sequence, white_space_fix
from utils import LabelSmoothingCrossEntropy
import sys
import os
import torch
import argparse
import pytorch_lightning as pl
from dataclasses import dataclass
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from transformers import BartForConditionalGeneration
from transformers import BertTokenizer, AutoTokenizer
from torchmetrics.text.rouge import ROUGEScore
sys.path.append('../../../')
@dataclass
class QGT5Collator:
    @staticmethod
def add_data_specific_args(parent_args):
# the hyperparameters should be determined according to the max length of context in dataset
        parser = parent_args.add_argument_group('BART Dialog Collator')
parser.add_argument('--max_seq_length', default=512, type=int)
parser.add_argument('--max_src_length', default=32, type=int)
parser.add_argument('--max_kno_length', default=416, type=int)
parser.add_argument('--max_tgt_length', default=64, type=int)
parser.add_argument('--mask_ans_style',
default='normal',
type=str,
choices=['normal', 'unmask', 'anstoken', 'postag', 'anstoken_multispan', 'postag_multispan', 'normal_multispan'])
return parent_args
def __init__(self, tokenizer, args):
self.args = args
self.tokenizer = tokenizer
self.max_seq_length = args.max_seq_length
self.print_example = True
self.mask_ans_style = args.mask_ans_style
self.do_eval_only = args.do_eval_only
self.tokenizer_type = args.tokenizer_type
def encode(self, x, y):
if self.tokenizer_type == "bert":
x = x
y = y
else:
# t5 sentence piece
x = self.tokenizer.bos_token + x + self.tokenizer.eos_token
y = y + self.tokenizer.eos_token
encoder_input = self.tokenizer.encode_plus(
x,
max_length=self.args.max_kno_length + self.args.max_src_length,
padding="max_length",
truncation=True,
return_tensors='pt'
)
decoder_output = self.tokenizer.encode_plus(
y,
max_length=self.args.max_tgt_length,
padding="max_length",
truncation=True,
return_tensors='pt'
)
return encoder_input, decoder_output
def mask(self, s):
def replace_span(source, target, sptoken):
ans_bos, ans_eos = s["ans_span"][0]
return source[:ans_bos] + sptoken + source[ans_eos:]
def replace_all(source, target, sptoken):
return source.replace(target, sptoken)
if 'multispan' in self.mask_ans_style:
fn = replace_all
else:
fn = replace_span
        # unmask: 北京是中国的首都 (the answer "中国" is left in place)
if 'unmask' in self.mask_ans_style:
return s["context"]
        # normal: 北京是 <mask> 的首都 (the answer span is replaced with the mask token)
if 'normal' in self.mask_ans_style:
self.anstoken = self.tokenizer.mask_token
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
        # anstoken: 北京是 [ANS] 的首都 (the answer span is replaced with a dedicated answer token)
if 'anstoken' in self.mask_ans_style:
anstoken_dict = {
"bert": "[ANS]",
"bart": "<ans>"
}
self.anstoken = anstoken_dict[self.tokenizer_type]
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
        # postag: 北京是 <beg> 中国 <eos> 的首都 (the answer span is wrapped with boundary tokens;
        # note that get_tokenizer registers "<beg>"/"<end>" as special tokens, while "<eos>" is used here)
        if 'postag' in self.mask_ans_style:
            begtoken, endtoken = "<beg>", "<eos>"
self.anstoken = begtoken + s["answer"][0] + endtoken
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
return masked_context
def prompt(self, context, answer, question):
        pre_prompt, mid_prompt, post_prompt = "知识:", "回答:", "问题:"  # prompt pieces: knowledge / answer / question
context = truncate_sequence(context, self.args.max_kno_length-len(pre_prompt)-1)
        # used in SQuAD 2.0
        # note that src and tgt are swapped in question generation: the question is the target
answer = truncate_sequence(answer, self.args.max_src_length - len(mid_prompt)-1)
question = truncate_sequence(question, self.args.max_tgt_length-len(post_prompt)-1)
x_trunc = f'{pre_prompt}{context}{mid_prompt}{answer}'
y_trunc = f'{post_prompt}{question}'
return x_trunc, y_trunc
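    # Illustrative shape of the strings built by prompt() (a hypothetical sample, not from any dataset):
    #   x_trunc: "知识:北京是中国的首都。回答:中国"
    #   y_trunc: "问题:北京是哪个国家的首都?"
    # encode() then turns x_trunc into the encoder inputs and y_trunc into the decoder labels.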
def __call__(self, samples):
"""
ans_num = 1 适用于 Train 数据只有 1 条 answer 取第一条情况
ans_num > 1 适用于 Dev 数据有多条 answer 情况
Input:
input_ids: input_ids (text + answer)
attn_mask: input attn mask
labels: decoder_ids (question)
"""
input_ids, attn_mask, labels = [], [], []
ans, qes, ctx, ans_spans, idxs, imp = [], [], [], [], [], []
for s in samples:
if self.do_eval_only:
# log origin answer to compare
ans.append(s["answer"])
qes.append(s["question"])
ctx.append(s["context"])
ans_spans.append(s["ans_span"])
idxs.append(s["idx"])
if "is_impossible" in s:
imp.append(s["is_impossible"])
else:
                    imp.append(False)  # SQuAD 1.0 has no is_impossible field
if not s["is_impossible"]: # have ans and ans_span
context = self.mask(s)
answer = s["answer"][0]
question = s["question"]
else: # no ans and ans_span
context = s["context"]
answer = "无答案"
question = s["question"]
x_trunc, y_trunc = self.prompt(context, answer, question)
encoder_input, decoder_output = self.encode(x_trunc, y_trunc)
input_ids.append(encoder_input["input_ids"])
attn_mask.append(encoder_input["attention_mask"])
labels.append(decoder_output["input_ids"])
labels = torch.cat(labels)
if self.tokenizer_type == "bart":
end_token_index = torch.where(labels == self.tokenizer.eos_token_id)[1]
else:
end_token_index = torch.where(labels == self.tokenizer.sep_token_id)[1]
for idx, end_idx in enumerate(end_token_index):
labels[idx][end_idx + 1:] = -100 # cross entropy cal
data = {
'input_ids': torch.cat(input_ids),
'attention_mask': torch.cat(attn_mask),
'labels': labels
}
if self.do_eval_only:
data.update({
'answer': ans,
'question': qes,
'context': ctx,
'ans_span': ans_spans,
'idx': idxs,
'is_impossible': imp
})
if self.print_example:
print(x_trunc)
print(y_trunc)
self.print_example = False
return data
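# Rough layout of one collated batch, assuming the default lengths above
# (max_kno_length=416 + max_src_length=32 for the encoder, max_tgt_length=64 for the decoder):
#   {
#       'input_ids':      LongTensor [batch, 448]   # tokenized "知识:…回答:…"
#       'attention_mask': LongTensor [batch, 448]
#       'labels':         LongTensor [batch, 64]    # tokenized "问题:…", set to -100 after the end token
#   }
# With --do_eval_only the raw answer/question/context/ans_span/idx/is_impossible lists are attached as well.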
class BARTFinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--min_learning_rate', default=1e-7, type=float)
parser.add_argument('--lr_decay_steps', default=0, type=int)
parser.add_argument('--lr_decay_ratio', default=1.0, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup_steps', default=1000, type=int)
parser.add_argument('--warmup_ratio', default=0.01, type=float)
parser.add_argument('--label_smooth', default=0, type=float)
parser.add_argument('--new_token_path', default="./", type=str) # save new token after add special token
parser.add_argument('--adam_beta1', default=0.9, type=float)
parser.add_argument('--adam_beta2', default=0.999, type=float)
parser.add_argument('--adam_epsilon', default=1e-8, type=float)
parser.add_argument('--scheduler_type', default='polynomial', type=str)
return parent_args
def __init__(self, tokenizer, args):
super().__init__()
self.save_hyperparameters(args)
self.model = BartForConditionalGeneration.from_pretrained(args.model_path)
self.tokenizer = tokenizer
# add special token ans
# self.tokenizer.save_vocabulary(self.args.model_path)
new_vocab = args.model_path+"/sp_vocab/"
if not os.path.exists(new_vocab):
os.makedirs(new_vocab)
self.tokenizer.save_pretrained(new_vocab)
self.model.resize_token_embeddings(len(tokenizer))
self.vocab_size = len(tokenizer)
self.rougescore = ROUGEScore(rouge_keys=('rougeL'), normalizer=lambda x: x)
if self.hparams.label_smooth:
self.loss_fct = LabelSmoothingCrossEntropy(smoothing=0.1)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
                ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
loss = output.loss
if self.hparams.label_smooth:
loss = self.loss_fct(output.logits.view(-1, self.vocab_size), batch["labels"].view(-1))
self.log('train_loss', loss, sync_dist=True)
return loss
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.compute_acc(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
self.log('val_ppl', torch.exp(output.loss), sync_dist=True)
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
do_sample=True,
num_beams=5,
early_stopping=True,
max_length=64,
top_p=0.9,
)
batch_label = torch.where(batch["labels"] != -100, batch["labels"], self.tokenizer.pad_token_id)
pred = self.tokenizer.batch_decode(cond_output, clean_up_tokenization_spaces=True, skip_special_tokens=True)
ques = self.tokenizer.batch_decode(batch_label, clean_up_tokenization_spaces=True, skip_special_tokens=True)
pred = [chinese_char_tokenize(white_space_fix(p)) for p in pred]
ques = [chinese_char_tokenize(white_space_fix(q)) for q in ques]
self.rougescore.update(pred, ques)
return pred
def validation_epoch_end(self, validation_step_outputs):
rouge = self.rougescore.compute()
self.log('val_rouge', rouge["rougeL_fmeasure"], sync_dist=True)
def on_predict_start(self):
self.loss_fct = torch.nn.CrossEntropyLoss(reduction='none')
def predict_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
loss_tensor = self.loss_fct(output.logits.transpose(1, 2), batch["labels"])
if self.hparams.tokenizer_type == 'bart':
eos_index = torch.where(batch['labels'] == self.tokenizer.eos_token_id)[1]
elif self.hparams.tokenizer_type == 'bert':
eos_index = torch.where(batch['labels'] == self.tokenizer.sep_token_id)[1]
        # average the per-token loss over the target length (index of the end-of-sequence token)
        loss = torch.sum(loss_tensor, dim=1) / eos_index
with torch.no_grad():
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
do_sample=True,
num_beams=5,
max_length=64,
top_p=0.9,
output_scores=True,
return_dict_in_generate=True
)
pred = self.tokenizer.batch_decode(
cond_output.sequences, clean_up_tokenization_spaces=True, skip_special_tokens=True) # ['sequences']
pred = [white_space_fix(p) for p in pred] # remove prompt and white space
score = cond_output.sequences_scores
return pred, score, loss
def compute_acc(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(checkpoint['epoch'], checkpoint['global_step'])))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_tokenizer(tokenizer_type, pretrained_model_path):
if tokenizer_type == 'bart':
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_path, use_fast=False, additional_special_tokens=["<ans>", "<beg>", "<end>"])
print(len(tokenizer))
elif tokenizer_type == 'bert':
tokenizer = BertTokenizer.from_pretrained(
pretrained_model_path, use_fast=False, additional_special_tokens=["[ANS]"])
return tokenizer
def main():
total_parser = argparse.ArgumentParser("Finetune BART for QG")
total_parser.add_argument('--do_eval_only', action='store_true', default=False)
total_parser.add_argument('--tokenizer_type', type=str, default="bart", choices=['bart', 'bert'])
total_parser.add_argument('--tensorboard_dir', type=str, default="bart")
total_parser.add_argument('--deepspeed')
total_parser = UniversalDataModule.add_data_specific_args(total_parser)
total_parser = QGT5Collator.add_data_specific_args(total_parser)
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = BARTFinetuneModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
tokenizer = get_tokenizer(args.tokenizer_type, args.model_path)
collator = QGT5Collator(tokenizer=tokenizer, args=args)
data_model = UniversalDataModule(collate_fn=collator, tokenizer=tokenizer, args=args)
print("Data load complete...")
if args.deepspeed is not None:
os.environ['PL_DEEPSPEED_CONFIG_PATH'] = args.deepspeed
model = BARTFinetuneModel(tokenizer, args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
if not args.do_eval_only:
trainer.fit(model, data_model)
if __name__ == '__main__':
main()
| 17,301 | 39.237209 | 141 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/zen1_finetune/fengshen_token_level_ft_task.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen1.ngram_utils import ZenNgramDict
from fengshen.models.zen1.modeling import ZenForTokenClassification
from fengshen.metric.metric import SeqEntityScore
from fengshen.models.zen1.tokenization import BertTokenizer
from random import shuffle
from pytorch_lightning.callbacks import LearningRateMonitor
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.ERROR)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks, valid_ids=None, label_mask=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label: i for i, label in enumerate(label_list, 1)}
features = []
for (ex_index, example) in enumerate(examples):
textlist = example.text_a
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
valid.append(1)
label_mask.append(1)
else:
valid.append(0)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
# Filter the ngram segment from 2 to 7 to check whether there is a ngram
for p in range(2, 8):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the ngram
# i is the length of the current ngram
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment])
shuffle(ngram_matches)
max_ngram_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_ngram_in_seq_proportion:
ngram_matches = ngram_matches[:max_ngram_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = 1.0
# Zero-pad up to the max ngram in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
valid_ids=valid,
label_mask=label_mask))
return features
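# Worked example of the n-gram bookkeeping above (toy numbers, for illustration only):
# with max_seq_length=8 and a single match [ngram_index=42, q=2, p=3, ('中', '国', '人')],
# ngram_ids becomes [42, 0, ...] after padding and the position matrix gets ones in rows
# 2..4 of column 0, i.e. ngram_positions_matrix[2:5, 0] = 1, marking which wordpieces the
# matched n-gram covers.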
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, set_type, quotechar=' '):
"""See base class."""
return self._create_examples(
self._read_tsv(data_path, self.get_quotechar()), set_type)
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
label = label
examples.append(InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_quotechar(self):
return ' '
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
        '''
        Read a token-per-line tagging file; blank lines (or -DOCSTART headers) separate sentences.
        Return format:
            [ (['EU', 'rejects', 'German', 'call', ...], ['B-ORG', 'O', 'B-MISC', 'O', ...]), ... ]
        '''
f = open(input_file)
data = []
sentence = []
label = []
for line in f:
if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
continue
splits = line.split(quotechar)
sentence.append(splits[0])
label.append(splits[-1][:-1])
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
return data
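# Expected raw layout for _read_tsv: one token and its tag per line, separated by the
# quotechar, with blank lines between sentences. A made-up snippet:
#
#   EU B-ORG
#   rejects O
#   German B-MISC
#   call O
#
#   Peter B-PER
#   smiled O
#
# parses into [(['EU', 'rejects', 'German', 'call'], ['B-ORG', 'O', 'B-MISC', 'O']),
#              (['Peter', 'smiled'], ['B-PER', 'O'])].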
class MSRAProcessor(DataProcessor):
"""Processor for the msra data set."""
def get_labels(self):
return ['B-NR', 'B-NS', 'B-NT', 'E-NR', 'E-NS', 'E-NT', 'M-NR',
'M-NS', 'M-NT', 'O', 'S-NR', 'S-NS', 'S-NT', '[CLS]', '[SEP]']
class OntoNotes4Processor(DataProcessor):
"""Processor for the OntoNotes4 data set."""
def get_labels(self):
return ['B-GPE', 'B-LOC', 'B-ORG', 'B-PER', 'E-GPE', 'E-LOC',
'E-ORG', 'E-PER', 'M-GPE', 'M-LOC', 'M-ORG', 'M-PER', 'O',
'S-GPE', 'S-LOC', 'S-ORG', 'S-PER', '[CLS]', '[SEP]']
class WeiboProcessor(DataProcessor):
"""Processor for the Weibo data set."""
def get_labels(self):
return ['B-GPE.NAM', 'B-GPE.NOM', 'B-LOC.NAM', 'B-LOC.NOM',
'B-ORG.NAM', 'B-ORG.NOM', 'B-PER.NAM', 'B-PER.NOM', 'E-GPE.NAM',
'E-GPE.NOM', 'E-LOC.NAM', 'E-LOC.NOM', 'E-ORG.NAM', 'E-ORG.NOM',
'E-PER.NAM', 'E-PER.NOM', 'M-GPE.NAM', 'M-LOC.NAM', 'M-LOC.NOM',
'M-ORG.NAM', 'M-ORG.NOM', 'M-PER.NAM', 'M-PER.NOM', 'O',
'S-GPE.NAM', 'S-LOC.NOM', 'S-PER.NAM', 'S-PER.NOM', '[CLS]', '[SEP]']
class ResumeProcessor(DataProcessor):
"""Processor for the resume data set."""
def get_labels(self):
return ['B-CONT', 'B-EDU', 'B-LOC', 'B-NAME', 'B-ORG', 'B-PRO',
'B-RACE', 'B-TITLE', 'E-CONT', 'E-EDU', 'E-LOC', 'E-NAME',
'E-ORG', 'E-PRO', 'E-RACE', 'E-TITLE', 'M-CONT', 'M-EDU',
'M-LOC', 'M-NAME', 'M-ORG', 'M-PRO', 'M-RACE', 'M-TITLE',
'O', 'S-NAME', 'S-ORG', 'S-RACE', '[CLS]', '[SEP]']
class CMeEEProcessor(DataProcessor):
"""Processor for the CMeEE data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-临床表现', 'B-医学检验项目', 'B-医疗程序', 'B-医疗设备',
'B-微生物类', 'B-疾病', 'B-科室', 'B-药物', 'B-身体', 'I-临床表现',
'I-医学检验项目', 'I-医疗程序', 'I-医疗设备', 'I-微生物类',
'I-疾病', 'I-科室', 'I-药物', 'I-身体', 'O', '[CLS]', '[SEP]']
class CLUENERProcessor(DataProcessor):
"""Processor for the CLUENER data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-书名', 'B-公司', 'B-地址', 'B-姓名', 'B-政府', 'B-景点',
'B-游戏', 'B-电影', 'B-组织机构', 'B-职位', 'I-书名', 'I-公司',
'I-地址', 'I-姓名', 'I-政府', 'I-景点', 'I-游戏', 'I-电影',
'I-组织机构', 'I-职位', 'O', '[CLS]', '[SEP]']
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
valid_ids = torch.tensor([f.valid_ids for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
# label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long)
return {
'input_ids': input_ids,
'ngram_ids': ngram_ids,
'ngram_positions': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
'valid_ids': valid_ids,
}
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='weibo', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'weibo': WeiboProcessor,
'resume': ResumeProcessor,
'msra': MSRAProcessor,
'ontonotes4': OntoNotes4Processor,
'cmeee': CMeEEProcessor,
'cluener': CLUENERProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
        # build the label-to-id mapping (index 0 is reserved for [PAD])
label_list = processor.get_labels()
label2id = {label: i for i, label in enumerate(label_list, 1)}
label2id["[PAD]"] = 0
self.id2label = {v: k for k, v in label2id.items()}
self.collator.label2id = label2id
if args.dataset_name is None:
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--markup', default='bios', type=str)
parser.add_argument('--middle_prefix', default='I-', type=str)
return parent_args
def __init__(self, args, id2label):
super().__init__()
# config = ZenConfig(os.path.join(args.pretrained_model_path, 'config.json'))
self.model = ZenForTokenClassification.from_pretrained(args.pretrained_model_path, num_labels=len(id2label))
self.seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.train_seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.id2label = id2label
self.label2id = {v: k for k, v in id2label.items()}
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, _ = outputs
# logits = outputs.logits
# preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
# preds = preds.detach().cpu().numpy()
# labels = batch['labels'].detach().cpu().numpy()
# num_labels = len(self.label2id)
# y_true = []
# y_pred = []
# for i, label in enumerate(labels):
# temp_1 = []
# temp_2 = []
# for j, m in enumerate(label):
# if j == 0:
# continue
# elif labels[i][j] == num_labels - 1:
# y_true.append(temp_1)
# y_pred.append(temp_2)
# break
# else:
# temp_1.append(self.id2label[labels[i][j]])
# temp_2.append(self.id2label[preds[i][j]])
# self.train_seq_entity_score.update(y_true, y_pred)
# result = self.train_seq_entity_score.result()
# self.train_seq_entity_score.reset()
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, logits = outputs
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
num_labels = len(self.label2id)
y_true = []
y_pred = []
for i, label in enumerate(labels):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif labels[i][j] == num_labels - 1:
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(self.id2label[labels[i][j]])
temp_2.append(self.id2label[preds[i][j]])
self.seq_entity_score.update(y_true, y_pred)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
# compute metric for all process
score_dict, _ = self.seq_entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
# reset the metric after once validation
self.seq_entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
id2label = data_model.id2label
print('id2label:', id2label)
model = LitModel(args, id2label)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 26,317 | 39.614198 | 163 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/zen1_finetune/fengshen_sequence_level_ft_task.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen1.tokenization import BertTokenizer
from fengshen.models.zen1.modeling import ZenForSequenceClassification
from fengshen.models.zen1.ngram_utils import ZenNgramDict
from pytorch_lightning.callbacks import LearningRateMonitor
import csv
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
from random import shuffle
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, mode):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
"""Reads a jsonl file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
samples.append(data)
return samples
class TnewsProcessor(DataProcessor):
"""Processor for the tnews data set (HIT version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class OcnliProcessor(DataProcessor):
"""Processor for the ocnli or cmnli data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence1']
text_b = line['sentence2']
label = line['label'] if 'label' in line.keys() else None
            # special case: CMNLI marks unlabeled examples with '-'
            if label == '-':
                label = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class IflytekProcessor(DataProcessor):
"""Processor for the iflytek data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label] if example.label is not None else 0
# ----------- code for ngram BEGIN-----------
ngram_matches = []
# Filter the word segment from 2 to 7 to check whether there is a word
for p in range(2, 8):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the word
# i is the length of the current word
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment])
shuffle(ngram_matches)
# max_word_in_seq_proportion = max_word_in_seq
max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_word_in_seq_proportion:
ngram_matches = ngram_matches[:max_word_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens_a) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = 1.0
# Zero-pad up to the max word in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
label_id = label_map[example.label] if example.label is not None else 0
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
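# Example of the pair-truncation heuristic (hypothetical token lists): with
# tokens_a = ['a1', 'a2', 'a3', 'a4', 'a5'] and tokens_b = ['b1', 'b2'],
# _truncate_seq_pair(tokens_a, tokens_b, max_length=5) keeps popping from the longer list,
# leaving tokens_a = ['a1', 'a2', 'a3'] and tokens_b = ['b1', 'b2'].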
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
}
# return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='tnews', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'afqmc': OcnliProcessor,
'tnews': TnewsProcessor,
'ocnli': OcnliProcessor,
'cmnli': OcnliProcessor,
'iflytek': IflytekProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
self.collator.label2id = self.label2id
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
labels = data[args.label_name] if args.label_name in data.keys(
) else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.model = ZenForSequenceClassification.from_pretrained(args.pretrained_model_path, num_labels=args.num_labels)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.logits
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
model = LitModel(args)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 24,857 | 39.684124 | 130 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_taiyi_clip/pretrain.py
|
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from fengshen.models.clip import (
TaiyiCLIPModel,
TaiyiCLIPProcessor,
)
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
import torch
import torch.nn.functional as F
import argparse
import math
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.taiyi_stable_diffusion_datasets.taiyi_datasets import add_data_args, load_data
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import os
import numpy as np
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
class Collator():
def __init__(self, args, processor):
self.processor = processor
self.seq_length = args.seq_length
self.transforms = Compose([
ToTensor(),
RandomResizedCrop(args.resolution, scale=(0.9, 1.0),
interpolation=InterpolationMode.BICUBIC),
Normalize(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
])
def __call__(self, inputs):
max_length = min(self.seq_length, max([len(i['caption']) for i in inputs]))
images = []
texts = []
labels = []
for i in inputs:
# instance_image = Image.open(i['img_path'])
# instance_image = jpeg4py.JPEG(i['img_path']).decode()
instance_image = np.load(i['npy_path'])
images.append(self.transforms(instance_image))
texts.append(i['caption'])
labels.append(i['labels'] if 'labels' in i else -100)
# images_input = self.processor(images=images, return_tensors="pt")
texts_input = self.processor(text=texts,
max_length=max_length,
padding='max_length',
truncation=True,
return_tensors='pt')
# return images_input, texts_input, labels
return {'pixel_values': torch.stack(images)}, texts_input, labels
class TaiyiCLIP(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Taiyi CLIP')
parser.add_argument('--loss_type', choices=['local', 'global'], default='local')
        parser.add_argument('--seq_length', default=77, type=int)
parser.add_argument('--gather_with_grad', default=False, action='store_true')
parser.add_argument('--freeze_image_tower', default=False, action='store_true')
return parent_parser
def __init__(self, args, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
self.model = TaiyiCLIPModel.from_pretrained(args.model_path)
self.processor = TaiyiCLIPProcessor.from_pretrained(args.model_path)
self.local_loss = args.loss_type == 'local'
if args.freeze_image_tower:
for param in self.model.vision_model.parameters():
param.requires_grad = False
self.model.visual_projection.requires_grad = False
# cache
self.cache_labels = True
self.prev_num_logits = 0
self.labels = {}
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
elif stage == 'validate':
self.total_steps = 100
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, image, text):
assert image is not None
assert text is not None
image_features = self.model.get_image_features(**image)
text_features = self.model.get_text_features(**text)
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
return image_features, text_features, self.model.logit_scale.exp()
def gather_features(self, features):
if self.trainer.world_size == 1:
return features
all_features = self.all_gather(
features, sync_grads=self.hparams.gather_with_grad)
        if not self.local_loss and not self.hparams.gather_with_grad:
            # for the global loss without gather_with_grad, write the local (gradient-carrying)
            # features back into this rank's slot of the gathered tensor
            all_features[self.global_rank] = features
all_features = all_features.view(-1, all_features.shape[-1])
return all_features
def clip_loss(self, image_features, text_features, logit_scale):
logits_per_image = None
        # if the image tower is frozen and the loss is local, only this rank's text features
        # are needed: no image-to-text logits are required to train the ViT
if self.hparams.freeze_image_tower and self.local_loss:
all_text_features = None
else:
all_text_features = self.gather_features(
text_features)
all_image_features = self.gather_features(
image_features)
if self.local_loss:
if all_text_features is not None:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
            # With the global loss, all_text_features is never None here.
logits_per_image = logit_scale * all_image_features @ all_text_features.T
logits_per_text = logits_per_image.T
num_logits = logits_per_text.shape[0]
if self.prev_num_logits != num_logits or self.device not in self.labels:
labels = torch.arange(num_logits, device=self.device, dtype=torch.long)
if self.trainer.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.global_rank
if self.cache_labels:
self.labels[self.device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[self.device]
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2 if logits_per_image is not None else F.cross_entropy(logits_per_text, labels)
return total_loss
def training_step(self, batch):
image, text, _ = batch
image_features, text_features, logit_scale = self(image, text)
total_loss = self.clip_loss(image_features, text_features, logit_scale)
self.log('train_loss', total_loss, sync_dist=False)
return total_loss
def on_train_batch_end(self, outputs, batch, batch_idx: int) -> None:
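        # Clamp the learnable temperature so that exp(logit_scale) stays within [1, 100],
        # following the original CLIP training recipe.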
with torch.no_grad():
self.model.logit_scale.clamp_(0, math.log(100))
def get_metrics(self, image_features, text_features, labels, logit_scale):
        # Compute similarities, supporting multiple positives per query (e.g. one image
        # with several captions).
        # This matters for image-to-text retrieval, where one image may map to several texts;
        # for text-to-image it is usually unnecessary (a text normally has a single image).
metrics = {}
logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
        label2idx = {}  # map each label to the list of indices where it appears
repeat_id = []
for i, label in enumerate(labels):
if label not in label2idx:
label2idx[label] = [i]
else:
                # This label has appeared before; record the index so its score can be
                # suppressed later when computing the text-to-image ranking.
label2idx[label].append(i)
repeat_id.append(i)
ground_truth = [label2idx[label] for label in labels]
for name, logit in logits.items():
if name == 'text_to_image':
                logit[:, repeat_id] -= 1e8  # suppress duplicated images so they are ignored in the ranking
r_stat = {1: [], 5: [], 10: []}
# r1_stat, r5_stat, r10_stat = [], [], []
# index of the largest element to the smallest
ranking = torch.argsort(logit, descending=True)
for i, each_query in enumerate(ranking[:, :10]):
for j, q in enumerate(each_query):
found = False
if q in ground_truth[i]:
for k, v in r_stat.items():
if j < k:
found = True
v.append(1)
if found:
break
for k, v in r_stat.items():
metrics[f'{name}_R@{k}'] = sum(v)/len(logit)
return metrics
def validation_step(self, batch, batch_idx):
image, text, label = batch
image_features, text_features, logit_scale = self(image, text)
return image_features, text_features, logit_scale, text['input_ids'].shape[0], label
def validation_epoch_end(self, val_outputs):
all_image_features = []
all_text_features = []
all_labels = []
sample_size = 0
for o in val_outputs:
all_image_features.append(o[0])
all_text_features.append(o[1])
sample_size += o[3]
all_labels += o[4]
if len(all_image_features) == 0 or len(all_text_features) == 0:
return
all_image_features = torch.cat(all_image_features)
all_text_features = torch.cat(all_text_features)
logit_scale = val_outputs[0][2].mean()
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
labels = torch.arange(sample_size, device=self.device).long()
total_loss = (F.cross_entropy(logits_per_image, labels)
+ F.cross_entropy(logits_per_text, labels)) / 2
val_metrics = self.get_metrics(
image_features=all_image_features,
text_features=all_text_features,
logit_scale=logit_scale,
labels=all_labels)
loss = total_loss / sample_size
self.log('val_loss', loss, sync_dist=False)
for k, v in val_metrics.items():
self.log(f'val_{k}', v, sync_dist=False)
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, where the step count is reset to 0 when resuming from a checkpoint.
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def on_save_checkpoint(self, checkpoint) -> None:
        # When saving a checkpoint, also export the weights in Hugging Face format.
if self.global_rank == 0:
dir_path = os.path.join(
self.hparams.default_root_dir, f'hf_out_{self.trainer.current_epoch}_{self.trainer.global_step}')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
self.model.save_pretrained(dir_path)
self.processor.save_pretrained(dir_path)
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = add_data_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = TaiyiCLIP.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
model = TaiyiCLIP(args)
processor = model.processor
collate_fn = Collator(args, processor)
datasets = load_data(args, global_rank=trainer.global_rank)
    # Load a single validation set. NOTE: this is a temporary hack to sanity-check the code and will be removed once verified.
from fengshen.examples.pretrain_taiyi_clip.flickr_datasets import flickr30k_CNA
img_root = '/shared_space/ccnl/mm_data/Flickr30k-CNA/flickr30k/images'
text_annot_path = '/shared_space/ccnl/mm_data/Flickr30k-CNA/test/flickr30k_cn_test.txt'
datasets[args.val_datasets_field] = flickr30k_CNA(img_root, text_annot_path, collate_fn)
    datamodule = UniversalDataModule(
        tokenizer=None, collate_fn=collate_fn, args=args, datasets=datasets)
    trainer.fit(model, datamodule, ckpt_path=args.load_ckpt_path)
| 12,711 | 40.139159 | 113 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_taiyi_clip/test.py
|
from pytorch_lightning import (
Trainer,
)
from fengshen.models.model_utils import (
add_module_args,
)
import argparse
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.examples.pretrain_taiyi_clip.pretrain import (
TaiyiCLIP,
Collator,
)
from fengshen.data.fs_datasets import load_dataset
from torch.utils.data import DataLoader
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = TaiyiCLIP.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args, callbacks=[
checkpoint_callback
])
model = TaiyiCLIP(args)
processor = model.processor
    collate_fn = Collator(args, processor)
datasets = load_dataset(args.datasets_name)
dataloader = DataLoader(datasets[args.test_datasets_field],
batch_size=args.test_batchsize, num_workers=2, collate_fn=collate_fn)
trainer.validate(model, dataloaders=dataloader, ckpt_path=args.load_ckpt_path)
| 1,404 | 36.972973 | 97 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_taiyi_clip/flickr_datasets.py
|
# This dataset is only for temporary testing, so it is kept here in the simplest possible form; it will be improved later.
from torch.utils.data import Dataset
from PIL import Image
class flickr30k_CNA(Dataset):
def __init__(self, img_root_path=None,
text_annot_path=None,
data_process_fn=None):
self.images = []
self.captions = []
self.labels = []
self.root = img_root_path
with open(text_annot_path, 'r') as f:
for line in f:
line = line.strip().split('\t')
key, caption = line[0].split('#')[0], line[1]
img_path = key + '.jpg'
self.images.append(img_path)
self.captions.append(caption)
self.labels.append(key)
self.data_process_fn = data_process_fn
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img_path = str(self.root + "/" + self.images[idx])
instance_image = Image.open(img_path)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
captions = self.captions[idx]
label = self.labels[idx]
image, text = self.data_process_fn(instance_image, captions)
return image, text, label
| 1,241 | 33.5 | 68 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/summary/seq2seq_summary.py
|
import torch
import os
import argparse
import json
import pytorch_lightning as pl
from fengshen.models.model_utils import add_module_args
from fengshen.data.task_dataloader.task_datasets import AbstractCollator
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.utils.utils import chinese_char_tokenize
from torchmetrics.text.rouge import ROUGEScore
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import LearningRateMonitor
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import sys
sys.path.append('../../../')
# os.environ["CUDA_VISIBLE_DEVICES"] = '3,4'
class FinetuneSummary(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--rouge_keys', default='rougeL,rouge1,rouge2', type=str)
return parent_args
def __init__(self, args, tokenizer=None):
super().__init__()
self.save_hyperparameters(args)
self.model = AutoModelForSeq2SeqLM.from_pretrained(
args.pretrained_model_path)
self.tokenizer = tokenizer
assert self.tokenizer, "tokenizer is None!"
self.rouge_keys = tuple(args.rouge_keys.split(','))
self.rouge_metric = ROUGEScore(rouge_keys=self.rouge_keys, normalizer=lambda x: x)
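        # The identity normalizer leaves the text untouched; Chinese predictions and labels
        # are split into space-separated characters via chinese_char_tokenize before being
        # passed to the ROUGE metric (see validation_step).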
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)
ab_size = self.trainer.accumulate_grad_batches * \
float(self.trainer.max_epochs)
self.total_steps = (
len(train_loader.dataset) // tb_size) // ab_size
print('total_steps is :', self.total_steps)
def training_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
return output.loss
def on_validation_start(self) -> None:
# rm file at validation start
prefix, ext = os.path.splitext(self.hparams.output_save_path)
file_path_rank = '{}_{}{}'.format(
prefix, self.trainer._accelerator_connector.cluster_environment.global_rank(), ext)
if os.path.exists(file_path_rank):
print('rm {}'.format(file_path_rank))
os.remove(file_path_rank)
def validation_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
generated_ids = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
max_length=self.hparams.max_dec_length
)
preds = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
labels = torch.where(batch['labels'] != -100, batch['labels'],
self.tokenizer.pad_token_id)
labels = self.tokenizer.batch_decode(
labels, skip_special_tokens=True, clean_up_tokenization_spaces=True)
# save preds for every rank
prefix, ext = os.path.splitext(self.hparams.output_save_path)
file_path_rank = '{}_{}{}'.format(
prefix, self.trainer._accelerator_connector.cluster_environment.global_rank(), ext)
self.save_prediction_to_file(preds=preds, texts=batch['text'],
summarys=batch['summary'], file_path=file_path_rank)
# you need to split chinese char with space for rouge metric
new_preds = [chinese_char_tokenize(p) for p in preds]
new_labels = [chinese_char_tokenize(label) for label in labels]
# update metric
self.rouge_metric.update(preds=new_preds, target=new_labels)
self.log('val_loss', output.loss, sync_dist=True)
def validation_epoch_end(self, outputs):
# compute metric for all process
rouge_dict = self.rouge_metric.compute()
# reset the metric after once validation
self.rouge_metric.reset()
for k, v in rouge_dict.items():
self.log('val_{}'.format(k), v, sync_dist=True)
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('rouge:\n', rouge_dict)
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(checkpoint['epoch'], checkpoint['global_step'])))
def save_prediction_to_file(self, preds, texts, summarys, file_path):
with open(file_path, 'a', encoding='utf-8') as f:
for idx, pred in enumerate(preds):
text = texts[idx]
summary = summarys[idx]
tmp_result = dict()
tmp_result['pred'] = pred
tmp_result['label'] = summary
tmp_result['text'] = text
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data + '\n')
def predict_step(self, batch, batch_idx):
# print(batch)
texts = batch['text']
# output summary and metrics
generated_ids = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
max_length=self.hparams.max_dec_length
)
preds = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
labels = self.tokenizer.batch_decode(
batch['labels'], skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(batch_idx, len(preds), len(labels))
        self.save_prediction_to_file(preds, texts, labels, file_path=self.hparams.output_save_path)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def main():
total_parser = argparse.ArgumentParser("Summary Task")
total_parser.add_argument('--do_eval_only',
action='store_true',
default=False)
total_parser.add_argument('--pretrained_model_path',
default='google/mt5-small',
type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json',
type=str)
total_parser.add_argument('--self_tokenizer',
action='store_true',
default=False)
total_parser.add_argument('--max_enc_length', default=1024, type=int)
total_parser.add_argument('--max_dec_length', default=256, type=int)
total_parser.add_argument('--prompt', default='summarize:', type=str)
# * Args for data preprocessing
# from fengshen.data.task_dataloader.task_datasets import LCSTSDataModel
total_parser = UniversalDataModule.add_data_specific_args(total_parser)
# * Args for training
total_parser = add_module_args(total_parser)
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = FinetuneSummary.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
if args.self_tokenizer:
from fengshen.examples.pegasus.tokenizers_pegasus import PegasusTokenizer
tokenizer = PegasusTokenizer.from_pretrained(args.pretrained_model_path)
else:
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_path, use_fast=False)
collator = AbstractCollator(tokenizer, args.max_enc_length,
args.max_dec_length, args.prompt)
data_model = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collator)
model = FinetuneSummary(args, tokenizer)
if not args.do_eval_only:
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'))
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[lr_monitor,
checkpoint_callback]
)
trainer.fit(model, data_model)
else:
trainer = Trainer.from_argparse_args(args)
# trainer.predict(model, data_model)
trainer.validate(model, data_model)
if __name__ == '__main__':
main()
| 9,184 | 45.388889 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/mt5_summary/mt5_summary.py
|
from fengshen.data.task_dataloader.task_datasets import LCSTSDataModel
from transformers import T5Tokenizer, MT5ForConditionalGeneration
from transformers.optimization import get_linear_schedule_with_warmup
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import ModelCheckpoint
from transformers import AutoTokenizer
import pytorch_lightning as pl
import json
import argparse
import torch
import os
import sys
sys.path.append('./')
# os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
def test():
tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
summary = "Weiter Verhandlung in Syrien."
article = "日前,方舟子发文直指林志颖旗下爱碧丽推销假保健品,引起哗然。调查发现,爱碧丽没有自己的生产加工厂。 \
其胶原蛋白饮品无核心研发,全部代工生产。号称有“逆生长”功效的爱碧丽“梦幻奇迹限量组”售价>高达1080元,实际成本仅为每瓶4元!"
summary = "林志颖公司疑涉虚假营销无厂房无研发"
    inputs = tokenizer(article, return_tensors="pt")
tt = tokenizer.encode_plus(summary, max_length=64,
padding='max_length', truncation='longest_first')
print('tt:', tt)
print('inputs:', inputs)
with tokenizer.as_target_tokenizer():
labels = tokenizer(summary, return_tensors="pt")
print('labels:', labels)
print('origin labels:', tokenizer.decode(labels['input_ids'][0]))
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")
# outputs = model(input_ids=inputs["input_ids"], labels=labels["input_ids"])
# print(outputs.keys())
# evaluation
model.eval()
generated_ids = model.generate(
input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
max_length=150,
num_beams=2,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True
)
preds = [tokenizer.decode(g, skip_special_tokens=True,
clean_up_tokenization_spaces=True) for g in generated_ids]
print(preds)
class MT5FinetuneSummaryModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
class MT5FinetuneSummary(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
print('num_data:', num_data)
self.model = MT5ForConditionalGeneration.from_pretrained(args.pretrained_model_path)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss)
# self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
text = batch['text']
summary = batch['summary']
generated_ids = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
max_length=self.args.max_dec_length
)
return {"pred": generated_ids, "text": text, "summary": summary}
def configure_optimizers(self):
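        # Exclude bias and LayerNorm parameters from weight decay, the usual practice when
        # fine-tuning transformer models with AdamW.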
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def save_test(data, args, data_model):
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_path)
with open(os.path.join(args.output_save_path), 'w', encoding='utf-8') as f:
for _, batch in enumerate(data):
texts = batch['text']
summarys = batch['summary']
preds = batch['pred']
for idx, pred_ids in enumerate(preds):
text = texts[idx]
summary = summarys[idx]
tmp_result = dict()
preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for g in pred_ids]
tmp_result['summary'] = ''.join(preds)
tmp_result['label'] = summary
tmp_result['origin_text'] = text
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("Summary Task")
total_parser.add_argument('--do_eval_only', action='store_true', default=False)
total_parser.add_argument('--pretrained_model_path', default='google/mt5-small', type=str)
total_parser.add_argument('--output_save_path', default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = LCSTSDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = MT5FinetuneSummaryModelCheckpoint.add_argparse_args(total_parser)
total_parser = MT5FinetuneSummary.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
data_model = LCSTSDataModel(args)
if not args.do_eval_only:
model = MT5FinetuneSummary(args, len(data_model.train_dataloader()))
checkpoint_callback = MT5FinetuneSummaryModelCheckpoint(args).callbacks
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'), name='mt5_summary')
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback]
)
trainer.fit(model, data_model)
else:
trainer = Trainer.from_argparse_args(args)
model = MT5FinetuneSummary.load_from_checkpoint(
args.resume_from_checkpoint, args=args, num_data=len(data_model.predict_dataloader()))
result = trainer.predict(model, data_model)
if torch.distributed.get_rank() == 0:
save_test(result, args, data_model)
if __name__ == '__main__':
main()
# test()
'''
python examples/mt5_summary.py --gpus=1 --test_data=test_public.jsonl
--default_root_dir=/cognitive_comp/ganruyi/fengshen/mt5_summary/eval
--do_eval_only
--resume_from_checkpoint=/cognitive_comp/ganruyi/fengshen/mt5_summary/ckpt/model-epoch=01-train_loss=1.9166.ckpt
--strategy=ddp
'''
| 9,924 | 41.41453 | 112 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/mt5_summary/fastapi_mt5_summary.py
|
import os
import sys
import uvicorn
import torch
from fastapi import Body, FastAPI
from transformers import T5Tokenizer, MT5ForConditionalGeneration
import pytorch_lightning as pl
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir)))
os.environ["CUDA_VISIBLE_DEVICES"] = '5'
os.environ["MASTER_ADDR"] = '127.0.0.1'
os.environ["MASTER_PORT"] = '6000'
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print('device:', device)
pretrain_model_path = '/cognitive_comp/ganruyi/hf_models/google/mt5-large'
# pretrain_model_path = 'google/mt5-small'
model_path = '/cognitive_comp/ganruyi/fengshen/mt5_large_summary/ckpt/epoch-0-last.ckpt'
tokenizer = T5Tokenizer.from_pretrained(pretrain_model_path)
print('load tokenizer')
class MT5FinetuneSummary(pl.LightningModule):
def __init__(self):
super().__init__()
self.model = MT5ForConditionalGeneration.from_pretrained(pretrain_model_path)
model = MT5FinetuneSummary.load_from_checkpoint(model_path)
print('load checkpoint')
model.to(device)
model.eval()
app = FastAPI()
print('server start')
# def flask_gen(text: str, level: float = 0.9, n_sample: int = 5, length: int = 32, is_beam_search=False):
@app.post('/mt5_summary')
async def flask_gen(text: str = Body('', title='原文', embed=True),
n_sample: int = 5, length: int = 32, is_beam_search=False):
if len(text) > 128:
text = text[:128]
text = 'summary:'+text
print(text)
# inputs = tokenizer(text, return_tensors='pt')
inputs = tokenizer.encode_plus(
text, max_length=128, padding='max_length', truncation=True, return_tensors='pt')
# print(inputs)
if is_beam_search:
generated_ids = model.model.generate(
input_ids=inputs['input_ids'].to(device),
attention_mask=inputs['attention_mask'].to(device),
max_length=length,
num_beams=n_sample,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True,
num_return_sequences=n_sample
)
else:
generated_ids = model.model.generate(
input_ids=inputs['input_ids'].to(device),
attention_mask=inputs['attention_mask'].to(device),
max_length=length,
do_sample=True,
temperature=1.0,
top_p=1.0,
repetition_penalty=2.5,
# early_stopping=True,
num_return_sequences=n_sample
)
result = []
# print(tokenizer.all_special_tokens)
for sample in generated_ids:
preds = [tokenizer.decode(sample, skip_special_tokens=True,
clean_up_tokenization_spaces=True)]
preds = ''.join(preds)
# print(preds)
result.append(preds)
return result
if __name__ == '__main__':
uvicorn.run(app, host="0.0.0.0", port=6607, log_level="debug")
# # article = "日前,方舟子发文直指林志颖旗下爱碧丽推销假保健品,引起哗然。调查发现,
# 爱碧丽没有自己的生产加工厂。其胶原蛋白饮品无核心研发,全部代工生产。号称有“逆生长”功效的爱碧丽“梦幻奇迹限量组”售价>高达1080元,实际成本仅为每瓶4元!"
# article = '''在北京冬奥会自由式滑雪女子坡面障碍技巧决赛中,中国选手谷爱凌夺得银牌。祝贺谷爱凌!
# 今天上午,自由式滑雪女子坡面障碍技巧决赛举行。决赛分三轮进行,取选手最佳成绩排名决出奖牌。
# 第一跳,中国选手谷爱凌获得69.90分。在12位选手中排名第三。完成动作后,谷爱凌又扮了个鬼脸,甚是可爱。
# 第二轮中,谷爱凌在道具区第三个障碍处失误,落地时摔倒。获得16.98分。网友:摔倒了也没关系,继续加油!
# 在第二跳失误摔倒的情况下,谷爱凌顶住压力,第三跳稳稳发挥,流畅落地!获得86.23分!此轮比赛,共12位选手参赛,谷爱凌第10位出场。网友:看比赛时我比谷爱凌紧张,加油!'''
# flask_gen(article, length=30)
| 3,393 | 35.106383 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/finetune_taiyi_stable_diffusion/finetune.py
|
import os
import torch
import argparse
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from diffusers import StableDiffusionPipeline
from torch.nn import functional as F
from torchvision import transforms
from fengshen.data.taiyi_stable_diffusion_datasets.taiyi_datasets import add_data_args, load_data
import numpy as np
from PIL import Image
class Collator():
def __init__(self, args, tokenizer):
self.image_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Resize(
args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(
args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
transforms.Normalize([0.5], [0.5]),
]
)
self.tokenizer = tokenizer
def __call__(self, inputs):
max_length = min(max([len(i['caption']) for i in inputs]), 256)
images = []
texts = []
for i in inputs:
if 'npy_path' in i:
instance_image = np.load(i['npy_path'])
elif 'img_path' in i:
try:
instance_image = Image.open(i['img_path'])
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
                except Exception:
continue
else:
raise ValueError('no img path in samples')
images.append(self.image_transforms(instance_image))
texts.append(i['caption'])
text_inputs = self.tokenizer(text=texts,
images=images,
max_length=max_length,
padding='max_length',
truncation=True,
return_tensors='pt')
# return images_input, texts_input, labels
return {'pixel_values': torch.stack(images), 'input_ids': text_inputs['input_ids']}
class StableDiffusion(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Taiyi Stable Diffusion Module')
parser.add_argument('--freeze_unet', action='store_true', default=False)
parser.add_argument('--text_model_path', default=None)
parser.add_argument('--freeze_text_encoder', action='store_true', default=False)
parser.add_argument('--use_local_token', action='store_true', default=False)
parser.add_argument('--use_local_unet', action='store_true', default=False)
return parent_parser
def __init__(self, args):
super().__init__()
self.pipeline = StableDiffusionPipeline.from_pretrained(args.model_path)
self.tokenizer = self.pipeline.tokenizer
self.text_encoder = self.pipeline.text_encoder
self.vae = self.pipeline.vae
self.unet = self.pipeline.unet
self.noise_scheduler = self.pipeline.scheduler
self.pipeline.set_use_memory_efficient_attention_xformers(True)
for param in self.vae.parameters():
param.requires_grad = False
if args.freeze_text_encoder:
for param in self.text_encoder.parameters():
param.requires_grad = False
if args.freeze_unet:
for param in self.unet.parameters():
param.requires_grad = False
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
latents = self.vae.encode(batch["pixel_values"]).latent_dist.sample()
latents = latents * 0.18215
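        # 0.18215 is the latent scaling factor of Stable Diffusion's VAE, applied so the
        # latents have roughly unit variance before the diffusion process.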
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
noise = noise.to(dtype=self.unet.dtype)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
noisy_latents = noisy_latents.to(dtype=self.unet.dtype)
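        # The regression target depends on the scheduler parameterization: "epsilon" predicts
        # the injected noise, while "v_prediction" predicts the velocity target computed from
        # the latents and noise at the sampled timestep.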
if self.noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif self.noise_scheduler.config.prediction_type == "v_prediction":
target = self.noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(
f"Unknown prediction type {self.noise_scheduler.config.prediction_type}")
# Get the text embedding for conditioning
encoder_hidden_states = self.text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states).sample
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
self.log("train_loss", loss.item())
if self.trainer.global_rank == 0 and self.global_step == 100:
            # report GPU memory usage
from fengshen.utils.utils import report_memory
report_memory('stable diffusion')
return {"loss": loss}
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer.global_rank == 0:
print('saving model...')
            self.pipeline.save_pretrained(os.path.join(
                self.hparams.default_root_dir, f'hf_out_{self.trainer.current_epoch}_{self.trainer.global_step}'))
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, where the step count is reset to 0 when resuming from a checkpoint.
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = add_data_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = StableDiffusion.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
model = StableDiffusion(args)
tokenizer = model.tokenizer
datasets = load_data(args, global_rank=trainer.global_rank)
collate_fn = Collator(args, tokenizer)
    datamodule = UniversalDataModule(
        tokenizer=tokenizer, collate_fn=collate_fn, args=args, datasets=datasets)
    trainer.fit(model, datamodule, ckpt_path=args.load_ckpt_path)
| 7,853 | 39.484536 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/finetune_taiyi_stable_diffusion/evaluate_model.py
|
import pytorch_lightning as pl
import torch.nn as nn
import torch.nn.functional as F
import torch
import timm
from torchvision import transforms as T
import open_clip
import sys
import json
from transformers import BertModel, BertTokenizer
from PIL import Image
from diffusers import StableDiffusionPipeline
import random
import os
from tqdm import tqdm
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
torch.backends.cudnn.benchmark = True
class AestheticsMLP(pl.LightningModule):
    # The aesthetics scorer is an MLP head attached on top of CLIP image features.
def __init__(self, input_size, xcol='emb', ycol='avg_rating'):
super().__init__()
self.input_size = input_size
self.xcol = xcol
self.ycol = ycol
self.layers = nn.Sequential(
nn.Linear(self.input_size, 1024),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 128),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
#nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(64, 16),
#nn.ReLU(),
nn.Linear(16, 1)
)
def forward(self, x):
return self.layers(x)
def training_step(self, batch, batch_idx):
x = batch[self.xcol]
y = batch[self.ycol].reshape(-1, 1)
x_hat = self.layers(x)
loss = F.mse_loss(x_hat, y)
return loss
def validation_step(self, batch, batch_idx):
x = batch[self.xcol]
y = batch[self.ycol].reshape(-1, 1)
x_hat = self.layers(x)
loss = F.mse_loss(x_hat, y)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
class WaterMarkModel(nn.Module):
def __init__(self, model_path='./watermark_model_v1.pt'):
super(WaterMarkModel, self).__init__()
# model definition
self.model = timm.create_model(
'efficientnet_b3a', pretrained=True, num_classes=2)
self.model.classifier = nn.Sequential(
# 1536 is the orginal in_features
nn.Linear(in_features=1536, out_features=625),
nn.ReLU(), # ReLu to be the activation function
nn.Dropout(p=0.3),
nn.Linear(in_features=625, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=2),
)
self.model.load_state_dict(torch.load(model_path))
def forward(self, x):
return self.model(x)
class FilterSystem:
def __init__(
self,
clip_model_path="IDEA-CCNL/Taiyi-CLIP-RoBERTa-102M-ViT-L-Chinese",
aesthetics_model_path="./ava+logos-l14-linearMSE.pth",
watermark_model_path="./watermark_model_v1.pt"
):
self.clip_model_path = clip_model_path
self.aesthetics_model_path = aesthetics_model_path
self.watermark_model_path = watermark_model_path
self.init_aesthetics_model()
self.init_clip_model()
self.init_watermark_model()
def init_clip_model(self, ):
        # Initialize the CLIP model; provides the text encoder, tokenizer, CLIP model and image processor.
text_encoder = BertModel.from_pretrained(self.clip_model_path).eval().cuda()
text_tokenizer = BertTokenizer.from_pretrained(self.clip_model_path)
clip_model, _, processor = open_clip.create_model_and_transforms('ViT-L-14', pretrained='openai')
clip_model = clip_model.eval().cuda()
self.text_encoder, self.text_tokenizer, self.clip_model, self.processor = text_encoder, text_tokenizer, clip_model, processor
print("clip model loaded")
return None
def init_aesthetics_model(self, ):
        # Initialize the aesthetics scoring model.
self.aesthetics_model = AestheticsMLP(768)
self.aesthetics_model.load_state_dict(torch.load(self.aesthetics_model_path))
self.aesthetics_model.eval().cuda()
print("aesthetics model loaded")
return None
def init_watermark_model(self, ):
self.watermark_model = WaterMarkModel(self.watermark_model_path)
self.watermark_model.eval().cuda()
self.watermark_processor = T.Compose([
T.Resize((256, 256)),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
print("watermark model loaded")
return None
def get_image_feature(self, images):
        # Return the normalized image feature vectors.
if isinstance(images, list):
images = torch.stack([self.processor(image) for image in images]).cuda()
elif isinstance(images, torch.Tensor):
images = images.cuda()
else:
images = self.processor(images).cuda()
with torch.no_grad():
image_features = self.clip_model.encode_image(images)
image_features /= image_features.norm(dim=1, keepdim=True)
return image_features
def get_text_feature(self, text):
        # Return the normalized text feature vectors.
if isinstance(text, list) or isinstance(text, str):
text = self.text_tokenizer(text, return_tensors='pt', padding=True)['input_ids'].cuda()
elif isinstance(text, torch.Tensor):
text = text.cuda()
with torch.no_grad():
text_features = self.text_encoder(text)[1]
text_features /= text_features.norm(dim=1, keepdim=True)
return text_features
def calculate_clip_score(self, features1, features2):
        # Similarity between two sets of feature vectors; the inputs can be image+text,
        # text+text or image+image.
        # Returns a similarity matrix of shape features1.shape[0] x features2.shape[0].
score_matrix = features1 @ features2.t()
return score_matrix
def get_clip_score(self, text, image):
text_feature = self.get_text_feature(text)
image_feature = self.get_image_feature(image)
return self.calculate_clip_score(text_feature, image_feature)
def get_aesthetics_score(self, features):
        # Return aesthetics scores from CLIP features; call get_image_feature first and pass the result in (the backbone is ViT-L-14).
with torch.no_grad():
scores = self.aesthetics_model(features)
scores = scores[:, 0].detach().cpu().numpy()
return scores
def get_watermark_score(self, images):
if isinstance(images, list):
images = torch.stack([self.watermark_processor(image) for image in images]).cuda()
elif isinstance(images, torch.Tensor):
images = images.cuda()
with torch.no_grad():
pred = self.watermark_model(images)
watermark_scores = F.softmax(pred, dim=1)[:,0].detach().cpu().numpy()
return watermark_scores
class InferenceFlickr:
def __init__(self, sd_model_list, sample_num=20, guidance_scale=7.5, test_caption_path="/cognitive_comp/chenweifeng/project/dataset/mm_data/Flickr30k-CNA/test/flickr30k_cn_test.txt"):
self.model_name_list = sd_model_list
self.guidance_scale = guidance_scale
self.sample_num=sample_num
self.score_model = FilterSystem()
self.caption_path = test_caption_path
self.score = dict()
self.final_score = dict()
def init_model(self):
self.model_list = []
for model_name in self.model_name_list:
pipe = StableDiffusionPipeline.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
self.model_list.append(pipe)
def generate_image_score(self, prompt_list, model_list):
generator = torch.Generator(device=0)
generator = generator.manual_seed(42)
# num_images = 1
# latents = None
# seeds = []
# for _ in range(num_images):
# generator = generator.manual_seed(42)
# image_latents = torch.randn(
# (1, pipe.unet.in_channels, 512 // 8, 512 // 8),
# generator = generator,
# device =1
# )
# latents = image_latents if latents is None else torch.cat((latents, image_latents))
for i, model in enumerate(model_list):
model_name = self.model_name_list[i]
self.score[model_name] = dict()
for j, prompt in tqdm(enumerate(prompt_list)):
latents = None
image_latents = torch.randn(
(1, model.unet.in_channels, 512 // 8, 512 // 8),
                    generator=generator,
                    device=0,
dtype=torch.float16
)
latents = image_latents if latents is None else torch.cat((latents, image_latents))
image = model(prompt, guidance_scale=self.guidance_scale, latents=latents, torch_dtype=torch.float16).images[0]
image_feature = self.score_model.get_image_feature([image])
text_feature = self.score_model.get_text_feature(prompt)
image_clip_score = self.score_model.calculate_clip_score(image_feature, text_feature)
image_watermark_score = self.score_model.get_watermark_score([image])
image_aesthetics_score =self.score_model.get_aesthetics_score(image_feature)
self.score[model_name][prompt] = {
"clip_score": float(image_clip_score[0][0]),
"watermark_score": float(image_watermark_score[0]),
"aesthetics_score": float(image_aesthetics_score[0]),
}
image.save(f"tmp/{prompt}_model-{str(i)}.png")
def get_prompt_list(self, seed=42, ):
with open(self.caption_path) as fin:
input_lines = fin.readlines()
tmp_list = []
for line in input_lines:
infos = line.strip('\n').split('\t')
prompt = infos[1]
tmp_list.append(prompt)
random.seed(seed)
prompt_list = random.sample(tmp_list, self.sample_num)
return prompt_list
def run(self):
self.init_model()
prompt_list = self.get_prompt_list()
self.generate_image_score(prompt_list, self.model_list)
def show(self, save_path=None):
# print(self.score)
print(self.final_score)
if save_path:
with open(save_path, 'w') as fout:
                json.dump(self.final_score, fout, indent=2, ensure_ascii=False)
def calculate_score(self,):
for model_name in self.score.keys():
clip_score = 0.0
watermark_score = 0.0
aesthetics_score = 0.0
for prompt in self.score[model_name]:
clip_score += self.score[model_name][prompt]['clip_score']
watermark_score += self.score[model_name][prompt]['watermark_score']
aesthetics_score += self.score[model_name][prompt]['aesthetics_score']
average_clip_score = clip_score / len(self.score[model_name].keys())
average_watermark_score = watermark_score / len(self.score[model_name].keys())
average_aesthetics_score = aesthetics_score / len(self.score[model_name].keys())
self.final_score[model_name] = {"avg_clip": average_clip_score, "avg_watermark": average_watermark_score, 'avg_aesthetics': average_aesthetics_score}
def main():
model_path = sys.argv[1]
model_list = [
# '/cognitive_comp/chenweifeng/project/stable-diffusion-lightning/finetune_taiyi_v0.40_laion',
# '/cognitive_comp/chenweifeng/project/stable-diffusion-chinese/finetune_taiyi0'
# "/cognitive_comp/lixiayu/diffuser_models/wukong_epoch1"
# "/cognitive_comp/lixiayu/work/Fengshenbang-LM/fengshen/workspace/taiyi-stablediffusion-laion/60per_ckpt",
model_path
]
score_model = InferenceFlickr(model_list, sample_num=1000)
score_model.run()
score_model.calculate_score()
score_model.show()
if __name__ == "__main__":
main()
| 11,944 | 39.491525 | 187 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pegasus/pretrain_pegasus.py
|
# -*- coding: utf-8 -*-
from fengshen.models.model_utils import add_module_args
from transformers import PegasusForConditionalGeneration, PegasusConfig
from pytorch_lightning import Trainer, loggers, LightningModule
from pytorch_lightning.callbacks import LearningRateMonitor
from tokenizers_pegasus import PegasusTokenizer
from utils import UniversalCheckpoint
from data.universal_datamodule import UniversalDataModule
from data_utils import (
get_input_mask, pseudo_summary_f1, shift_tokens_right,
padding_to_maxlength, load_stopwords, text_segmentate)
import argparse
import torch
import os
import sys
sys.path.append('../../')
# os.environ["CUDA_VISIBLE_DEVICES"] = '6'
class FakeAbstractCollator:
def __init__(self, tokenizer, stopwords_dict, max_enc_length):
self.tokenizer = tokenizer
self.max_seq_length = max_enc_length
self.stopwords_dict = stopwords_dict
def __call__(self, samples):
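        # Build PEGASUS gap-sentence-generation examples on the fly: each raw text is
        # segmented into sentences, a pseudo summary is selected via ROUGE, the selected
        # sentences are masked in the encoder input, and the summary becomes the decoder target.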
# print("samples: ", samples)
labels = []
attn_mask = []
decoder_attn_mask = []
source_inputs = []
for text in samples:
texts = text["chunks"]
text = text_segmentate(texts)
sentence_id_vec, source, target, source_idxs, target_idxs = pseudo_summary_f1(
text, self.stopwords_dict, self.tokenizer, self.max_seq_length,
"rouge-l")
source_idxs, target_idxs = get_input_mask(sentence_id_vec,
target_idxs)
if len(source_idxs) > self.max_seq_length:
if 2 not in source_idxs[self.max_seq_length - 1:]:
source_idxs = source_idxs[:self.max_seq_length]
source_idxs[-1] = self.tokenizer.eos_token_id
sys.stderr.write("Warning split long line: " + source +
"\n")
else:
continue
source_idxs, attention_mask = padding_to_maxlength(
source_idxs, self.max_seq_length, self.tokenizer.pad_token_id)
label, target_attention_mask = padding_to_maxlength(
target_idxs, self.max_seq_length, self.tokenizer.pad_token_id)
# print("sample len: ", len(source_idxs))
source_inputs.append(source_idxs)
attn_mask.append(attention_mask)
decoder_attn_mask.append(target_attention_mask)
labels.append(label)
labels = torch.tensor(labels)
decode_input_idxs = shift_tokens_right(labels,
self.tokenizer.pad_token_id,
self.tokenizer.pad_token_id)
end_token_index = torch.where(labels == self.tokenizer.eos_token_id)[1]
for idx, end_idx in enumerate(end_token_index):
labels[idx][end_idx + 1:] = -100
# print("call samples: ")
return {
"input_ids": torch.tensor(source_inputs),
"attention_mask": torch.tensor(attn_mask),
"labels": labels,
"decoder_input_ids": decode_input_idxs,
"decoder_attention_mask": torch.tensor(decoder_attn_mask)
}
class PegasusChineseModel(LightningModule):
def __init__(self, args, **kwargs):
super().__init__()
self.args = args
self.save_hyperparameters(args)
config = PegasusConfig.from_json_file(
os.path.join(args.model_path, "config.json"))
print("vocab_size: ", config.vocab_size)
self.model = PegasusForConditionalGeneration(config=config)
print("model.num_parameters: ", self.model.num_parameters())
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader(
)
# Calculate total steps
tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)
ab_size = self.trainer.accumulate_grad_batches * float(
self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) //
tb_size) // ab_size
print('Total training step:', self.total_steps)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(**batch)
self.log('train_loss', output.loss, sync_dist=True)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1, ))
y_true = labels.view(size=(-1, )).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float()) / labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(**batch)
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer._accelerator_connector.cluster_environment.global_rank(
) == 0:
self.model.save_pretrained(
os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(
checkpoint['epoch'], checkpoint['global_step'])))
def main():
args_parser = argparse.ArgumentParser("Pegasus Task")
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args_parser = add_module_args(args_parser)
args_parser.add_argument('--deepspeed')
args_parser.add_argument(
'--stopword_path',
default="/cognitive_comp/dongxiaoqun/project/pegasus/own/pegasus/stopwords",
type=str)
args_parser.add_argument('--max_seq_length', default=1024, type=int)
args = args_parser.parse_args()
tokenizer = PegasusTokenizer.from_pretrained(args.model_path)
stopwords_dict = load_stopwords(args.stopword_path)
collator = FakeAbstractCollator(tokenizer, stopwords_dict,
args.max_seq_length)
data_module = UniversalDataModule(tokenizer=tokenizer,
args=args,
collate_fn=collator)
module = PegasusChineseModel(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(
save_dir=os.path.join(args.default_root_dir, 'logs/'),
name=os.path.basename(os.path.dirname(args.model_path)))
checkpoint_callback = UniversalCheckpoint(args).callbacks
# autotuning
if args.deepspeed is not None:
os.environ['PL_DEEPSPEED_CONFIG_PATH'] = args.deepspeed
trainer = Trainer.from_argparse_args(
args, logger=logger, callbacks=[lr_monitor, checkpoint_callback])
trainer.fit(module, data_module)
if __name__ == '__main__':
main()
| 7,247 | 38.824176 | 92 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pegasus/data_utils.py
|
# -*- coding: utf-8 -*-
import re
import six
import unicodedata
import torch
import rouge
import numpy as np
import random
# from fengshen.examples.pegasus.pegasus_utils import text_segmentate
import sys
sys.path.append('../../../')
rouge = rouge.Rouge()
is_py2 = six.PY2
if not is_py2:
basestring = str
def _is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF)
or (cp >= 0x20000 and cp <= 0x2A6DF)
or (cp >= 0x2A700 and cp <= 0x2B73F)
or (cp >= 0x2B740 and cp <= 0x2B81F)
or (cp >= 0x2B820 and cp <= 0x2CEAF)
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F)):
return True
return False
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (
cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, basestring)
def is_stopwords(word, stopwords):
if word in stopwords:
return True
else:
return False
def text_segmentate(text):
en_seg_pattern = '((?:\\!|\\?|\\.|\\n)+(?:\\s)+)'
ch_seg_pattern = '((?:?|!|。|\\n)+)'
try:
text = re.sub(en_seg_pattern, r'\1[SEP]', text)
# print("sub text: ", text)
except Exception as e:
print("input: ", text)
raise e
text = re.sub(ch_seg_pattern, r'\1[SEP]', text)
# print("sub ch text: ", text)
text_list = text.split("[SEP]")
text_list = list(filter(lambda x: len(x) != 0, text_list))
return text_list
def load_stopwords(stopwords_path):
stopwords_dict = {}
with open(stopwords_path, "r") as rf:
for line in rf:
line = line.strip()
if line not in stopwords_dict:
stopwords_dict[line] = 0
else:
pass
return stopwords_dict
def text_process(text, max_length):
"""分割文本
"""
texts = text_segmentate(text)
result, length = [], 0
for text in texts:
if length + len(text) > max_length * 1.3 and len(result) >= 3:
yield result
result, length = [], 0
result.append(text)
length += len(text)
if result and len(result) >= 3:
yield result
def text_process_split_long_content(text, max_length):
"""分割长文本
"""
texts = text_segmentate(text)
result, sentence_num = "", 0
for text in texts:
if len(text) > 500:
if len(result) > 300 and sentence_num >= 3:
yield result
result, sentence_num = "", 0
else:
result, sentence_num = "", 0
continue
else:
if len(result) + len(text) > max_length * 1.1 and sentence_num >= 3:
yield result
result, sentence_num = "", 0
result += text
sentence_num += 1
if result and sentence_num >= 3:
yield result
def gather_join(texts, idxs):
"""取出对应的text,然后拼接起来
"""
return ''.join([texts[i] for i in idxs])
def gather_join_f1(texts_token, idsx):
join_texts = []
for id in idsx:
join_texts.extend(texts_token[id])
return join_texts
def compute_rouge(source, target):
"""计算rouge-1、rouge-2、rouge-l
"""
source, target = ' '.join(source), ' '.join(target)
try:
scores = rouge.get_scores(hyps=source, refs=target)
return {
'rouge-1': scores[0]['rouge-1']['f'],
'rouge-2': scores[0]['rouge-2']['f'],
'rouge-l': scores[0]['rouge-l']['f'],
}
except ValueError:
return {
'rouge-1': 0.0,
'rouge-2': 0.0,
'rouge-l': 0.0,
}
def remove_stopwords(texts, stopwords_dict):
for i, text in enumerate(texts):
texts[i] = list(filter(lambda x: x not in stopwords_dict, text))
return texts
def pseudo_summary_f1(texts,
stopwords,
tokenizer,
max_length,
rouge_strategy="rouge-l"):
"""构建伪标签摘要数据集
"""
summary_rate = 0.25
max_length = max_length - 1
texts_tokens = []
sentece_idxs_vec = []
for text in texts:
        if len(text) == 0:
continue
try:
ids = tokenizer.encode(text.strip())[:-1]
except ValueError:
print("error, input : ", text)
raise ValueError
sentece_idxs_vec.append(ids)
tokens = [tokenizer._convert_id_to_token(token) for token in ids]
texts_tokens.append(tokens)
texts_tokens_rm = remove_stopwords(texts_tokens, stopwords)
source_idxs, target_idxs = list(range(len(texts))), []
assert len(texts_tokens) == len(texts)
# truncate_index = 0
while True:
sims = []
for i in source_idxs:
new_source_idxs = [j for j in source_idxs if j != i]
new_target_idxs = sorted(target_idxs + [i])
new_source = gather_join_f1(texts_tokens_rm, new_source_idxs)
new_target = gather_join_f1(texts_tokens_rm, new_target_idxs)
sim = compute_rouge(new_source, new_target)[rouge_strategy]
sims.append(sim)
new_idx = source_idxs[np.argmax(sims)]
del sims
source_idxs.remove(new_idx)
target_idxs = sorted(target_idxs + [new_idx])
source = gather_join(texts, source_idxs)
target = gather_join(texts, target_idxs)
try:
if (len(source_idxs) == 1
or 1.0 * len(target) / len(source) > summary_rate):
break
except ZeroDivisionError as e:
            print(e)
print(texts)
print("source: ", source)
print("target: ", target)
if len(source) < len(target):
source, target = target, source
source_idxs, target_idxs = target_idxs, source_idxs
return sentece_idxs_vec, source, target, source_idxs, target_idxs
def get_input_mask(sentence_id_vec, indexs):
target_idxs = []
input_idxs = []
kMaskSentenceTokenId = 2
kEosTokenId = 1
mask_sentence_options_cumulative_prob = [0.9, 0.9, 1, 1]
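    # With these cumulative thresholds a selected sentence is replaced by the sentence-mask
    # token (id 2) 90% of the time and kept unchanged 10% of the time; the "replace with a
    # random sentence" and "drop" branches are effectively disabled.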
for index in indexs:
target_idxs.extend(sentence_id_vec[index])
choice = random.uniform(0, 1)
if choice < mask_sentence_options_cumulative_prob[0]:
# print("mask index: ", index)
sentence_id_vec[index] = [kMaskSentenceTokenId]
elif choice < mask_sentence_options_cumulative_prob[1]:
# print("replace index: ", index)
            replace_id = random.randint(0, len(sentence_id_vec) - 1)
sentence_id_vec[index] = sentence_id_vec[replace_id]
elif choice < mask_sentence_options_cumulative_prob[2]:
pass
else:
sentence_id_vec[index] = []
target_idxs.append(kEosTokenId)
# print(sentence_id_vec)
for index, sentence_id in enumerate(sentence_id_vec):
# print(index, sentence_id)
if len(sentence_id) == 0:
continue
input_idxs.extend(sentence_id_vec[index])
input_idxs.append(kEosTokenId)
return input_idxs, target_idxs
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int,
decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
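# A minimal sketch of shift_tokens_right (values are illustrative):
#   >>> labels = torch.tensor([[5, 6, 7, -100]])
#   >>> shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
#   tensor([[1, 5, 6, 7]])
# The decoder input starts with `decoder_start_token_id`, drops the last label,
# and any remaining -100 ignore-index values are mapped back to `pad_token_id`.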
def padding_to_maxlength(ids, max_length, pad_id):
cur_len = len(ids)
len_diff = max_length - cur_len
return ids + [pad_id] * len_diff, [1] * cur_len + [0] * len_diff
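# A minimal sketch of padding_to_maxlength (values are illustrative):
#   >>> padding_to_maxlength([11, 12, 13], max_length=5, pad_id=0)
#   ([11, 12, 13, 0, 0], [1, 1, 1, 0, 0])
# The first list is the padded ids and the second is the matching attention mask.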
| 9,799 | 29.625 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pegasus/tokenizers_pegasus.py
|
from fengshen.examples.pegasus.data_utils import (
_is_control,
_is_punctuation,
_is_whitespace,
_is_chinese_char)
from transformers import PreTrainedTokenizer
from transformers import logging
from typing import List, Optional, Tuple, Union
import collections
import os
import unicodedata
import re
import jieba
import sys
from copy import deepcopy
sys.path.append("../../../../")
jieba.dt.tmp_dir = os.path.expanduser("~/.cache/")
# jieba.enable_parallel(8)
jieba.initialize()
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class PegasusTokenizer(PreTrainedTokenizer):
# copy from BertTokenizer
r"""
Construct a Pegasus tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
# pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
# pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
# max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
pad_token="<pad>",
eos_token="</s>",
unk_token="<unk>",
mask_token="<mask_2>",
mask_token_sent="<mask_1>",
additional_special_tokens=None,
sep_token="[SEP]",
cls_token="[CLS]",
tokenize_chinese_chars=True,
strip_accents=None,
offset=100,
pre_tokenizer=lambda x: jieba.cut(x, HMM=False),
**kwargs):
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens, list):
raise TypeError(
f"additional_special_tokens should be of type {type(list)}, \
but is {type(additional_special_tokens)}"
)
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens
and mask_token_sent is not None else additional_special_tokens)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(
len(additional_special_tokens_extended), self.offset - 1)
]
if len(set(additional_special_tokens_extended)) != len(
additional_special_tokens_extended):
raise ValueError(
f"Please make sure that the provided additional_special_tokens \
do not contain an incorrectly shifted list of <unk_x> tokens. \
Found {additional_special_tokens_extended}."
)
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [
mask_token_sent
] if mask_token_sent is not None else []
# additional_special_tokens += [f"<unk_{i}>" for i in range(3, self.offset)]
# print("additional_special_tokens: ", additional_special_tokens)
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. \
To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
super().__init__(
do_lower_case=do_lower_case,
do_basic_tokenize=do_basic_tokenize,
never_split=never_split,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
eos_token=eos_token,
tokenize_chinese_chars=tokenize_chinese_chars,
additional_special_tokens=additional_special_tokens,
strip_accents=strip_accents,
**kwargs,
)
self.pre_tokenizer = pre_tokenizer
self.mask_token_sent = mask_token_sent
self.vocab = load_vocab(vocab_file)
self.old_vocab = deepcopy(self.vocab)
self.vocab[self.eos_token] = self.vocab.pop("[unused1]")
# self.vocab[self.eos_token] = self.vocab.pop("[unused2]")
self.vocab[self.pad_token] = self.vocab.pop("[PAD]")
self.vocab[self.unk_token] = self.vocab.pop("[UNK]")
if self.mask_token_sent is not None:
self.vocab[self.mask_token] = self.vocab.pop("[unused3]")
self.vocab[self.mask_token_sent] = self.vocab.pop("[unused2]")
self.ids_to_tokens = collections.OrderedDict([
(ids, tok) for tok, ids in self.vocab.items()
])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=self.unk_token)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
# print("pegasus_tokenizer: ", text)
for text in self.pre_tokenizer(text):
if text in self.vocab:
split_tokens.append(text)
else:
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(
text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(
token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
@staticmethod
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\
\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\
\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\
\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\
\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\u00b7\uff01\uff1f\uff61\u3002'
def convert_ids_to_tokens(
self,
ids: Union[int, List[int]],
skip_special_tokens: bool = False) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids and index != 2:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# for token in
# tokens = tokens or self.ids_to_tokens(ids)
# tokens = [token for token in tokens if not self._is_special(token)]
text = ''
for i, token in enumerate(tokens):
if token[:2] == '##':
text += token[2:]
elif len(token) == 1 and _is_chinese_char(ord(token)):
text += token
elif len(token) == 1 and _is_punctuation(token):
text += token
text += ' '
elif i > 0 and _is_chinese_char(ord(text[-1])):
text += token
elif tokens == "</s>":
continue
else:
text += ' '
text += token
text = re.sub(' +', ' ', text)
text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text)
punctuation = re.sub(' +', '', self._cjk_punctuation()).strip() + '+-/={(<['
punctuation_regex = '|'.join([re.escape(p) for p in punctuation])
punctuation_regex = '(%s) ' % punctuation_regex
text = re.sub(punctuation_regex, '\\1', text)
text = re.sub(r'(\d\.) (\d)', '\\1\\2', text)
return text.strip()
# out_string = " ".join(tokens).replace(" ##", "").strip()
def build_inputs_with_special_tokens(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
return token_ids_0 + token_ids_1 + [self.eos_token_id]
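    # A minimal sketch (token ids are illustrative): with token_ids_0 = [5, 6, 7]
    # and no second sequence, this returns [5, 6, 7, self.eos_token_id], i.e. the
    # `X </s>` format described above; a pair is simply concatenated as `A B </s>`
    # without any separator token.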
def _special_token_mask(self, seq):
all_special_ids = set(
self.all_special_ids) # call it once instead of inside list comp
# all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def num_special_tokens_to_add(self, pair=False):
"""Just EOS"""
return 1
def save_vocabulary(self,
save_directory: str,
filename_prefix: Optional[str] = None) -> Tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") +
VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = (filename_prefix +
"-" if filename_prefix else "") + save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.old_vocab.items(),
key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!")
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file, )
class BasicTokenizer(object):
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents: (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
"""
def __init__(self,
do_lower_case=True,
never_split=None,
tokenize_chinese_chars=True,
strip_accents=None):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see
WordPieceTokenizer.
Args:
never_split (`List[str]`, *optional*)
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
[`PreTrainedTokenizer.tokenize`]) List of token not to split.
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(
set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through *BasicTokenizer*.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
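# Sketch of WordpieceTokenizer usage (the toy vocabulary is hypothetical):
#   wp = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3},
#                           unk_token="[UNK]")
#   wp.tokenize("unaffable")    # -> ["un", "##aff", "##able"]
#   wp.tokenize("unmatchable")  # -> ["[UNK]"] when no greedy match covers the word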
| 25,507 | 41.513333 | 120 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clip_finetune/clip_finetune_flickr.py
|
import sys
sys.path.append('../../')
from data.clip_dataloader.flickr import FlickrDataModule
import pytorch_lightning as pl
import numpy as np
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
import torch.nn.functional as F
import math
import copy
import argparse
from transformers import CLIPModel, BertForSequenceClassification
class CLIPLightning(pl.LightningModule):
def __init__(self, model_name='ViT-B/32', minibatch_size=2):
"""A lightning wrapper for a CLIP model as specified in the paper.
Args:
model_name (str): A case sensitive visual model name.
            minibatch_size (int): Number of samples per sub-batch used when accumulating the contrastive loss.
"""
super().__init__()
self.prepare_data_per_node = True
        self.model_name = model_name
# self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") # NOTE load from openAI
self.text_encoder = BertForSequenceClassification.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-102M-Chinese")
self.minibatch_size = minibatch_size
self.isViT = 'ViT' in self.model_name
self.automatic_optimization = False
# Training loss: https://github.com/openai/CLIP/issues/83
# Mini-batching thanks to https://github.com/crowsonkb / https://twitter.com/RiversHaveWings
# Multi-GPU support: https://github.com/MicPie/clasp
def training_step(self, train_batch, idx):
# get optimizers and scheduler
optimizer = self.optimizers()
image, text, labels = train_batch
        n = math.ceil(len(image) / self.minibatch_size)
image_mbs = torch.chunk(image, n)
text_mbs = torch.chunk(text, n)
with torch.no_grad():
ims = [F.normalize(self.clip_model.get_image_features(im), dim=1) for im in image_mbs]
txt = [F.normalize(self.text_encoder(t).logits, dim=1) for t in text_mbs]
        # gather from all GPUs: the contrastive loss must be computed over features collected from every GPU
ims = self.all_gather(torch.cat(ims))
txt = self.all_gather(torch.cat(txt))
if len(ims.shape) == 3:
ims = list(ims)
txt = list(txt)
else:
ims = [ims]
txt = [txt]
image_logits = torch.cat(ims) @ torch.cat(txt).t() * self.clip_model.logit_scale.exp()
ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device)
loss = (F.cross_entropy(image_logits, ground_truth) +
F.cross_entropy(image_logits.t(), ground_truth)).div(2)
acc_i = (torch.argmax(image_logits, 1) == ground_truth).sum()
acc_t = (torch.argmax(image_logits, 0) == ground_truth).sum()
self.log_dict({'loss': loss / len(ims), 'acc': (acc_i + acc_t) / 2 / len(image) / len(ims)}, prog_bar=True)
if isinstance(optimizer, list):
optimizer = optimizer[0]
optimizer.zero_grad()
# image loss
for j, mb in enumerate(image_mbs[:-1]):
            # the last chunk is skipped (works around an alignment bug)
images_tmp = copy.deepcopy(ims)
images_tmp[self.global_rank][j * self.minibatch_size:(j+1)*self.minibatch_size] = \
F.normalize(self.clip_model.get_image_features(mb), dim=1)
image_logits = torch.cat(images_tmp) @ torch.cat(txt).t() * self.clip_model.logit_scale.exp()
ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device)
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(image_logits.t(), ground_truth))/2
self.manual_backward(loss)
# text loss
for j, mb in enumerate(text_mbs[:-1]):
text_tmp = copy.deepcopy(txt)
text_tmp[self.global_rank][j * self.minibatch_size:(j+1)*self.minibatch_size] = \
F.normalize(self.text_encoder(mb).logits, dim=1)
image_logits = torch.cat(ims) @ torch.cat(text_tmp).t() * self.clip_model.logit_scale.exp()
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(image_logits.t(), ground_truth))/2
self.manual_backward(loss)
optimizer.step()
lr_scheduler = self.lr_schedulers()
lr_scheduler.step()
self.clip_model.logit_scale.data.clamp_(-np.log(100), np.log(100))
def validation_step(self, val_batch, idx):
image, text, labels = val_batch
img_embed = self.clip_model.get_image_features(image)
txt_embed = self.text_encoder(text).logits
# print(img_embed.shape)
image_norm = F.normalize(img_embed, dim=1)
text_norm = F.normalize(txt_embed, dim=1)
image_logits = image_norm @ text_norm.t() * self.clip_model.logit_scale.exp()
text_logits = text_norm @ image_norm.t() * self.clip_model.logit_scale.exp()
# print(image_logits.shape)
# image_logits, text_logits = self.forward(image, text)
ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device)
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2)
self.log('val_loss', loss, prog_bar=True)
return [image_norm, text_norm, labels]
def validation_epoch_end(self, outputs):
image_features = torch.cat([x[0] for x in outputs])
text_features = torch.cat([x[1] for x in outputs])
labels = [label for x in outputs for label in x[2]]
print(image_features.shape, text_features.shape, len(labels))
self.get_metrics(image_features, text_features, labels, 100)
def test_step(self, test_batch, idx):
image, text, labels = test_batch
image_features = self.clip_model.get_image_features(image)
text_features = self.text_encoder(text).logits
image_features = image_features / image_features.norm(dim=1, keepdim=True)
text_features = text_features / text_features.norm(dim=1, keepdim=True)
return [image_features, text_features, labels]
def test_epoch_end(self, outputs):
image_features = torch.cat([x[0] for x in outputs])
text_features = torch.cat([x[1] for x in outputs])
labels = [label for x in outputs for label in x[2]]
print(image_features.shape, text_features.shape, len(labels))
self.get_metrics(image_features, text_features, labels, 100)
def get_metrics(self, image_features, text_features, labels, logit_scale):
        # Compute similarities, supporting the case where one image has several captions.
        # This matters for image-to-text retrieval, because one image may correspond to multiple texts.
        # It is not needed for text-to-image retrieval (a text usually has a single matching image).
# metrics = {}
logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
        label2idx = {}  # map each label to the indices where it appears
repeat_id = []
for i, label in enumerate(labels):
if label not in label2idx:
label2idx[label] = [i]
else:
                # this label has appeared before; record the index so its column can be down-weighted in the text-to-image scores
label2idx[label].append(i)
repeat_id.append(i)
        # print(label2idx)  # indices recorded for every label
# print('repeat_id:', repeat_id)
ground_truth = [label2idx[label] for label in labels]
# print(ground_truth)
for name, logit in logits.items():
# print(name, logit.shape)
if name == 'text_to_image':
                logit[:, repeat_id] -= 1e8  # suppress these scores (duplicate images are simply ignored)
r1_stat, r5_stat, r10_stat = [], [], []
ranking = torch.argsort(logit, descending=True) # index of the largest element to the smallest
# print(name, ranking[:, :10])
for i, each_query in enumerate(ranking[:, :10]):
for j, q in enumerate(each_query):
if q in ground_truth[i]:
if j == 0:
r1_stat.append(1)
r5_stat.append(1)
r10_stat.append(1)
break
if j < 5:
r5_stat.append(1)
r10_stat.append(1)
break
if j < 10:
r10_stat.append(1)
break
print(f'{name} r1:{sum(r1_stat)/len(logit)}, r5:{sum(r5_stat)/len(logit)}, r10:{sum(r10_stat)/len(logit)}')
def configure_optimizers(self):
lr = {
"RN50": 5e-4,
"RN101": 5e-4,
"RN50x4": 5e-4,
"RN50x16": 4e-4,
"RN50x64": 3.6e-4,
"ViT-B/32": 5e-4,
"ViT-B/16": 5e-4,
"ViT-L/14": 4e-4,
"ViT-L/14-336px": 2e-5
}[self.model_name]
optimizer = torch.optim.AdamW(
[{'params': self.clip_model.parameters()}, {'params': self.text_encoder.parameters()}],
lr=lr,
betas=(
0.9,
0.98 if self.isViT else 0.999
),
eps=1e-6 if self.isViT else 1e-8,
weight_decay=0.2
)
# Source: https://github.com/openai/CLIP/issues/107
# Use pip install 'git+https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup'
lr_scheduler = CosineAnnealingWarmRestarts(
optimizer,
T_0=2000
)
# CosineAnnealingWarmupRestarts
return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# model_name
parser.add_argument('--model', type=str,
default="ViT-B/32",
help='model definition')
# experiment setting
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--num_epoches', type=int, default=1)
parser.add_argument('--num_gpus', type=int, default=2)
# dataset
parser.add_argument('--train_filename', type=str,
help='dir or csv file')
parser.add_argument('--train_root', type=str,
help='image root path')
parser.add_argument('--val_filename', type=str,
help='dir or csv file')
parser.add_argument('--val_root', type=str,
help='image root path')
parser.add_argument('--test_filename', type=str,
help='dir or csv file')
parser.add_argument('--test_root', type=str,
help='image root path')
parser.add_argument('--num_workers', type=int, default=0)
    # huggingface pretrained model definition
parser.add_argument('--pretrain_model', type=str,
default="openai/clip-vit-base-patch32",
                        help='default: load from openai')  # NOTE: "wf-genius/TaiYi-CLIP-ViT-B-32" is a checkpoint trained by the author
args = parser.parse_args()
dm = FlickrDataModule(args)
model = CLIPLightning(model_name=args.model, minibatch_size=args.batch_size//2)
trainer = pl.Trainer(gpus=args.num_gpus, precision=16, max_epochs=args.num_epoches)
trainer.test(model, dm) # zero-shot test
trainer.fit(model, dm) # finetune on train set
trainer.test(model, dm) # test again
| 11,460 | 43.080769 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ziya_llama/llama_generate.py
|
import torch
from transformers import LlamaForCausalLM, AutoTokenizer
from typing import List
import torch.nn.functional as F
def zero_pad_sequences(sequences: List[torch.Tensor], side: str = 'left', padding_value: int = 0) -> torch.Tensor:
assert side in ('left', 'right')
max_len = max(seq.size(0) for seq in sequences)
padded_sequences = []
for seq in sequences:
pad_len = max_len - seq.size(0)
padding = (pad_len, 0) if side == 'left' else (0, pad_len)
padded_sequences.append(F.pad(seq, padding, value=padding_value))
return torch.stack(padded_sequences, dim=0)
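# A minimal sketch of zero_pad_sequences (values are illustrative):
#   >>> seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
#   >>> zero_pad_sequences(seqs, side='left', padding_value=0)
#   tensor([[1, 2, 3],
#           [0, 4, 5]])
# Left padding keeps the newest tokens right-aligned, which is what the
# decoder-only generation below expects.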
def generate(queries: List[str], tokenizer: AutoTokenizer, model: LlamaForCausalLM, device: int=0, **generate_kwargs):
def _apply_prefix(query):
return f"<human>:{query.strip()}\n<bot>:"
def _tokenizing(queries):
input_ids = []
for query in queries:
query = _apply_prefix(query)
input_ids.append(torch.tensor(tokenizer(query).input_ids))
inputs = zero_pad_sequences(input_ids, side="left", padding_value=generate_kwargs["pad_token_id"])
return inputs
input_ids = _tokenizing(queries).to(device)
pad_token_id = generate_kwargs["pad_token_id"]
input_attention_mask = input_ids.not_equal(pad_token_id).to(dtype=torch.bool, device=device)
sequences = model.generate(
input_ids.to(device), attention_mask=input_attention_mask, **generate_kwargs)
output = []
for seq in sequences:
out_text = tokenizer.decode(seq.tolist(), skip_special_tokens=False).split('<bot>:')[-1]
output.append(out_text.replace('<s>','').replace('</s>',''))
return output
if __name__ == '__main__':
model_path = 'your model path'
tk_path = 'your tokenizer path'
model = LlamaForCausalLM.from_pretrained(model_path).to(torch.bfloat16).cuda()
llama_tokenizer = AutoTokenizer.from_pretrained(tk_path)
generate_kwargs = {
"do_sample": True,
"top_p": 1.0,
"top_k": 0,
"max_length": 2048,
"repetition_penalty": 1.0,
"temperature": 0.8,
"pad_token_id": llama_tokenizer.eos_token_id,
"eos_token_id": llama_tokenizer.eos_token_id,
}
queries = ['怎样给世界一点爱?', '生命的意义是什么?']
ans = generate(queries=queries,
tokenizer=llama_tokenizer,
model=model,
device=0,
**generate_kwargs)
| 2,398 | 35.348485 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ziya_llama/finetune_ziya_llama.py
|
from dataclasses import dataclass
import os
import deepspeed
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
import argparse
from fengshen.models.model_utils import (
configure_optimizers,
add_module_args,
get_total_steps
)
from fengshen.models.llama.modeling_llama import LlamaForCausalLM
from fengshen.models.megatron import mpu
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.strategies.megatron_deepspeed import DeepSpeedStrategy
from transformers import LlamaTokenizer
from llama_generate import generate
SHOW_DATA = False
def pad(ids, pad_id, max_length):
if len(ids) > max_length:
return ids[:max_length]
return ids + [pad_id] * (max_length - len(ids))
prompt_prefix = ""
prompt_without_output = "<human>:{prompt}\n<bot>:"
@dataclass
class LlamaSFTCollator:
    '''
    Turns raw samples into the final model inputs;
    the main processing logic lives in __call__.
    '''
    tokenizer: None  # tokenizer used to encode the text
max_seq_length: 1536
def __call__(self, samples):
input_ids_list = []
labels_list = []
max_length = 0
for s in samples:
"""
sample: {
"task" : str,
"prompt": [str]
"output": [str]
}
"""
prompt_cnt = min(len(s["prompt"]), len(s["output"]))
# input_ids = self.tokenizer(prompt_prefix).input_ids
input_ids = []
labels_ids = [-100] * len(input_ids)
for i in range(prompt_cnt):
prompt_input_ids = self.tokenizer(prompt_without_output.format_map(
{"prompt": s["prompt"][i].strip()}), add_special_tokens=False).input_ids
output_ids = self.tokenizer(s["output"][i].strip(), add_special_tokens=False).input_ids + [self.tokenizer.eos_token_id]
input_ids += prompt_input_ids
input_ids += output_ids
labels_ids += [-100] * (len(prompt_input_ids)) + output_ids
# input_ids += [self.tokenizer.eos_token_id]
# labels_ids += [self.tokenizer.eos_token_id]
max_length = min(max(len(input_ids), max_length), self.max_seq_length)
input_ids_list.append(input_ids)
labels_list.append(labels_ids)
# PAD
for i in range(len(input_ids_list)):
labels_list[i] = pad(labels_list[i], -100, max_length)
input_ids_list[i] = pad(input_ids_list[i], self.tokenizer.eos_token_id, max_length)
model_inputs = {
'input_ids': torch.tensor(input_ids_list).clone(),
'attention_mask': torch.ones((len(input_ids_list), max_length)).clone(),
"position_ids": torch.arange(0, max_length).unsqueeze(0).expand(len(input_ids_list), max_length).clone(),
'labels': torch.tensor(labels_list).clone(),
}
return model_inputs
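# Sketch of how the collator is used (the tokenizer path below is a hypothetical
# placeholder; real paths come from the command-line arguments):
#   collator = LlamaSFTCollator(tokenizer=LlamaTokenizer.from_pretrained("path/to/tokenizer"),
#                               max_seq_length=1024)
#   batch = collator([{"task": "chat", "prompt": ["你好"], "output": ["你好,很高兴见到你"]}])
#   # `batch` holds input_ids / attention_mask / position_ids / labels tensors, with
#   # prompt positions in `labels` set to -100 so they are ignored by the loss.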
class Llama(pl.LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('ziya_llama finetune')
parser.add_argument('--max_seq_length', type=int, default=1024)
parser.add_argument('--model_parallel_size', type=int, default=1)
parser.add_argument('--tokenizer_path', default=None, type=str)
return parent_parser
def __init__(self, args, tokenizer):
super().__init__()
self.save_hyperparameters(args)
self.tokenizer = tokenizer
def setup(self, stage) -> None:
if mpu.get_model_parallel_world_size() > 1:
self.model = LlamaForCausalLM.from_pretrained(
f"{self.hparams.model_path}/part_{mpu.get_model_parallel_rank()}", torch_dtype=torch.half).cuda()
else:
self.model = LlamaForCausalLM.from_pretrained(f"{self.hparams.model_path}", torch_dtype=torch.half).cuda()
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}'.format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, **batch):
return self.model(**batch)
def detokenize(self, token_ids):
tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(tokens)
def comput_metrix(self, logits, labels):
with torch.no_grad():
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
print('mask: {}'.format(batch['attention_mask'][0]))
print('position_ids: {}'.format(batch['position_ids'][0]))
output = self(**batch)
self.log('train/loss', output.loss, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self(**batch)
self.log('val_loss', output.loss, sync_dist=True)
return output.loss
def predict_step(self, batch, batch_idx):
# generate data
generate_kwargs = {
"do_sample": True,
"top_p": 1.0,
"top_k": 0,
"max_length": 256,
"repetition_penalty": 1.0,
"temperature": 0.8,
"pad_token_id": self.tokenizer.eos_token_id,
"eos_token_id": self.tokenizer.eos_token_id,
}
batch_input_ids = batch['input_ids'].cpu().numpy().tolist()
print('batch_input_ids:\n', batch_input_ids)
queries = [self.detokenize(input_ids).split('<bot>:')[0].replace('<s>', '')+'<bot>:' for input_ids in batch_input_ids]
print('queries:\n', queries)
# queries = ['怎样给世界一点爱?', '生命的意义是什么?']
ans = generate(queries=queries,
tokenizer=self.tokenizer,
model=self.model,
device=self.model.device,
**generate_kwargs)
print('ans:\n', ans)
## end
def on_load_checkpoint(self, checkpoint) -> None:
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--do_eval_only', action='store_true', default=False)
args_parser.add_argument('--wandb_project', type=str, default="ziya_llama13b_finetune_example")
args_parser.add_argument('--wandb_name', type=str, default="exp1")
args_parser = add_module_args(args_parser)
args_parser = pl.Trainer.add_argparse_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Llama.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path, use_fast=False)
collate_fn = LlamaSFTCollator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
)
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = Llama(args, tokenizer=tokenizer)
print('model load complete')
print(model)
strategy = DeepSpeedStrategy(
tensor_model_parallel_size=args.model_parallel_size,
pipe_model_parallel_size=1,
mpu_seed=42,
)
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
if not args.do_eval_only:
wandb_logger = WandbLogger(project=args.wandb_project, name=args.wandb_name)
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = pl.Trainer.from_argparse_args(
args,
strategy=strategy,
logger=wandb_logger,
callbacks=[lr_monitor, checkpoint_callback])
trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path)
else:
trainer = pl.Trainer.from_argparse_args(args, strategy=strategy)
trainer.predict(model, data_module, ckpt_path=args.load_ckpt_path)
| 9,179 | 38.74026 | 135 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ziya_llama/sample_test.py
|
from transformers import LlamaTokenizer
import torch
def pad(ids, pad_id, max_length):
if len(ids) > max_length:
return ids[:max_length]
return ids + [pad_id] * (max_length - len(ids))
prompt_without_output = "<human>:{prompt}\n<bot>:"
def generate_samples(s, tokenizer, max_seq_length):
max_length = 0
prompt_cnt = min(len(s["prompt"]), len(s["output"]))
input_ids_list = []
labels_list = []
input_ids = []
labels_ids = []
for i in range(prompt_cnt):
prompt_input_ids = tokenizer(prompt_without_output.format_map(
{"prompt": s["prompt"][i].strip()}), add_special_tokens=False).input_ids
output_ids = tokenizer(s["output"][i].strip(), add_special_tokens=False).input_ids + [tokenizer.eos_token_id]
input_ids += prompt_input_ids
input_ids += output_ids
labels_ids += [-100] * (len(prompt_input_ids)) + output_ids
# input_ids += [self.tokenizer.eos_token_id]
# labels_ids += [self.tokenizer.eos_token_id]
max_length = min(max(len(input_ids), max_length), max_seq_length)
input_ids_list.append(input_ids)
labels_list.append(labels_ids)
# PAD
for i in range(len(input_ids_list)):
labels_list[i] = pad(labels_list[i], -100, max_length)
input_ids_list[i] = pad(input_ids_list[i], tokenizer.pad_token_id, max_length)
model_inputs = {
'input_ids': torch.tensor(input_ids_list).clone(),
'attention_mask': torch.ones((len(input_ids_list), max_length)).clone(),
"position_ids": torch.arange(0, max_length).unsqueeze(0).expand(len(input_ids_list), max_length).clone(),
'labels': torch.tensor(labels_list).clone(),
}
return model_inputs
if __name__ == "__main__":
tokenizer = LlamaTokenizer.from_pretrained("/cognitive_comp/gaoxinyu/workspace_lightning/llama/ckpt/GXY_HIT_13B")
s = {'task': 'belle_multi_chat',
'prompt': ['写一篇关于人工智能对未来影响的文章,2000字以上。', '从这篇文章中提取出未来人工智能发展方向的关键词。'],
'output': ['人工。', '未来 人工智能 智能 可靠 透明 应用 领域 利用 有益助手']}
batch = generate_samples(s, tokenizer, 256)
def detokenize(token_ids):
tokens = tokenizer.convert_ids_to_tokens(token_ids)
return tokenizer.convert_tokens_to_string(tokens)
print('source: {}'.format(batch['input_ids']))
print('target: {}'.format(batch['labels']))
    print('source: {}'.format(detokenize(batch['input_ids'][0])))
    label_idx = batch['labels'][0] != -100
    print('target: {}'.format(detokenize(
        batch['labels'][0][label_idx])))
    print('mask: {}'.format(batch['attention_mask'][0]))
    print('position_ids: {}'.format(batch['position_ids'][0]))
| 2,660 | 39.938462 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/wsc_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
if '不是' in data['choice'][0] and '是' in data['choice'][1]:
if data['label']==1:
label='false'
else:
label='true'
else:
if data['label']==0:
label='true'
else:
label='false'
result.append({'id':data['id'],'label':label})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 1,174 | 27.658537 | 70 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/ocnli_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
id2label={0:'contradiction',1:'neutral',2:'entailment'}
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
result.append({'id':data['id'],'label':id2label[data['label']]})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 911 | 27.5 | 76 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/c3_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
result.append({'id':data['id'],'label':data['label']})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 842 | 25.34375 | 66 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/cmrc2018_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
json_data=json.dumps(data,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
id2score={}
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
for line in lines:
line = json.loads(line)
for choice in line['choices']:
if choice['id'] not in id2score.keys():
id2score[choice['id']]=[]
id2score[choice['id']].extend(choice['entity_list'])
result={}
for k,v in id2score.items():
if v==[]:
result[k]=''
else:
result[k] = sorted(v, key=lambda k: k['score'],reverse=True)[0]['entity_name']
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 1,152 | 27.121951 | 90 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/tnews_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
id2label={"故事": "100",
"文化": "101",
"娱乐": "102",
"体育": "103",
"财经": "104",
"房产": "106",
"汽车": "107",
"教育": "108",
"科技": "109",
"军事": "110",
"旅游": "112",
"国际": "113",
"股票": "114",
"农业": "115",
"电竞": "116"}
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
result.append({'id':data['id'],'label':id2label[data['choice'][data['label']]]})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 1,255 | 25.723404 | 92 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/iflytek_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
label2id={"打车": "0", "地图导航": "1", "免费WIFI": "2", "租车": "3", "同城服务": "4", "快递物流": "5", "婚庆": "6", "家政": "7", "公共交通": "8", "政务": "9", "社区服务": "10", "薅羊毛": "11", "魔幻": "12", "仙侠": "13", "卡牌": "14", "飞行空战": "15", "射击游戏": "16", "休闲益智": "17", "动作类": "18", "体育竞技": "19", "棋牌中心": "20", "经营养成": "21", "策略": "22", "MOBA": "23", "辅助工具": "24", "约会社交": "25", "即时通讯": "26", "工作社交": "27", "论坛圈子": "28", "婚恋社交": "29", "情侣社交": "30", "社交工具": "31", "生活社交": "32", "微博博客": "33", "新闻": "34", "漫画": "35", "小说": "36", "技术": "37", "教辅": "38", "问答交流": "39", "搞笑": "40", "杂志": "41", "百科": "42", "影视娱乐": "43", "求职": "44", "兼职": "45", "视频": "46", "短视频": "47", "音乐": "48", "直播": "49", "电台": "50", "K歌": "51", "成人": "52", "中小学": "53", "职考": "54", "公务员": "55", "英语": "56", "视频教育": "57", "高等教育": "58", "成人教育": "59", "艺术": "60", "语言(非英语)": "61", "旅游资讯": "62", "综合预定": "63", "民航": "64", "铁路": "65", "酒店": "66", "行程管理": "67", "民宿短租": "68", "出国": "69", "工具": "70", "亲子儿童": "71", "母婴": "72", "驾校": "73", "违章": "74", "汽车咨询": "75", "汽车交易": "76", "日常养车": "77", "行车辅助": "78", "租房": "79", "买房": "80", "装修家居": "81", "电子产品": "82", "问诊挂号": "83", "养生保健": "84", "医疗服务": "85", "减肥瘦身": "86", "美妆美业": "87", "菜谱": "88", "餐饮店": "89", "体育咨讯": "90", "运动健身": "91", "支付": "92", "保险": "93", "股票": "94", "借贷": "95", "理财": "96", "彩票": "97", "记账": "98", "银行": "99", "美颜": "100", "影像剪辑": "101", "摄影修图": "102", "相机": "103", "绘画": "104", "二手": "105", "电商": "106", "团购": "107", "外卖": "108", "电影票务": "109", "社区超市": "110", "购物咨询": "111", "笔记": "112", "办公": "113", "日程管理": "114", "女性": "115", "经营": "116", "收款": "117", "其他": "118"}
label2desc={
'银行': '银行',
'社区服务': '社区',
'电商': '电商',
'支付': '支付',
'经营养成': '养成',
'卡牌': '卡牌',
'借贷': '借贷',
'驾校': '驾校',
'理财': '理财',
'职考': '职考',
'新闻': '新闻',
'旅游资讯': '旅游',
'公共交通': '交通',
'魔幻': '魔幻',
'医疗服务': '医疗',
'影像剪辑': '影像',
'动作类': '动作',
'工具': '工具',
'体育竞技': '体育',
'小说': '小说',
'运动健身': '运动',
'相机': '相机',
'辅助工具': '辅助',
'快递物流': '快递',
'高等教育': '教育',
'股票': '股票',
'菜谱': '菜谱',
'行车辅助': '行车',
'仙侠': '仙侠',
'亲子儿童': '亲子',
'购物咨询': '购物',
'射击游戏': '射击',
'漫画': '漫画',
'中小学': '小学',
'同城服务': '同城',
'成人教育': '成人',
'求职': '求职',
'电子产品': '电子',
'艺术': '艺术',
'薅羊毛': '赚钱',
'约会社交': '约会',
'经营': '经营',
'兼职': '兼职',
'短视频': '短视',
'音乐': '音乐',
'英语': '英语',
'棋牌中心': '棋牌',
'摄影修图': '摄影',
'养生保健': '养生',
'办公': '办公',
'政务': '政务',
'视频': '视频',
'论坛圈子': '论坛',
'彩票': '彩票',
'直播': '直播',
'其他': '其他',
'休闲益智': '休闲',
'策略': '策略',
'即时通讯': '通讯',
'汽车交易': '买车',
'违章': '违章',
'地图导航': '地图',
'民航': '民航',
'电台': '电台',
'语言(非英语)': '语言',
'搞笑': '搞笑',
'婚恋社交': '婚恋',
'社区超市': '超市',
'日常养车': '养车',
'杂志': '杂志',
'视频教育': '在线',
'家政': '家政',
'影视娱乐': '影视',
'装修家居': '装修',
'体育咨讯': '资讯',
'社交工具': '社交',
'餐饮店': '餐饮',
'美颜': '美颜',
'问诊挂号': '挂号',
'飞行空战': '飞行',
'综合预定': '预定',
'电影票务': '票务',
'笔记': '笔记',
'买房': '买房',
'外卖': '外卖',
'母婴': '母婴',
'打车': '打车',
'情侣社交': '情侣',
'日程管理': '日程',
'租车': '租车',
'微博博客': '博客',
'百科': '百科',
'绘画': '绘画',
'铁路': '铁路',
'生活社交': '生活',
'租房': '租房',
'酒店': '酒店',
'保险': '保险',
'问答交流': '问答',
'收款': '收款',
'MOBA': '竞技',
'K歌': '唱歌',
'技术': '技术',
'减肥瘦身': '减肥',
'工作社交': '工作',
'团购': '团购',
'记账': '记账',
'女性': '女性',
'公务员': '公务',
'二手': '二手',
'美妆美业': '美妆',
'汽车咨询': '汽车',
'行程管理': '行程',
'免费WIFI': '免费',
'教辅': '教辅',
'成人': '两性',
'出国': '出国',
'婚庆': '婚庆',
'民宿短租': '民宿'}
desc2label={v:k for k,v in label2desc.items()}
def submit(file_path):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
result.append({'id':data['id'],'label':label2id[desc2label[data['choice'][data['label']]]]})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 5,047 | 30.55 | 1,563 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/csl_submit.py
|
import json
from tqdm import tqdm
import argparse
import numpy as np
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for l,line in tqdm(enumerate(lines)):
data = json.loads(line)
result.append(data)
return result
def recls(line):
mat=[]
for l in line:
s=[v for v in l['score'].values()]
mat.append(s)
mat=np.array(mat)
batch,num_labels=mat.shape
for i in range(len(line)):
index = np.unravel_index(np.argmax(mat, axis=None), mat.shape)
line[index[0]]['label'] = int(index[1])
mat[index[0],:] = np.zeros((num_labels,))
mat[:,index[1]] = np.zeros((batch,))
return line
import copy
def csl_scorted(data):
lines={}
new_data=copy.deepcopy(data)
for d in data:
if d['texta'] not in lines.keys():
lines[d['texta']]={}
lines[d['texta']][d['id']]=d['score'][d['choice'][0]]
result=[]
id2preds={}
for k,v in lines.items():
v=sorted(v.items(), key=lambda x: x[1], reverse=True)
# print(v)
for i,(text_id, score) in enumerate(v):
if i<len(v)/2:
label=0
else:
label=1
id2preds[text_id]=label
for d in range(len(new_data)):
new_data[d]['label']=id2preds[new_data[d]['id']]
return new_data
def submit(file_path):
id2label={1:'0',0:'1'}
lines=csl_scorted(load_data(file_path))
result=[]
for line in tqdm(lines):
data = line
result.append({'id':data['id'],'label':str(id2label[data['label']])})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 2,349 | 27.313253 | 144 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/afqmc_submit.py
|
import json
from tqdm import tqdm
import argparse
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
def submit(file_path):
id2label={0:'0',1:'1'}
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
result.append({'id':data['id'],'label':id2label[data['label']]})
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 879 | 25.666667 | 76 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/predict2submit/chid_submit.py
|
import json
from tqdm import tqdm
import argparse
import numpy as np
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
json_data=json.dumps(data,ensure_ascii=False)
f.write(json_data+'\n')
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for l,line in tqdm(enumerate(lines)):
data = json.loads(line)
result.append(data)
return result
def recls(line):
mat=[]
for l in line:
s=[v for v in l['score'].values()]
mat.append(s)
mat=np.array(mat)
batch,num_labels=mat.shape
for i in range(len(line)):
index = np.unravel_index(np.argmax(mat, axis=None), mat.shape)
line[index[0]]['label'] = int(index[1])
mat[index[0],:] = np.zeros((num_labels,))
mat[:,index[1]] = np.zeros((batch,))
return line
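# Toy illustration of recls (the scores are made up): the globally largest score is
# assigned first and its row/column are then blocked, so each candidate option is
# used at most once within a group.
#   line = [{'score': {'a': 0.9, 'b': 0.8}},
#           {'score': {'a': 0.7, 'b': 0.1}}]
#   recls(line)  # -> line[0]['label'] == 0 ('a'), line[1]['label'] == 1 ('b')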
def chid_m(data):
lines={}
for d in data:
if d['line_id'] not in lines.keys():
lines[d['line_id']]=[]
lines[d['line_id']].append(d)
result=[]
for k,v in lines.items():
result.extend(recls(v))
return result
def submit(file_path):
lines = chid_m(load_data(file_path))
result={}
for line in tqdm(lines):
data = line
result[data['id']]=data['label']
return result
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
save_data(submit(args.data_path), args.save_path)
| 1,869 | 24.972222 | 133 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/solution/clue_ubert.py
|
import argparse
from fengshen import UbertPipelines
import os
import json
from tqdm import tqdm
def load_data(data_path):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = [json.loads(line) for line in tqdm(lines)]
return samples
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--data_dir', default='./data', type=str)
total_parser.add_argument('--train_data', default='train.json', type=str)
total_parser.add_argument('--valid_data', default='dev.json', type=str)
total_parser.add_argument('--test_data', default='test.json', type=str)
total_parser.add_argument('--output_path',default='./predict.json', type=str)
total_parser = UbertPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
train_data = load_data(os.path.join(args.data_dir, args.train_data))
dev_data = load_data(os.path.join(args.data_dir, args.valid_data))
test_data = load_data(os.path.join(args.data_dir, args.test_data))
# test_data = test_data[:10]
model = UbertPipelines(args)
if args.train:
model.fit(train_data, dev_data)
result = model.predict(test_data)
for line in result[:20]:
print(line)
with open(args.output_path, 'w', encoding='utf8') as f:
for line in result:
json_data = json.dumps(line, ensure_ascii=False)
f.write(json_data+'\n')
if __name__ == "__main__":
main()
| 1,506 | 31.06383 | 81 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/solution/clue_unimc.py
|
import argparse
from fengshen.pipelines.multiplechoice import UniMCPipelines
import os
import json
import copy
from tqdm import tqdm
def load_data(data_path):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = [json.loads(line) for line in tqdm(lines)]
return samples
def comp_acc(pred_data,test_data):
corr=0
for i in range(len(pred_data)):
if pred_data[i]['label']==test_data[i]['label']:
corr+=1
return corr/len(pred_data)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--data_dir', default='./data', type=str)
total_parser.add_argument('--train_data', default='train.json', type=str)
total_parser.add_argument('--valid_data', default='dev.json', type=str)
total_parser.add_argument('--test_data', default='test.json', type=str)
total_parser.add_argument('--output_path', default='', type=str)
total_parser = UniMCPipelines.piplines_args(total_parser)
args = total_parser.parse_args()
train_data = load_data(os.path.join(args.data_dir, args.train_data))
dev_data = load_data(os.path.join(args.data_dir, args.valid_data))
test_data = load_data(os.path.join(args.data_dir, args.test_data))
# dev_data = dev_data[:200]
dev_data_ori=copy.deepcopy(dev_data)
model = UniMCPipelines(args, args.pretrained_model_path)
print(args.data_dir)
if args.train:
model.train(train_data, dev_data)
result = model.predict(dev_data)
for line in result[:20]:
print(line)
acc=comp_acc(result,dev_data_ori)
print('acc:',acc)
if args.output_path != '':
test_result = model.predict(test_data)
with open(args.output_path, 'w', encoding='utf8') as f:
for line in test_result:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__ == "__main__":
main()
| 1,989 | 30.09375 | 77 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/c3_preprocessing.py
|
import json
from tqdm import tqdm
import os
import argparse
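# Each C3 record is [passage_sentences, qa_list(, id)]: the passage sentences are joined
# with newlines into `texta`, and every question/choice pair in qa_list becomes one
# multiple-choice sample (answer/label/id fall back to defaults for test data).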
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = json.loads(''.join(f.readlines()))
result=[]
for line in tqdm(lines):
data = line
texta = '\n'.join(data[0])
textb =''
for qa in data[1]:
question=qa['question']
choice=qa['choice']
answer=qa['answer'] if 'answer' in qa.keys() else ''
label = qa['choice'].index(answer) if 'answer' in qa.keys() else 0
text_id = qa['id'] if 'id' in qa.keys() else 0
result.append({'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list=['d-train','d-dev','c3-m-train','m-train','m-dev','test1.0','test1.1']
train_data = []
dev_data = []
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
data=load_data(file_path=file_path)
if 'train' in file or 'd-dev' in file:
train_data.extend(data)
elif 'm-dev' in file:
dev_data.extend(data)
elif 'test' in file:
output_path = os.path.join(save_path,file+'.json')
save_data(data,output_path)
output_path = os.path.join(save_path,'train.json')
save_data(train_data,output_path)
output_path = os.path.join(save_path,'dev.json')
save_data(dev_data,output_path)
| 2,355 | 31.722222 | 84 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/ocnli_preprocessing.py
|
import json
from tqdm import tqdm
import os
import argparse
label2desc={'contradiction':'矛盾','neutral':'自然','entailment':'蕴含'}
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
texta = data['sentence1']
textb = data['sentence2']
question = ''
choice = [v for k,v in label2desc.items()]
answer = label2desc[data['label']] if 'label' in data.keys() else ''
label = choice.index(answer) if 'label' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({'task_type':'自然语言推理',
'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
for i in range(5):
print(result[i])
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 1,986 | 31.57377 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/cmrc2018_preprocessing.py
|
import json
from tqdm import tqdm
import os
from sklearn.utils import shuffle
import re
import argparse
def cut_sent(para):
    para = re.sub('([。,,!?\?])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
    para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
    para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
    para = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', para)
    # If a terminator comes right before a closing quote, the quote ends the sentence, so the
    # newline is placed after the quote; the rules above are careful to keep the quotes intact.
    para = para.rstrip()  # drop any extra trailing newlines at the end of the paragraph
    # Many rule sets also split on semicolons, but they are not treated as boundaries here.
return para.split("\n")
def search(pattern, sequence):
n = len(pattern)
res=[]
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
res.append([i,i + n-1])
return res
max_length=512
stride=128
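# Slide a window of (max_length - len(question) - 3) characters over the context in steps
# of `stride`. A window that fully contains the gold answer keeps the span with its offsets
# shifted into the window; otherwise the window is emitted with an empty answer (-1, -1).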
def stride_split(question, context, answer, start):
end = start + len(answer) -1
results, n = [], 0
max_c_len = max_length - len(question) - 3
while True:
left, right = n * stride, n * stride + max_c_len
if left <= start < end <= right:
results.append((question, context[left:right], answer, start - left, end - left))
elif right < start or end < right:
results.append((question, context[left:right], '', -1, -1))
if right >= len(context):
return results
n += 1
def load_data(file_path,is_training=False):
task_type='抽取任务'
subtask_type='抽取式阅读理解'
with open(file_path, 'r', encoding='utf8') as f:
lines = json.loads(''.join(f.readlines()))
result=[]
lines = lines['data']
for line in tqdm(lines):
if line['paragraphs']==[]:
continue
data = line['paragraphs'][0]
context=data['context'].strip()
for qa in data['qas']:
question=qa['question'].strip()
rcv=[]
for a in qa['answers']:
if a not in rcv:
rcv.append(a)
split=stride_split(question, context, a['text'], a['answer_start'])
for sp in split:
choices = []
choice = {}
choice['id']=qa['id']
choice['entity_type'] = qa['question']
choice['label']=0
entity_list=[]
if sp[3]>=0 and sp[4]>=0:
entity_list.append({'entity_name':sp[2],'entity_type':'','entity_idx':[[sp[3],sp[4]]]})
choice['entity_list']=entity_list
choices.append(choice)
                        if choices==[]:
                            print(data)
                            continue
                        # one sample per stride window, so the window text and its choices stay in sync
                        result.append({ 'task_type':task_type,
                                        'subtask_type':subtask_type,
                                        'text':sp[1],
                                        'choices':choices,
                                        'id':0})
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list=['dev','train','trial','test']
train_data = []
dev_data = []
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
data=load_data(file_path=file_path)
if 'train' in file or 'trial' in file:
train_data.extend(data)
else:
output_path = os.path.join(save_path,file+'.json')
save_data(data,output_path)
output_path = os.path.join(save_path,'train.json')
save_data(train_data,output_path)
| 4,370 | 33.690476 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/iflytek_preprocessing.py
|
import json
from tqdm import tqdm
import os
import argparse
label2desc={
'银行': '银行',
'社区服务': '社区',
'电商': '电商',
'支付': '支付',
'经营养成': '养成',
'卡牌': '卡牌',
'借贷': '借贷',
'驾校': '驾校',
'理财': '理财',
'职考': '职考',
'新闻': '新闻',
'旅游资讯': '旅游',
'公共交通': '交通',
'魔幻': '魔幻',
'医疗服务': '医疗',
'影像剪辑': '影像',
'动作类': '动作',
'工具': '工具',
'体育竞技': '体育',
'小说': '小说',
'运动健身': '运动',
'相机': '相机',
'辅助工具': '辅助',
'快递物流': '快递',
'高等教育': '教育',
'股票': '股票',
'菜谱': '菜谱',
'行车辅助': '行车',
'仙侠': '仙侠',
'亲子儿童': '亲子',
'购物咨询': '购物',
'射击游戏': '射击',
'漫画': '漫画',
'中小学': '小学',
'同城服务': '同城',
'成人教育': '成人',
'求职': '求职',
'电子产品': '电子',
'艺术': '艺术',
'薅羊毛': '赚钱',
'约会社交': '约会',
'经营': '经营',
'兼职': '兼职',
'短视频': '短视',
'音乐': '音乐',
'英语': '英语',
'棋牌中心': '棋牌',
'摄影修图': '摄影',
'养生保健': '养生',
'办公': '办公',
'政务': '政务',
'视频': '视频',
'论坛圈子': '论坛',
'彩票': '彩票',
'直播': '直播',
'其他': '其他',
'休闲益智': '休闲',
'策略': '策略',
'即时通讯': '通讯',
'汽车交易': '买车',
'违章': '违章',
'地图导航': '地图',
'民航': '民航',
'电台': '电台',
'语言(非英语)': '语言',
'搞笑': '搞笑',
'婚恋社交': '婚恋',
'社区超市': '超市',
'日常养车': '养车',
'杂志': '杂志',
'视频教育': '在线',
'家政': '家政',
'影视娱乐': '影视',
'装修家居': '装修',
'体育咨讯': '资讯',
'社交工具': '社交',
'餐饮店': '餐饮',
'美颜': '美颜',
'问诊挂号': '挂号',
'飞行空战': '飞行',
'综合预定': '预定',
'电影票务': '票务',
'笔记': '笔记',
'买房': '买房',
'外卖': '外卖',
'母婴': '母婴',
'打车': '打车',
'情侣社交': '情侣',
'日程管理': '日程',
'租车': '租车',
'微博博客': '博客',
'百科': '百科',
'绘画': '绘画',
'铁路': '铁路',
'生活社交': '生活',
'租房': '租房',
'酒店': '酒店',
'保险': '保险',
'问答交流': '问答',
'收款': '收款',
'MOBA': '竞技',
'K歌': '唱歌',
'技术': '技术',
'减肥瘦身': '减肥',
'工作社交': '工作',
'团购': '团购',
'记账': '记账',
'女性': '女性',
'公务员': '公务',
'二手': '二手',
'美妆美业': '美妆',
'汽车咨询': '汽车',
'行程管理': '行程',
'免费WIFI': '免费',
'教辅': '教辅',
'成人': '两性',
'出国': '出国',
'婚庆': '婚庆',
'民宿短租': '民宿'}
choice = [k for k,v in label2desc.items()]
print('1'.join(choice))
print(len('1'.join(choice)))
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
texta = data['sentence']
textb = ''
question = '请问app应用属于?'
choice = [v for k,v in label2desc.items()]
answer = label2desc[data['label_des']] if 'label_des' in data.keys() else ''
# choice = [k for k,v in label2desc.items()]
# answer = data['label_des'] if 'label_des' in data.keys() else ''
label = choice.index(answer) if 'label_des' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
# for i in range(5):
# print(result[i])
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 4,638 | 23.675532 | 88 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/chid_preprocessing.py
|
import json
from tqdm import tqdm
import os
import re
import argparse
mask_token='[MASK]'
label_mask='__'
def load_schema(train_answer,dev_answer):
with open(train_answer,'r',encoding='utf-8') as f:
train2id = json.loads(''.join(f.readlines()))
with open(dev_answer,'r',encoding='utf-8') as f:
dev2id = json.loads(''.join(f.readlines()))
for k,v in dev2id.items():
train2id[k]=v
return train2id
def cut(sentence):
"""
将一段文本切分成多个句子
:param sentence: ['虽然BillRoper正忙于全新游戏
:return: ['虽然BillRoper正..接近。' , '与父母,之首。' , '很多..常见。' , '”一位上..推进。' , ''”一直坚..市场。'' , '如今,...的70%。']
"""
new_sentence = []
sen = []
    for i in sentence:
        sen.append(i)
        if i in ['。', '!', '?', '?',',',',']:
            new_sentence.append("".join(sen))
            sen = []
    if len(new_sentence) <= 1:  # at most one sentence found (long text with no full stop): fall back to splitting on commas
new_sentence = []
sen = []
for i in sentence:
sen.append(i)
if i.split(' ')[0] in [',', ','] and len(sen) != 0:
new_sentence.append("".join(sen))
sen = []
    if len(sen) > 0:  # keep the trailing fragment even if it has no closing punctuation
new_sentence.append("".join(sen))
return new_sentence
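# Locate the sentence that contains the blank marker `m` and return the text before that
# sentence, the text after it, and the sentence itself (empty strings if it is not found).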
def get_answer_text(text,m):
sent_list=cut(text)
text1=''
text2=''
for i,sent in enumerate(sent_list):
if m in sent:
text1=''.join(sent_list[:i])
if i+1>len(sent_list)-1:
text2=''
else:
text2=''.join(sent_list[i+1:])
index_text=sent
return text1,text2,index_text
return '','',''
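# Build one multiple-choice sample per idiom blank: other blanks in the surrounding context
# are filled with their gold idioms, the shared candidates are substituted into the blank's
# sentence to form the choices, and the remaining context is trimmed from both sides so
# that the whole input stays within 512 characters.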
def load_data(file_path,label2id):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for l,line in tqdm(enumerate(lines)):
data = json.loads(line)
choice=data['candidates']
for s,sent in enumerate(data['content']):
masks=re.findall("#idiom\d{6}#", sent)
for m in masks:
text1,text2,index_text=get_answer_text(sent,m)
                # Fill the other blanks in the context with their gold idioms; index into the
                # original candidate list because `choice` is rebuilt into full sentences below.
                masks1=re.findall("#idiom\d{6}#", text1)
                for m1 in masks1:
                    text1=text1.replace(m1,data['candidates'][label2id[m1]])
                masks2=re.findall("#idiom\d{6}#", text2)
                for m2 in masks2:
                    text2=text2.replace(m2,data['candidates'][label2id[m2]])
                masks3=re.findall("#idiom\d{6}#", index_text)
                for m3 in masks3:
                    if m3!=m:
                        index_text=index_text.replace(m3,data['candidates'][label2id[m3]])
choice=[]
for c in data['candidates']:
choice.append(index_text.replace(m,c))
if len('.'.join(choice))>400:
choice=data['candidates']
text1=text1+index_text.split(m)[0]
text2=index_text.split(m)[1]+text2
if len(text1)+len(text2)>512-len('.'.join(choice)):
split1=0
split2=0
while split1+split2<512-len('.'.join(choice)):
if split1<len(text1):
split1+=1
if split2<len(text2):
split2+=1
text1=text1[-split1:]
text2=text2[:split2]
label=label2id[m] if m in label2id.keys() else 0
answer=choice[label] if m in label2id.keys() else ''
result.append({'texta':text1,
'textb':text2,
'question':'',
'choice':choice,
'answer':answer,
'label':label,
'id':m,
'text_id':s,
'line_id':l})
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
label2id = load_schema(os.path.join(data_path,'train_answer.json'),os.path.join(data_path,'dev_answer.json'))
file_list = ['train','dev','test1.1']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path,label2id),output_path)
| 5,303 | 32.358491 | 113 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/afqmc_preprocessing.py
|
import json
from tqdm import tqdm
import os
import argparse
label2desc={"0": "不相似", "1": "相似"}
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
texta = data['sentence1']
textb = data['sentence2']
question = ''
choice = [v for k,v in label2desc.items()]
answer = label2desc[data['label']] if 'label' in data.keys() else ''
label = choice.index(answer) if 'label' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({
'task_type':'语义匹配',
'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 1,930 | 31.183333 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/wsc_preprocessing.py
|
import json
from tqdm import tqdm
import os
import argparse
label2desc={'true':'是','false':'不是'}
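# Mark the two spans inline: the candidate antecedent (span1) is wrapped in square brackets
# and the pronoun (span2) in underscores, with insertion offsets adjusted according to
# which span occurs first; the two choices then read "<pronoun>是/不是<antecedent>".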
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
target = data['target']
text=list(data['text'])
if target['span2_index']<target['span1_index']:
text.insert(target['span2_index'],'_')
text.insert(target['span2_index']+len(target['span2_text'])+1,'_')
text.insert(target['span1_index']+2,'[')
text.insert(target['span1_index']+2+len(target['span1_text'])+1,']')
else:
text.insert(target['span1_index'],'[')
text.insert(target['span1_index']+len(target['span1_text'])+1,']')
text.insert(target['span2_index']+2,'_')
text.insert(target['span2_index']+2+len(target['span2_text'])+1,'_')
texta = ''.join(text)
textb = ''
span2_text=target['span2_text']
span1_text=target['span1_text']
question = ''
choice = []
for k,v in label2desc.items():
            choice.append(f'{span2_text}{v}{span1_text}')
# print(choice)
answer = label2desc[data['label']] if 'label' in data.keys() else ''
answer = f'{span2_text}{answer}{span1_text}'
label = choice.index(answer) if 'label' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
for i in range(5):
print(result[i])
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test1.0','test1.1']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 2,862 | 34.345679 | 84 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/csl_preprocessing.py
|
import json
from tqdm import tqdm
import os
import jieba.analyse
import argparse
label2desc={'1':'可以','0':'不能'}
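# Each CSL sample prepends the top-15 jieba TF-IDF keywords of the abstract to the text and
# asks whether the provided keyword list can ('可以') or cannot ('不能') be used to
# summarise the abstract; the gold label selects between these two phrasings.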
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
texta = data['abst']
abst = data['abst']
textb = ''
keyword = '、'.join(data['keyword'])
question = ''
keyword=data['keyword']
rs=jieba.analyse.extract_tags(data['abst'],topK=15)
texta='、'.join(rs)+'。'+texta
comm=[]
for k in keyword:
if k in rs:
comm.append(k)
for word in comm:
if word in abst:
abst=abst.replace(word,word+'(共现关键字)')
comm=[word for word in comm]
keyword=[word for word in data['keyword']]
comm_text='共现词汇'+str(len(comm))+'个,分别是'+'、'.join(comm)
keyword = '、'.join(keyword)
question=''
choice = [f'{v}使用{keyword}概括摘要' for k,v in label2desc.items()]
answer = label2desc[data['label']] if 'label' in data.keys() else ''
answer = f'{answer}使用{keyword}概括摘要'
label = choice.index(answer) if 'label' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
for i in range(5):
print(result[i])
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 2,677 | 29.089888 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue1.1/data_preprocessing/tnews_preprocessing.py
|
import json
import os
from tqdm import tqdm
import argparse
label2desc={"news_story": "故事",
"news_culture": "文化",
"news_entertainment": "娱乐",
"news_sports": "体育",
"news_finance": "财经",
"news_house": "房产",
"news_car": "汽车",
"news_edu": "教育",
"news_tech": "科技",
"news_military": "军事",
"news_travel": "旅游",
"news_world": "国际",
"news_stock": "股票",
"news_agriculture": "农业",
"news_game": "电竞"}
def load_data(file_path,is_training=False):
with open(file_path, 'r', encoding='utf8') as f:
lines = f.readlines()
result=[]
for line in tqdm(lines):
data = json.loads(line)
texta = data['sentence']
textb = ''
question = '下面新闻属于哪一个类别?'
choice = [v for k,v in label2desc.items()]
answer = label2desc[data['label_desc']] if 'label_desc' in data.keys() else ''
label = choice.index(answer) if 'label_desc' in data.keys() else 0
text_id = data['id'] if 'id' in data.keys() else 0
result.append({'texta':texta,
'textb':textb,
'question':question,
'choice':choice,
'answer':answer,
'label':label,
'id':text_id})
print(result[0])
return result
def save_data(data,file_path):
with open(file_path, 'w', encoding='utf8') as f:
for line in data:
json_data=json.dumps(line,ensure_ascii=False)
f.write(json_data+'\n')
if __name__=="__main__":
parser = argparse.ArgumentParser(description="train")
parser.add_argument("--data_path", type=str,default="")
parser.add_argument("--save_path", type=str,default="")
args = parser.parse_args()
data_path = args.data_path
save_path = args.save_path
if not os.path.exists(save_path):
os.makedirs(save_path)
file_list = ['train','dev','test1.0','test1.1']
for file in file_list:
file_path = os.path.join(data_path,file+'.json')
output_path = os.path.join(save_path,file+'.json')
save_data(load_data(file_path),output_path)
| 2,386 | 32.619718 | 90 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/sequence_tagging/finetune_sequence_tagging.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import copy
import logging
import torch.nn.functional as F
import os
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from torch.utils.data import Dataset, DataLoader
from torch.utils.data._utils.collate import default_collate
from fengshen.models.tagging_models.bert_for_tagging import BertLinear,BertCrf,BertSpan,BertBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_collator import CollatorForLinear, CollatorForCrf, CollatorForSpan, CollatorForBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_datasets import DataProcessor, get_datasets
from fengshen.metric.metric import EntityScore
from fengshen.models.model_utils import configure_optimizers, get_total_steps
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.data.universal_datamodule import UniversalDataModule
from transformers import (
BertTokenizer, BertConfig, AutoTokenizer
)
from fengshen.metric.utils_ner import get_entities, bert_extract_item
_model_dict={
'bert-linear': BertLinear,
'bert-crf': BertCrf,
'bert-span': BertSpan,
'bert-biaffine': BertBiaffine
}
_collator_dict={
    'linear': CollatorForLinear,
    'crf': CollatorForCrf,
    'span': CollatorForSpan,
    'biaffine': CollatorForBiaffine
}
_validation_dict={
'linear': 'validation_linear',
'crf': 'validation_crf',
'span': 'validation_span',
'biaffine': 'validation_biaffine',
}
_prediction_dict={
'linear': 'predict_linear',
'crf': 'predict_crf',
'span': 'predict_span',
'biaffine': 'predict_biaffine',
}
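# The model class, collator and validation/prediction routines are all looked up in the
# dicts above from `--model_type` (bert) combined with `--decode_type`
# (linear / crf / span / biaffine).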
logger = logging.getLogger(__name__)
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument("--max_seq_length", default=512, type=int)
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--model_type', default='bert', type=str)
parser.add_argument("--decode_type", default="linear", choices=["linear", "crf", "biaffine", "span"], type=str)
parser.add_argument('--loss_type', default='ce', type=str, choices=['lsr', 'focal', 'ce'])
return parent_args
def __init__(self, args, id2label, tokenizer):
super().__init__()
self.model_name=args.model_type+"-"+args.decode_type
self.id2label = id2label
self.config=BertConfig.from_pretrained(args.model_path)
self.tokenizer = tokenizer
self.model = _model_dict[self.model_name].from_pretrained(args.model_path, config=self.config, num_labels=len(self.id2label), loss_type=args.loss_type)
self.entity_score=EntityScore()
self.validate_fn=getattr(self,_validation_dict[args.decode_type])
self.predict_fn=getattr(self,_prediction_dict[args.decode_type])
self.predict_result=[]
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
self.validate_fn(batch,batch_idx)
def validation_linear(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
for i, label in enumerate(labels):
y_true = []
y_pred = []
for j, m in enumerate(label):
if j == 0:
continue
elif j == (torch.sum(batch['attention_mask'][i]).item()-1):
true_subject=get_entities(y_true,self.id2label)
pred_subject=get_entities(y_pred,self.id2label)
self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
break
else:
y_true.append(self.id2label[labels[i][j]])
y_pred.append(self.id2label[preds[i][j]])
self.log('val_loss', loss)
def validation_crf(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = self.model.crf.decode(logits, batch['attention_mask'])
preds = preds.detach().squeeze(0).cpu().numpy().tolist()
labels = batch['labels'].detach().cpu().numpy()
for i, label in enumerate(labels):
y_true = []
y_pred = []
for j, m in enumerate(label):
if j == 0:
continue
elif j == (torch.sum(batch['attention_mask'][i]).item()-1):
true_subject=get_entities(y_true,self.id2label)
pred_subject=get_entities(y_pred,self.id2label)
self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
break
else:
y_true.append(self.id2label[labels[i][j]])
y_pred.append(self.id2label[preds[i][j]])
self.log('val_loss', loss)
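    # Span-style validation below: start/end logits are decoded into (label, start, end)
    # triples by bert_extract_item and scored against the gold subjects with EntityScore.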
def validation_span(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
start_logits = outputs.start_logits
end_logits = outputs.end_logits
labels=batch['subjects']
for i, T in enumerate(labels):
active_start_logits=start_logits[i][:batch['input_len'][i]]
active_end_logits=end_logits[i][:batch['input_len'][i]]
R = bert_extract_item(active_start_logits, active_end_logits)
T=T[~torch.all(T==-1,dim=-1)].cpu().numpy()
T=list(map(lambda x:(self.id2label[x[0]],x[1],x[2]),T))
R=list(map(lambda x:(self.id2label[x[0]],x[1],x[2]),R))
self.entity_score.update(true_subject=T, pred_subject=R)
self.log('val_loss', loss)
def validation_biaffine(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.span_logits
        preds = torch.argmax(logits, dim=-1).cpu().numpy()
labels = batch['span_labels'].cpu().numpy()
for i, label in enumerate(labels):
input_len=(batch['input_len'][i])-2
active_label=labels[i,1:input_len+1,1:input_len+1]
active_pred=preds[i,1:input_len+1,1:input_len+1]
temp_1 = []
temp_2 = []
for j in range(input_len):
for k in range(input_len):
if self.id2label[active_label[j,k]]!="O":
temp_1.append([self.id2label[active_label[j,k]],j,k])
if self.id2label[active_pred[j,k]]!="O":
temp_2.append([self.id2label[active_pred[j,k]],j,k])
self.entity_score.update(pred_subject=temp_2, true_subject=temp_1)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
# compute metric for all process
score_dict, _ = self.entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
# reset the metric after once validation
self.entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def predict_step(self, batch, batch_idx):
batch['labels'] = None
outputs = self.model(**batch)
self.predict_fn(batch,batch_idx)
    def predict_linear(self, batch, batch_idx):
        # match the other predict_* helpers: run the model here and decode its own logits
        outputs = self.model(**batch)
        logits = torch.argmax(F.log_softmax(outputs.logits, dim=2), dim=2)
        preds = logits.detach().cpu().numpy()
for i, pred in enumerate(preds):
text = self.tokenizer.convert_ids_to_tokens(batch['input_ids'][i])[:batch['input_len'][i]][1:-1]
pred = pred[:batch['input_len'][i]][1:-1]
label_entities = get_entities(pred, self.id2label)
for label_list in label_entities:
label_list.append("".join(text[label_list[1]:label_list[2]+1]))
self.predict_result.extend(label_entities)
def predict_crf(self, batch, batch_idx):
logits = self.model(**batch).logits
preds = self.model.crf.decode(logits, batch['attention_mask']).squeeze(0).cpu().numpy().tolist()
for i, pred in enumerate(preds):
text = self.tokenizer.convert_ids_to_tokens(batch['input_ids'][i])[:batch['input_len'][i]][1:-1]
pred = pred[:batch['input_len'][i]][1:-1]
label_entities = get_entities(pred, self.id2label)
for label_list in label_entities:
label_list.append("".join(text[label_list[1]:label_list[2]+1]))
self.predict_result.extend(label_entities)
def predict_span(self, batch, batch_idx):
batch['start_positions'] = None
batch['end_positions'] = None
outputs = self.model(**batch)
start_logits, end_logits = outputs.start_logits, outputs.end_logits
for i, _ in enumerate(start_logits):
text = self.tokenizer.convert_ids_to_tokens(batch['input_ids'][i])[:batch['input_len'][i]][1:-1]
R = bert_extract_item(start_logits[i][:batch['input_len'][i]], end_logits[i][:batch['input_len'][i]])
if R:
label_entities = [[self.id2label[x[0]],x[1],x[2],"".join(text[x[1]:x[2]+1])] for x in R]
else:
label_entities = []
self.predict_result.extend(label_entities)
def configure_optimizers(self):
return configure_optimizers(self)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
# * Args for data preprocessing
total_parser = UniversalDataModule.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
datasets=get_datasets(args)
checkpoint_callback = UniversalCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
label2id,id2label=DataProcessor.get_labels(args)
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collator = _collator_dict[args.decode_type]()
collator.args=args
collator.tokenizer=tokenizer
collator.label2id=label2id
data_model = UniversalDataModule(tokenizer,collator,args,datasets)
model = LitModel(args,id2label,tokenizer)
print(label2id)
trainer.fit(model, data_model)
# trainer.predict(model,dataloaders=data_model.predict_dataloader())
if __name__ == "__main__":
main()
| 12,218 | 37.545741 | 159 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/FastDemo/YuyuanQA.py
|
import requests
import langid
import streamlit as st
from translate import baiduTranslatorMedical
from translate import baiduTranslator
langid.set_languages(['en', 'zh'])
lang_dic = {'zh': 'en', 'en': 'zh'}
st.set_page_config(
page_title="余元医疗问答",
page_icon=":shark:",
# layout="wide",
initial_sidebar_state="expanded",
menu_items={
'Get Help': 'https://www.extremelycoolapp.com/help',
'Report a bug': "https://www.extremelycoolapp.com/bug",
'About': "# This is a header. This is an *extremely* cool app!"
}
)
st.title('Demo for MedicalQA')
st.sidebar.header("参数配置")
sbform = st.sidebar.form("固定参数设置")
n_sample = sbform.slider("设置返回条数", min_value=1, max_value=10, value=3)
text_length = sbform.slider('生成长度:', min_value=32, max_value=512, value=64, step=32)
text_level = sbform.slider('文本多样性:', min_value=0.1, max_value=1.0, value=0.9, step=0.1)
model_id = sbform.number_input('选择模型号:', min_value=0, max_value=13, value=13, step=1)
trans = sbform.selectbox('选择翻译内核', ['百度通用', '医疗生物'])
sbform.form_submit_button("配置")
form = st.form("参数设置")
input_text = form.text_input('请输入你的问题:', value='', placeholder='例如:糖尿病的症状有哪些?')
if trans == '百度通用':
translator = 'baidu_common'
else:
translator = 'baidu'
if input_text:
lang = langid.classify(input_text)[0]
if translator == 'baidu':
st.write('**你的问题是:**', baiduTranslatorMedical(input_text, src=lang, dest=lang_dic[lang]).text)
else:
st.write('**你的问题是:**', baiduTranslator(input_text, src=lang, dest=lang_dic[lang]).text)
form.form_submit_button("提交")
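# The question is forwarded to an internal QA HTTP service; judging from the rendering loop
# further down, the response body is a JSON-style list of candidate answers carrying
# 'next_sentence' (English) and 'fy_next_sentence' (translated Chinese) fields.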
# @st.cache(suppress_st_warning=True)
def generate_qa(input_text, n_sample, model_id='7', length=64, translator='baidu', level=0.7):
# st.write('调用了generate函数')
URL = 'http://192.168.190.63:6605/qa'
data = {"text": input_text, "n_sample": n_sample, "model_id": model_id,
"length": length, 'translator': translator, 'level': level}
r = requests.get(URL, params=data)
return r.text
# my_bar = st.progress(80)
with st.spinner('老夫正在思考中🤔...'):
if input_text:
results = generate_qa(input_text, n_sample, model_id=str(model_id),
translator=translator, length=text_length, level=text_level)
for idx, item in enumerate(eval(results), start=1):
st.markdown(f"""
**候选回答「{idx}」:**\n
""")
st.info('中文:%s' % item['fy_next_sentence'])
st.info('英文:%s' % item['next_sentence'])
| 2,493 | 33.638889 | 102 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/ubert/example.py
|
import argparse
from fengshen import UbertPipelines
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '6'
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser = UbertPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
    # set the training arguments used below
    args.pretrained_model_path = 'IDEA-CCNL/Erlangshen-Ubert-110M-Chinese' # path of the pretrained model; the released checkpoint is hosted on HuggingFace
    args.default_root_dir = './' # root directory for logs, tensorboard files, etc.
args.max_epochs = 5
args.gpus = 1
args.batch_size = 1
    # Format your data like the json records below and you can train and predict out of the box; each list here holds a single example.
train_data = [
{
"task_type": "抽取任务",
"subtask_type": "实体识别",
"text": "彭小军认为,国内银行现在走的是台湾的发卡模式,先通过跑马圈地再在圈的地里面选择客户,",
"choices": [
{"entity_type": "地址", "label": 0, "entity_list": [
{"entity_name": "台湾", "entity_type": "地址", "entity_idx": [[15, 16]]}]},
{"entity_type": "书名", "label": 0, "entity_list": []},
{"entity_type": "公司", "label": 0, "entity_list": []},
{"entity_type": "游戏", "label": 0, "entity_list": []},
{"entity_type": "政府机构", "label": 0, "entity_list": []},
{"entity_type": "电影名称", "label": 0, "entity_list": []},
{"entity_type": "人物姓名", "label": 0, "entity_list": [
{"entity_name": "彭小军", "entity_type": "人物姓名", "entity_idx": [[0, 2]]}]},
{"entity_type": "组织机构", "label": 0, "entity_list": []},
{"entity_type": "岗位职位", "label": 0, "entity_list": []},
{"entity_type": "旅游景点", "label": 0, "entity_list": []}
],
"id": 0}
]
dev_data = [
{
"task_type": "抽取任务",
"subtask_type": "实体识别",
"text": "就天涯网推出彩票服务频道是否是业内人士所谓的打政策“擦边球”,记者近日对此事求证彩票监管部门。",
"choices": [
{"entity_type": "地址", "label": 0, "entity_list": []},
{"entity_type": "书名", "label": 0, "entity_list": []},
{"entity_type": "公司", "label": 0, "entity_list": [
{"entity_name": "天涯网", "entity_type": "公司", "entity_idx": [[1, 3]]}]},
{"entity_type": "游戏", "label": 0, "entity_list": []},
{"entity_type": "政府机构", "label": 0, "entity_list": []},
{"entity_type": "电影名称", "label": 0, "entity_list": []},
{"entity_type": "人物姓名", "label": 0, "entity_list": []},
{"entity_type": "组织机构", "label": 0, "entity_list": [
{"entity_name": "彩票监管部门", "entity_type": "组织机构", "entity_idx": [[40, 45]]}]},
{"entity_type": "岗位职位", "label": 0, "entity_list": [
{"entity_name": "记者", "entity_type": "岗位职位", "entity_idx": [[31, 32]]}]},
{"entity_type": "旅游景点", "label": 0, "entity_list": []}
],
"id": 0}
]
test_data = [
{
"task_type": "抽取任务",
"subtask_type": "实体识别",
"text": "这也让很多业主据此认为,雅清苑是政府公务员挤对了国家的经适房政策。",
"choices": [
{"entity_type": "地址", "label": 0, "entity_list": [
{"entity_name": "雅清苑", "entity_type": "地址", "entity_idx": [[12, 14]]}]},
{"entity_type": "书名", "label": 0, "entity_list": []},
{"entity_type": "公司", "label": 0, "entity_list": []},
{"entity_type": "游戏", "label": 0, "entity_list": []},
{"entity_type": "政府机构", "label": 0, "entity_list": []},
{"entity_type": "电影名称", "label": 0, "entity_list": []},
{"entity_type": "人物姓名", "label": 0, "entity_list": []},
{"entity_type": "组织机构", "label": 0, "entity_list": []},
{"entity_type": "岗位职位", "label": 0, "entity_list": [
{"entity_name": "公务员", "entity_type": "岗位职位", "entity_idx": [[18, 20]]}]},
{"entity_type": "旅游景点", "label": 0, "entity_list": []}
],
"id": 0},
]
model = UbertPipelines(args)
model.fit(train_data, dev_data)
result = model.predict(test_data)
for line in result:
print(line)
if __name__ == "__main__":
main()
| 4,235 | 43.125 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/uniex/example.py
|
import argparse
from fengshen.pipelines.information_extruction import UniEXPipelines
import os
import json
from tqdm import tqdm
import copy
import time
def load_data(data_path):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = [json.loads(line) for line in tqdm(lines)]
return samples
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--data_dir', default='./data', type=str)
total_parser.add_argument('--train_data', default='train.json', type=str)
total_parser.add_argument('--valid_data', default='dev.json', type=str)
total_parser.add_argument('--test_data', default='test.json', type=str)
total_parser = UniEXPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
train_data = load_data(os.path.join(args.data_dir, args.train_data))
dev_data = load_data(os.path.join(args.data_dir, args.valid_data))
test_data = load_data(os.path.join(args.data_dir, args.test_data))
# train_data=train_data[:10]
test_data=test_data[:100]
dev_data=dev_data[:10]
test_data_ori = copy.deepcopy(test_data)
model = UniEXPipelines(args)
if args.train:
model.fit(train_data, dev_data,test_data)
start_time=time.time()
pred_data = model.predict(test_data)
consum=time.time()-start_time
print('总共耗费:',consum)
print('sent/s:',len(test_data)/consum)
for line in pred_data[:10]:
print(line)
if __name__ == "__main__":
main()
| 1,547 | 27.666667 | 77 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/translate/finetune_deltalm.py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
import argparse
import torch
import os
import logging
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from pytorch_lightning.utilities import rank_zero_info
from sacrebleu.metrics import BLEU
from fengshen.utils.utils import chinese_char_tokenize
from fengshen.models.model_utils import add_module_args, add_inverse_square_args
from fengshen.models.deltalm.tokenizer_deltalm import DeltalmTokenizer
from fengshen.models.deltalm.modeling_deltalm import DeltalmForConditionalGeneration
from fengshen.utils import UniversalCheckpoint
from fengshen.data.universal_datamodule import UniversalDataModule
from pytorch_lightning import Trainer, loggers, LightningModule
from pytorch_lightning.callbacks import LearningRateMonitor
from mosestokenizer import MosesDetokenizer
from typing import List
import sys
sys.path.append('../../../')
# from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
# from pytorch_lightning.callbacks.early_stopping import EarlyStopping
mose_decode = MosesDetokenizer()
os.environ["CUDA_VISIBLE_DEVICES"] = '4'
logger = logging.getLogger(__name__)
EVAL_BLEU_ORDER = 4
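# Corpus-level BLEU is recomputed from the per-batch sufficient statistics collected during
# validation (n-gram match counts, n-gram totals, hypothesis and reference lengths), so the
# reported score is a true corpus score even though generation happens batch by batch.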
def calc_bleu_from_stats(sentence_stats: pd.DataFrame) -> BLEU:
corpus_stats = sentence_stats.sum(axis=0)
smooth = {"smooth_method": "exp"}
corpus_bleu = BLEU.compute_bleu(
correct=[
corpus_stats.correct_1_grams,
corpus_stats.correct_2_grams,
corpus_stats.correct_3_grams,
corpus_stats.correct_4_grams,
],
total=[
corpus_stats.total_1_grams,
corpus_stats.total_2_grams,
corpus_stats.total_3_grams,
corpus_stats.total_4_grams,
],
sys_len=corpus_stats.translation_length,
ref_len=corpus_stats.reference_length,
**smooth
)
return corpus_bleu
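# Label-smoothed cross entropy in the usual fairseq formulation: with smoothing factor
# epsilon and vocabulary size V, eps_i = epsilon / (V - 1) and
#     loss = ((1 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss) / num_non_pad_tokens,
# where nll_loss is -log p(target) and smooth_loss sums -log p over the whole vocabulary;
# padding positions are masked out before the reduction.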
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
# logger.debug("Debug: After target.dim() == lprobs.dim(): ", target.dim(), lprobs.dim())
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
valid_length = target.ne(ignore_index).sum()
# unvalid_length = target.eq(ignore_index).sum()
loss = ((1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss) / valid_length.item()
return loss, nll_loss
class DataCollator:
def __init__(self, model, tokenizer, max_enc_length, max_dec_length, reverse_src_tgt):
self.tokenizer = tokenizer
self.max_enc_length = max_enc_length
self.max_dec_length = max_dec_length
self.model = model
self.reverse_src_tgt = reverse_src_tgt
def __call__(self, batch_samples):
batch_inputs, batch_targets = [], []
for sample in batch_samples:
if self.reverse_src_tgt:
if "tgt" in sample and len(sample["tgt"]) != 0:
batch_inputs.append(sample["tgt"])
batch_targets.append(sample["src"])
else:
if "src" in sample and len(sample["src"]) != 0:
batch_inputs.append(sample["src"])
batch_targets.append(sample["tgt"])
batch_data = self.tokenizer(
batch_inputs,
padding='max_length',
max_length=self.max_enc_length,
truncation=True,
return_tensors="pt"
)
with self.tokenizer.as_target_tokenizer():
labels = self.tokenizer(
batch_targets,
padding='max_length',
max_length=self.max_dec_length,
truncation=False,
return_tensors="pt"
)["input_ids"]
batch_data['decoder_input_ids'] = self.model.prepare_decoder_input_ids_from_labels(labels)
batch_data['labels'] = labels
batch_data['src'] = batch_inputs
batch_data['tgt'] = batch_targets
# logger.debug(batch_data)
return batch_data
class FinetuneTranslation(LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('deltalm-base finetune')
parser.add_argument('--label_smoothing', default=0.1, type=float)
return parent_args
def __init__(self, args, tokenizer=None):
super().__init__()
self.args = args
self.save_hyperparameters(args)
if args.other_model:
self.model = AutoModelForSeq2SeqLM.from_pretrained(args.model_path)
else:
self.model = DeltalmForConditionalGeneration.from_pretrained(args.model_path, ignore_mismatched_sizes=True)
self.tokenizer = tokenizer
assert self.tokenizer, "tokenizer is None!"
self.blue_metric = BLEU()
self.sufficient_stats: List[List[int]] = []
self.label_smoothing = self.args.label_smoothing
self.mose_decode = MosesDetokenizer()
if self.args.label_smoothing != 0:
self.loss_fn = label_smoothed_nll_loss
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)
ab_size = self.trainer.accumulate_grad_batches * float(
self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) //
tb_size) // ab_size
def configure_optimizers(self):
# if self.args.use_default_configure:
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
if self.label_smoothing == 0:
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
return output.loss
# TODO label_smoothing should be implemented at here
else:
labels = batch["labels"]
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
decoder_input_ids=batch['decoder_input_ids'])
logits = output["logits"]
m = torch.nn.LogSoftmax(dim=-1)
lprobs = m(logits.float())
loss, _ = self.loss_fn(lprobs.view(-1, lprobs.size(-1)), labels.view(-1),
self.label_smoothing, self.tokenizer.pad_token_id)
self.log('train_loss', loss, sync_dist=True)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1, ))
y_true = labels.view(size=(-1, ))
pad_mask = y_true.eq(1)
valid_length = y_true.ne(1).sum()
corr = torch.eq(y_pred, y_true.float())
corr.masked_fill_(pad_mask, 0.0)
acc = torch.sum(corr.float()) / valid_length
return acc
def get_sufficient_stats(self, translations: List[str], references: List[str]) -> pd.DataFrame:
assert len(translations) == len(references), (
f"There are {len(translations)} translated sentences "
f"but {len(references)} reference sentences"
)
# for sentence, ref in zip(translations, references):
sentence_bleu = self.blue_metric.corpus_score(translations, [references])
self.sufficient_stats.append(
[
# Number of correct 1-grams, .., 4-grams
sentence_bleu.counts[0],
sentence_bleu.counts[1],
sentence_bleu.counts[2],
sentence_bleu.counts[3],
# Total number of 1-grams, .., 4-grams
sentence_bleu.totals[0],
sentence_bleu.totals[1],
sentence_bleu.totals[2],
sentence_bleu.totals[3],
# Length of translated sentence.
sentence_bleu.sys_len,
# Length of reference sentence.
sentence_bleu.ref_len,
]
)
def on_validation_start(self) -> None:
# rm file at validation start
prefix, ext = os.path.splitext(self.hparams.output_save_path)
file_path_rank = '{}_{}{}'.format(
prefix,
self.trainer._accelerator_connector.cluster_environment.
global_rank(), ext)
if os.path.exists(file_path_rank):
# logger.debug('rm {}'.format(file_path_rank))
os.remove(file_path_rank)
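    # Validation below: each rank generates translations, post-processes them (Moses
    # detokenization for non-Chinese targets, character-level tokenization for Chinese
    # before BLEU), writes its predictions to a per-rank file and accumulates the BLEU
    # sufficient statistics that validation_epoch_end turns into a corpus score.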
def validation_step(self, batch, batch_idx):
def postprocess_text(preds, labels, tgt_zh):
if tgt_zh:
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
else:
preds = list(map(lambda x: mose_decode(x.strip().split()), preds))
labels = list(map(lambda x: mose_decode(x.strip().split()), labels))
return preds, labels
tmp_label = batch['labels']
end_token_index = torch.where(tmp_label == self.tokenizer.eos_token_id)[1]
for idx, end_idx in enumerate(end_token_index):
tmp_label[idx][end_idx+1:] = -100
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=tmp_label)
generated_ids = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
max_length=self.hparams.max_dec_length)
preds = self.tokenizer.batch_decode(generated_ids,
skip_special_tokens=True)
labels = torch.where(batch['labels'] != -100, batch['labels'],
self.tokenizer.pad_token_id)
labels = self.tokenizer.batch_decode(labels,
skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(preds, labels, self.args.tgt_zh)
# save preds for every rank
prefix, ext = os.path.splitext(self.hparams.output_save_path)
file_path_rank = '{}_{}{}'.format(
prefix,
self.trainer._accelerator_connector.cluster_environment.
global_rank(), ext)
self.save_prediction_to_file(preds=decoded_preds,
sources=batch['src'],
targets=decoded_labels,
ori_target=batch['tgt'],
file_path=file_path_rank)
if self.args.tgt_zh:
new_preds = [chinese_char_tokenize(p) for p in decoded_preds]
new_labels = [chinese_char_tokenize(label) for label in decoded_labels]
self.get_sufficient_stats(new_preds, new_labels)
else:
self.get_sufficient_stats(decoded_preds, decoded_labels)
# batch_bleu = self.blue_metric.corpus_score(decoded_preds, [decoded_labels]).score
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
def validation_epoch_end(self, outputs):
rank_zero_info("***** Validation results *****")
sentence_states = pd.DataFrame(
self.sufficient_stats,
columns=[
"correct_1_grams",
"correct_2_grams",
"correct_3_grams",
"correct_4_grams",
"total_1_grams",
"total_2_grams",
"total_3_grams",
"total_4_grams",
"translation_length",
"reference_length",
]
)
computed_bleu = calc_bleu_from_stats(sentence_states)
rank_zero_info("valid_sacrebleu= {}\n".format(computed_bleu.score))
self.log('valid_sacrebleu', computed_bleu.score, sync_dist=True)
self.sufficient_stats = []
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer._accelerator_connector.cluster_environment.global_rank(
) == 0:
self.model.save_pretrained(
os.path.join(
self.trainer.checkpoint_callback.dirpath,
'finetuned_epoch{}_step{}'.format(
checkpoint['epoch'], checkpoint['global_step'])))
def save_prediction_to_file(self, preds, sources, targets, ori_target, file_path):
with open(file_path, 'a', encoding='utf-8') as f:
for idx, pred in enumerate(preds):
source = sources[idx]
target = targets[idx]
tmp_result = dict()
tmp_result['pred'] = pred
tmp_result['source'] = source
tmp_result['label'] = target
tmp_result['ori_label'] = ori_target[idx]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data + '\n')
def test_step(self, batch, batch_idx):
# print(batch)
texts = batch['src']
# output summary and metrics
self.model.eval()
generated_ids = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
max_length=self.hparams.max_dec_length
)
preds = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
labels = torch.where(batch['labels'] != -100, batch['labels'],
self.tokenizer.pad_token_id)
labels = self.tokenizer.batch_decode(
labels, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        self.save_prediction_to_file(preds, texts, labels, batch['tgt'], self.hparams.output_save_path)
def configure_logger(logging_level=logging.INFO):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
    logger.setLevel(logging_level)
def main():
args_parser = argparse.ArgumentParser("Pegasus Task")
args_parser.add_argument('--do_eval_only',
action='store_true',
default=False)
args_parser.add_argument('--other_model',
action='store_true',
default=False)
args_parser.add_argument('--reverse_src_tgt',
action='store_true',
default=False)
args_parser.add_argument('--tgt_zh',
action='store_true',
default=False)
args_parser.add_argument('--early_stopping_callback',
action='store_true',
default=False)
args_parser.add_argument('--pretrained_model_path',
default='facebook/mbart',
type=str)
args_parser.add_argument('--output_save_path',
default='predict.json',
type=str)
args_parser.add_argument('--max_enc_length', default=512, type=int)
args_parser.add_argument('--max_dec_length', default=512, type=int)
# * Args for data preprocessing
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
# * Args for training
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args_parser = FinetuneTranslation.add_model_specific_args(args_parser)
args_parser = add_module_args(args_parser)
args_parser = add_inverse_square_args(args_parser)
args = args_parser.parse_args()
if args.other_model:
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
else:
tokenizer = DeltalmTokenizer.from_pretrained(args.model_path)
# tokenizer = AutoTokenizer.from_pretrained(args.model_path)
print("tokenizer vocab size: ", tokenizer.vocab_size)
model = FinetuneTranslation(args, tokenizer)
collator = DataCollator(model.model, tokenizer, args.max_enc_length, args.max_dec_length, args.reverse_src_tgt)
data_model = UniversalDataModule(tokenizer=tokenizer,
args=args,
# datasets=dataset,
collate_fn=collator)
lr_monitor = LearningRateMonitor(logging_interval='step')
    configure_logger(logging_level=logging.INFO)
if not args.do_eval_only:
lr_monitor = LearningRateMonitor(logging_interval='step')
tensorboard_logger = loggers.TensorBoardLogger(
save_dir=os.path.join(args.default_root_dir, 'logs/'),
name=os.path.basename(os.path.dirname(args.model_path)))
checkpoint_callback = UniversalCheckpoint(args)
# early_stop = EarlyStopping(monitor=args.monitor, mode=args.mode)
trainer = Trainer.from_argparse_args(
args, logger=tensorboard_logger, callbacks=[lr_monitor, checkpoint_callback])
trainer.fit(model, data_model)
else:
trainer = Trainer.from_argparse_args(args)
trainer.validate(model, data_model)
# trainer.test(model, data_model)
if __name__ == '__main__':
main()
| 18,263 | 39.586667 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/translate/prepare_dataset.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import os
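# Merge parallel, line-aligned files (train/valid/test, one file per language) into JSONL
# files with one {"src": ..., "tgt": ...} object per line, the format consumed by the
# translation DataCollator. Illustrative usage: python prepare_dataset.py <data_dir> en-zh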
def main(file_path, src_lang, tgt_lang):
file_list = ["train", "valid", "test"]
for filename in file_list:
sys.stderr.write("**** Start processing {} ... ****\n".format(filename))
src_full_path = os.path.join(file_path, ".".join((filename, src_lang)))
tgt_full_path = os.path.join(file_path, ".".join((filename, tgt_lang)))
src_reader = open(src_full_path, 'r')
tgt_reader = open(tgt_full_path, "r")
writer_full_path = os.path.join(file_path, ".".join((filename, src_lang + "_" + tgt_lang)))
writer = open(writer_full_path, "w")
# combine_dict = OrderedDict()
for row_src, row_tgt in zip(src_reader, tgt_reader):
combine_line = {}
combine_line["src"] = row_src.strip()
combine_line["tgt"] = row_tgt.strip()
json.dump(combine_line, writer, ensure_ascii=False)
writer.write('\n')
# print(row_src)
# print(row_tgt)
sys.stderr.write(f"**** Done change {filename} format **** \n")
if __name__ == "__main__":
file_path = sys.argv[1]
src_lang, tgt_lang = sys.argv[2].split("-")
main(file_path, src_lang, tgt_lang)
| 1,278 | 32.657895 | 99 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_randeng_bart/pretrain_bart.py
|
from transformers import AutoTokenizer, BartForConditionalGeneration, BartConfig
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import LearningRateMonitor
from dataclasses import dataclass
import os
import argparse
import torch
import math
import time
from torch.utils.data._utils.collate import default_collate
from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils import UniversalCheckpoint
from fengshen.models.model_utils import (
get_total_steps,
configure_optimizers,
add_module_args,
)
import numpy as np
SHOW_DATA = False
@dataclass
class BartCollator:
    '''
    Turns raw text inputs into training samples, i.e. the final model inputs.
    The main processing logic lives in __call__ and covers the text-infilling
    and sentence-permutation noising tasks.
    '''
    tokenizer: None  # the tokenizer used to convert text into ids
max_seq_length: 512
masked_lm_prob: 0.15
permute_sentence_ratio: 1.0
content_key: str = 'text'
def setup(self):
from fengshen.data.data_utils.sentence_split import ChineseSentenceSplitter
self.sentence_split = ChineseSentenceSplitter()
self.np_rng = np.random.RandomState(seed=((int(time.time()) % 2**32)))
inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
import jieba_fast
self.zh_tokenizer = jieba_fast.lcut
seg_tokens = ['。', ';', ';', '!', '!', '?', '?']
seg_token_ids = []
for t in seg_tokens:
if t in self.tokenizer.vocab:
seg_token_ids.append(self.tokenizer.vocab[t])
else:
print('seg_token "{}" not in vocab'.format(t))
self.seg_token_ids = set(seg_token_ids)
def permute_sentences(self, source, full_stops, p=1.0):
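        # BART-style sentence permutation: locate sentence boundaries via the
        # sentence-final punctuation marked in `full_stops`, then shuffle the order
        # of roughly a fraction `p` of the sentences while keeping the leading
        # [CLS] token in place.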
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1): sentence_ends[i]]
result[index: index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def __call__(self, samples):
'''
        samples: each sample looks like {"text": "hello world"}
'''
model_inputs = []
for s in samples:
sentences = self.sentence_split.tokenize(s[self.content_key])
tokenized_sentences = [self.tokenizer.convert_tokens_to_ids(
self.tokenizer.tokenize(sent)) for sent in sentences]
if len(tokenized_sentences) == 0:
print('find empty sentence')
continue
tokens = [self.tokenizer.cls_token_id]
for sent in tokenized_sentences:
for t in sent:
tokens.append(t)
if tokens[-1] != self.tokenizer.sep_token_id:
tokens.append(self.tokenizer.sep_token_id)
if len(tokens) > self.max_seq_length:
                # find the last sentence boundary before the limit so that, if possible, the final sentence stays intact
last_pos = self.max_seq_length - 1
for i in range(self.max_seq_length - 1, 0, -1):
if tokens[i-1] in self.seg_token_ids:
last_pos = i
break
tokens = tokens[:last_pos]
tokens.append(self.tokenizer.sep_token_id)
tokens = torch.LongTensor(tokens)
            full_stops = torch.any(torch.stack(
                [torch.eq(tokens, aelem) for aelem in self.seg_token_ids], dim=0), dim=0)
assert (self.max_seq_length -
tokens.shape[0]) >= 0, (tokens.size(), tokens[-1], self.max_seq_length)
source, target = tokens, tokens.clone()
if self.permute_sentence_ratio > 0.0:
source = self.permute_sentences(source, full_stops, self.permute_sentence_ratio)
if self.masked_lm_prob > 0.0:
mask_prob = self.masked_lm_prob * 2
max_predictions_per_seq = mask_prob * len(source)
(source, _, _, _, _) = create_masked_lm_predictions(
source.numpy(), self.vocab_id_list, self.vocab_id_to_token_dict, mask_prob,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id,
max_predictions_per_seq, self.np_rng,
masking_style='bert', zh_tokenizer=self.zh_tokenizer)
            # Merge consecutive [MASK] tokens: the masking above is BERT-style and
            # works character by character, so collapsing runs of [MASK] into a
            # single [MASK] turns character-level masks into span masks.
            span_mask_source = []
            for t in source:
                # skip a [MASK] that directly follows another [MASK]
                if len(span_mask_source) > 0 \
                        and t == self.tokenizer.mask_token_id \
                        and span_mask_source[-1] == self.tokenizer.mask_token_id:
                    continue
                span_mask_source.append(t)
            source = torch.LongTensor(span_mask_source)
assert (source >= 0).all()
# assert (source[1:-1] >= 1).all(), source
assert (source <= self.tokenizer.vocab_size).all()
assert source[0] == self.tokenizer.cls_token_id
assert source[-1] == self.tokenizer.sep_token_id
prev_output_tokens = torch.zeros_like(target)
# match the preprocessing in fairseq
prev_output_tokens[0] = self.tokenizer.sep_token_id
prev_output_tokens[1:] = target[:-1]
source_ = torch.full((self.max_seq_length,),
self.tokenizer.pad_token_id, dtype=torch.long)
source_[:source.shape[0]] = source
target_ = torch.full((self.max_seq_length,), -100, dtype=torch.long)
target_[:target.shape[0]] = target
prev_output_tokens_ = torch.full(
(self.max_seq_length,), self.tokenizer.pad_token_id, dtype=torch.long)
prev_output_tokens_[:prev_output_tokens.shape[0]] = prev_output_tokens
attention_mask = torch.full((self.max_seq_length,), 0, dtype=torch.long)
attention_mask[:source.shape[0]] = 1
model_inputs.append({
"input_ids": source_,
"labels": target_,
"decoder_input_ids": prev_output_tokens_,
"attention_mask": attention_mask,
})
return default_collate(model_inputs)
class RandengBart(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Randeng BART')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--max_seq_length', type=int, default=512)
parser.add_argument('--sample_content_key', type=str, default='text')
        parser.add_argument('--permute_sentence_ratio', type=float, default=1.0)
return parent_parser
def __init__(self, args, tokenizer, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = BartConfig.from_pretrained(args.model_path)
self.model = BartForConditionalGeneration(config)
self.tokenizer = tokenizer
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
def configure_optimizers(self):
return configure_optimizers(self)
def detokenize(self, token_ids):
toks = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(toks)
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('decoder source: {}'.format(batch['decoder_input_ids'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
print('decoder source: {}'.format(self.detokenize(batch['decoder_input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
output = self.model(**batch)
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
self.log('train_acc', acc, sync_dist=True)
return output.loss
def comput_metrix(self, logits, labels):
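        # Token-level accuracy computed only over the positions whose label is not -100.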
label_idx = labels != -100
labels = labels[label_idx]
logits = logits[label_idx].view(-1, logits.size(-1))
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(**batch)
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
def on_load_checkpoint(self, checkpoint) -> None:
        # Workaround for older pytorch-lightning versions, where the step count is reset to 0 when resuming from a checkpoint.
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = RandengBart.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collator = BartCollator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
masked_lm_prob=args.masked_lm_prob,
content_key=args.sample_content_key,
permute_sentence_ratio=args.permute_sentence_ratio,
)
    # prepare the extra helpers the collator needs (sentence splitter, vocab tables, jieba, ...)
collator.setup()
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collator)
module = RandengBart(args, tokenizer=tokenizer)
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
    # compatibility: if the checkpoint path does not exist, drop the argument, otherwise loading would fail
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(module, data_module, ckpt_path=args.load_ckpt_path)
| 11,778 | 40.769504 | 107 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/GAVAE/generate.py
|
import torch
from transformers import BertTokenizer, T5Tokenizer
from fengshen.models.GAVAE.GAVAEModel import GAVAEModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Randeng-GAVAE-1.2B-Augmentation-Chinese")
decoder_tokenizer = T5Tokenizer.from_pretrained("IDEA-CCNL/Randeng-GAVAE-1.2B-Augmentation-Chinese", eos_token = '<|endoftext|>', pad_token = '<pad>',extra_ids=0)
decoder_tokenizer.add_special_tokens({'bos_token':'<bos>'})
input_texts = [
"非常好的一个博物馆,是我所有去过的博物馆里感觉最正规的一家,凭有效证件可以入馆,可以自助免费存小件物品,讲解员和馆内外的工作人员也非常认真,其他的服务人员也很热情,非常好的!馆内的藏品也让人非常震撼!希望继续保持~",
"这是我来长沙最最期待的一定要去的地方,总算今天特地去瞻仰千古遗容了,开车到门口大屏幕显示着门票已发完的字样,心里一惊以为今天是白来了。但进了停车场才知道凭停车卡和有效身份证里面也能领,停车还不花钱,真好。",
"地方很大 很气派~~可以逛很久~~~去的时候是免费的~不过要安检~~~里面的马王堆~幸追夫人~还是很不错的~~~~去的时候有一个吴越文化特别展~~~东西也很多~~~~~很好看",
"我们到达的时候是下午3点,门票已经发完了。当时正焦虑的不知道怎么办才好,门卫大哥给我们俩补办了门票,这才得以入馆。非常感谢!绝对不虚此行!相当震撼的展览!原来古人也化妆,还有假发。记忆最深的是那个藕汤。可惜真颜已不得见。",
"去过三次,个人认为这是长沙最值得去的地方,博物馆的重点就是辛追,遗憾的是,每次去我都会感到悲哀,虽然我三次去的时候都要门票,但是每次看到辛追,都觉得现代的人类不应该挖她出来,除了第一次我觉得辛追像刚死去一样,后来两次我觉得太惨不忍睹了。建议大家要去就早去,以后肯定越来越腐烂",
"上大学时候去的,当时学生证是半价25,后来凭有效证件就不要钱了。非常喜欢的一家博物馆,里面可看的东西很多,当然最吸引我的就是那个辛追夫人和“素纱单衣”,果然不是盖的~里面的讲解员大部分都是师大学历史类的,非常专业和有耐心。虽然不在长沙了,不过对那里还是很有感情的,赞~~~",
"这两年也有很多机会去博物馆。。。不过还是想说湖南省博物馆是非常有特色的。。。应该说整个展览分成两个部分吧。。。一个部分是马王堆的主体展。。。另一个就是湖南的一些考古发现。。。其实来省博大部分的游客还是冲着马王堆来的吧。。。博物馆也很有心的为每一批游客安排了讲解员。。。从马王堆的发现到马王堆出土文物的介绍再到最后棺木和辛追的介绍。。。真是上了一节很生动的历史课。",
"网上订票去的,还是很顺利的就进去了,里面挺清净的,外围的环境也不错,还有鸽子可以喂。那天不是很闹,兜了一圈感觉还是很顺畅的,老娘娘和金缕玉衣挺震撼的。到此一游还是挺需要的",
]
gavae_model = GAVAEModel.from_pretrained("IDEA-CCNL/Randeng-GAVAE-1.2B-Augmentation-Chinese").to(device)
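# Fit the plug-in GAN on the example sentences above, then sample n new augmented
# sentences of a similar style (behaviour as provided by GAVAEModel).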
gavae_model.train_gan(encoder_tokenizer,decoder_tokenizer,input_texts)
# n: number of samples to generate
texts = gavae_model.generate(n=5)
print(texts)
| 1,783 | 73.333333 | 188 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue_sim/main.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonlines
import torch
import pytorch_lightning as pl
from transformers import AutoTokenizer, BertTokenizer
from train_func import CustomDataset, CustomDataModule, CustomModel
import argparse
import os
import gpustat
if __name__ == '__main__':
my_parser = argparse.ArgumentParser()
my_parser.add_argument(
"--model_path", default="./weights/Erlangshen-MegatronBert-1.3B-Similarity", type=str, required=False)
my_parser.add_argument(
"--model_name", default="IDEA-CCNL/Erlangshen-MegatronBert-1.3B-Similarity", type=str, required=False)
my_parser.add_argument("--max_seq_length", default=64, type=int, required=False)
my_parser.add_argument("--batch_size", default=32, type=int, required=False)
my_parser.add_argument("--val_batch_size", default=64, type=int, required=False)
my_parser.add_argument("--num_epochs", default=10, type=int, required=False)
my_parser.add_argument("--learning_rate", default=4e-5, type=float, required=False)
my_parser.add_argument("--warmup_proportion", default=0.2, type=int, required=False)
my_parser.add_argument("--warmup_step", default=2, type=int, required=False)
my_parser.add_argument("--num_labels", default=3, type=int, required=False)
my_parser.add_argument("--cate_performance", default=False, type=bool, required=False)
my_parser.add_argument("--use_original_pooler", default=True, type=bool, required=False)
my_parser.add_argument("--model_output_path", default='./pl_model', type=str, required=False)
my_parser.add_argument("--mode", type=str, choices=['Train', 'Test'], required=True)
my_parser.add_argument("--predict_model_path", default='./pl_model/', type=str, required=False)
my_parser.add_argument("--test_output_path", default='./submissions', type=str, required=False)
my_parser.add_argument("--optimizer", default='AdamW', type=str, required=False) # ['Adam', 'AdamW']
# ['StepLR', 'CosineWarmup', 'CosineAnnealingLR']
my_parser.add_argument("--scheduler", default='CosineWarmup', type=str, required=False)
my_parser.add_argument("--loss_function", default='LSCE_correction', type=str,
required=False) # ['CE', 'Focal', 'LSCE_correction']
args = my_parser.parse_args()
print(args)
gpustat.print_gpustat()
if 'Erlangshen' in args.model_name:
tokenizer = BertTokenizer.from_pretrained(args.model_name, cache_dir=args.model_path)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name, cache_dir=args.model_path)
seed = 1919
pl.seed_everything(seed)
dm = CustomDataModule(
args=args,
tokenizer=tokenizer,
)
metric_index = 2
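    # metric_index selects which validation metric drives checkpointing below:
    # 0 -> val_loss (min), 1 -> val_acc (max), 2 -> val_f1 (max)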
checkpoint = pl.callbacks.ModelCheckpoint(
save_top_k=1,
verbose=True,
monitor=['val_loss', 'val_acc', 'val_f1'][metric_index],
mode=['min', 'max', 'max'][metric_index]
)
lr_monitor = pl.callbacks.LearningRateMonitor(logging_interval="step")
callbacks = [checkpoint, lr_monitor]
    logger = pl.loggers.TensorBoardLogger(save_dir=os.getcwd(),
                                          name='lightning_logs/' + args.model_name.split('/')[-1])
trainer = pl.Trainer(
progress_bar_refresh_rate=50,
logger=logger,
gpus=-1 if torch.cuda.is_available() else None,
amp_backend='native',
amp_level='O2',
precision=16,
callbacks=callbacks,
gradient_clip_val=1.0,
max_epochs=args.num_epochs,
# accelerator='ddp',
# plugins='ddp_sharded',
)
if args.mode == 'Train':
print('Only Train')
model = CustomModel(
args=args,
)
trainer.fit(model, dm)
# Predict test, save results to json
if args.mode == 'Test':
print('Only Test')
test_loader = torch.utils.data.DataLoader(
CustomDataset('test.json', tokenizer, args.max_seq_length, 'test'),
batch_size=args.val_batch_size,
num_workers=4,
shuffle=False,
pin_memory=True,
drop_last=False
)
model = CustomModel(args=args).load_from_checkpoint(args.predict_model_path, args=args)
predict_results = trainer.predict(model, test_loader, return_predictions=True)
path = os.path.join(
args.test_output_path,
args.model_name.split('/')[-1].replace('-', '_'))
file_path = os.path.join(path, 'qbqtc_predict.json')
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(file_path):
            print('JSON file already exists, it will be overwritten with the new results')
with jsonlines.open(file_path, 'w') as jsonf:
for predict_res in predict_results:
for i, p in zip(predict_res['id'], predict_res['logits']):
jsonf.write({"id": i, "label": str(p)})
print('Json saved:', file_path)
| 5,531 | 40.283582 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue_sim/finetune_clue_sim.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from sklearn import metrics
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import pytorch_lightning as pl
from collections import defaultdict
from transformers import AutoConfig, AutoModel, get_cosine_schedule_with_warmup
from loss import FocalLoss, LabelSmoothingCorrectionCrossEntropy
class CustomDataset(Dataset):
def __init__(self, file, tokenizer, max_len, mode='no_test'):
self.tokenizer = tokenizer
self.max_len = max_len
self.mode = mode
self.ex_list = []
with open('./dataset/' + file, "r", encoding='utf-8') as f:
for line in f:
sample = json.loads(line)
query = sample["query"]
title = sample["title"]
id = int(sample["id"])
if self.mode == 'no_test':
relevant = int(sample["label"])
self.ex_list.append((query, title, relevant, id))
else:
self.ex_list.append((query, title, id))
def __len__(self):
return len(self.ex_list)
def __getitem__(self, index):
if self.mode == 'no_test':
query, title, relevant, id = self.ex_list[index]
else:
query, title, id = self.ex_list[index]
inputs = self.tokenizer.encode_plus(
query, title,
truncation=True,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
token_type_ids = inputs["token_type_ids"]
if self.mode == 'no_test':
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
'targets': torch.tensor(relevant, dtype=torch.float),
'id': torch.tensor(id, dtype=torch.long)
}
else:
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
'id': torch.tensor(id, dtype=torch.long)
}
class CustomDataModule(pl.LightningDataModule):
def __init__(self, args, tokenizer):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.max_len = self.args.max_seq_length
self.train_dataset = None
self.val_dataset = None
def setup(self, stage):
data_path = "./dataset"
assert os.path.exists(os.path.join(data_path, 'train.json'))
assert os.path.exists(os.path.join(data_path, 'dev.json'))
assert os.path.exists(os.path.join(data_path, 'test_public.json'))
if stage == 'fit':
self.train_dataset = CustomDataset('train.json', self.tokenizer, self.max_len)
self.val_dataset = CustomDataset('dev.json', self.tokenizer, self.max_len)
self.test_dataset = CustomDataset('test_public.json', self.tokenizer, self.max_len)
elif stage == 'test':
self.test_dataset = CustomDataset('test_public.json', self.tokenizer, self.max_len)
def train_dataloader(self):
full_dataset = ConcatDataset([self.train_dataset, self.val_dataset])
train_dataloader = DataLoader(
full_dataset,
batch_size=self.args.batch_size,
num_workers=4,
shuffle=True,
pin_memory=True,
drop_last=True)
return train_dataloader
def val_dataloader(self):
val_dataloader = DataLoader(
self.test_dataset,
batch_size=self.args.val_batch_size,
num_workers=4,
shuffle=False,
pin_memory=True,
drop_last=False)
return val_dataloader
def test_dataloader(self):
test_dataloader = DataLoader(
self.test_dataset,
batch_size=self.args.val_batch_size,
num_workers=4,
shuffle=False,
pin_memory=True,
drop_last=False)
return test_dataloader
class CustomModel(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.args = args
self.model = self.args.model_name
self.cache_dir = self.args.model_path
self.scheduler = self.args.scheduler
self.step_scheduler_after = "batch"
self.optimizer = self.args.optimizer
self.pooler = self.args.use_original_pooler
self.category = self.args.cate_performance
self.loss_func = self.args.loss_function
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-7
config = AutoConfig.from_pretrained(self.model, cache_dir=self.cache_dir)
config.update(
{
"output_hidden_states": False,
"hidden_dropout_prob": hidden_dropout_prob,
"layer_norm_eps": layer_norm_eps,
}
)
self.transformer = AutoModel.from_pretrained(self.model, config=config, cache_dir=self.cache_dir)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.linear = torch.nn.Linear(config.hidden_size, self.args.num_labels, bias=True)  # classification head (3 classes for this task)
def configure_optimizers(self):
"""Prepare optimizer and schedule"""
model = self.transformer
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": 0.01,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer_index = ['Adam', 'AdamW'].index(self.optimizer)
optimizer = [
torch.optim.Adam(optimizer_grouped_parameters, lr=self.args.learning_rate),
torch.optim.AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate)][optimizer_index]
scheduler_index = ['StepLR', 'CosineWarmup', 'CosineAnnealingLR'].index(self.scheduler)
scheduler = [
torch.optim.lr_scheduler.StepLR(optimizer, step_size=self.args.warmup_step,
gamma=self.args.warmup_proportion),
get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=int(self.args.warmup_proportion * self.total_steps),
num_training_steps=self.total_steps,
),
torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=2e-06)][scheduler_index]
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
def setup(self, stage=None):
if stage != "fit":
return
# calculate total steps
train_dataloader = self.trainer.datamodule.train_dataloader()
gpus = 0 if self.trainer.gpus is None else self.trainer.gpus
tb_size = self.args.batch_size * max(1, gpus)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_dataloader.dataset) // tb_size) // ab_size
def loss(self, outputs, targets):
lossf_index = ['CE', 'Focal', 'LSCE_correction'].index(self.loss_func)
loss_fct = [nn.CrossEntropyLoss(), FocalLoss(), LabelSmoothingCorrectionCrossEntropy()][lossf_index]
loss = loss_fct(outputs, targets)
return loss
def category_performance_measure(self, labels_right, labels_pred, num_label=3):
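        # Per-class F1 computed from raw counts; returns a list with one F1 value
        # per label id (used when --cate_performance is enabled).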
text_labels = [i for i in range(num_label)]
        TP = dict.fromkeys(text_labels, 0)  # number of correct predictions per class
        TP_FP = dict.fromkeys(text_labels, 0)  # number of gold (ground-truth) examples per class
        TP_FN = dict.fromkeys(text_labels, 0)  # number of predicted examples per class
label_dict = defaultdict(list)
for num in range(num_label):
label_dict[num].append(str(num))
        # accumulate the TP / gold / prediction counts
for i in range(0, len(labels_right)):
TP_FP[labels_right[i]] += 1
TP_FN[labels_pred[i]] += 1
if labels_right[i] == labels_pred[i]:
TP[labels_right[i]] += 1
        # compute precision P, recall R and F1 for each class
results = []
for key in TP_FP:
P = float(TP[key]) / float(TP_FP[key] + 1e-9)
R = float(TP[key]) / float(TP_FN[key] + 1e-9)
F1 = P * R * 2 / (P + R) if (P + R) != 0 else 0
# results.append("%s:\t P:%f\t R:%f\t F1:%f" % (key, P, R, F1))
results.append(F1)
return results
def monitor_metrics(self, outputs, targets):
pred = torch.argmax(outputs, dim=1).cpu().numpy().tolist()
targets = targets.int().cpu().numpy().tolist()
if self.category:
category_results = self.category_performance_measure(
labels_right=targets,
labels_pred=pred,
num_label=self.args.num_labels
)
return {"f1": category_results}
else:
f1_score = metrics.f1_score(targets, pred, average="macro")
return {"f1": f1_score}
def forward(self, ids, mask, token_type_ids, labels):
transformer_out = self.transformer(input_ids=ids, attention_mask=mask, token_type_ids=token_type_ids)
if self.pooler:
pooler_output = transformer_out.pooler_output
else:
sequence_output = transformer_out.last_hidden_state
pooler_output = torch.mean(sequence_output, dim=1)
logits = self.linear(self.dropout(pooler_output))
labels_hat = torch.argmax(logits, dim=1)
correct_count = torch.sum(labels == labels_hat)
return logits, correct_count
def predict(self, ids, mask, token_type_ids):
transformer_out = self.transformer(input_ids=ids, attention_mask=mask, token_type_ids=token_type_ids)
pooler_output = transformer_out.pooler_output
logits = self.linear(self.dropout(pooler_output))
logits = torch.argmax(logits, dim=1)
return logits
def training_step(self, batch, batch_idx):
ids, mask, token_type_ids, labels = batch['ids'], batch['mask'], batch['token_type_ids'], batch['targets']
logits, correct_count = self.forward(ids, mask, token_type_ids, labels)
loss = self.loss(logits, labels.long())
f1 = self.monitor_metrics(logits, labels)["f1"]
self.log("train_loss", loss, logger=True, prog_bar=True)
self.log('train_acc', correct_count.float() / len(labels), logger=True, prog_bar=True)
if self.category:
self.log("train_f1_key0", f1[0], logger=True, prog_bar=True)
self.log("train_f1_key1", f1[1], logger=True, prog_bar=True)
self.log("train_f1_key2", f1[2], logger=True, prog_bar=True)
else:
self.log("train_f1", f1, logger=True, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
ids, mask, token_type_ids, labels = batch['ids'], batch['mask'], batch['token_type_ids'], batch['targets']
logits, correct_count = self.forward(ids, mask, token_type_ids, labels)
loss = self.loss(logits, labels.long())
f1 = self.monitor_metrics(logits, labels)["f1"]
self.log("val_loss", loss, logger=True, prog_bar=True)
self.log("val_acc", correct_count.float() / len(labels), logger=True, prog_bar=True)
if self.category:
self.log("val_f1_key0", f1[0], logger=True, prog_bar=True)
self.log("val_f1_key1", f1[1], logger=True, prog_bar=True)
self.log("val_f1_key2", f1[2], logger=True, prog_bar=True)
else:
self.log("val_f1", f1, logger=True, prog_bar=True)
def test_step(self, batch, batch_idx):
ids, mask, token_type_ids, labels = batch['ids'], batch['mask'], batch['token_type_ids'], batch['targets']
logits, correct_count = self.forward(ids, mask, token_type_ids, labels)
loss = self.loss(logits, labels.long())
f1 = self.monitor_metrics(logits, labels)["f1"]
self.log("test_loss", loss, logger=True, prog_bar=True)
self.log("test_acc", correct_count.float() / len(labels), logger=True, prog_bar=True)
if self.category:
self.log("test_f1_key0", f1[0], logger=True, prog_bar=True)
self.log("test_f1_key1", f1[1], logger=True, prog_bar=True)
self.log("test_f1_key2", f1[2], logger=True, prog_bar=True)
else:
self.log("test_f1", f1, logger=True, prog_bar=True)
return {"test_loss": loss, "logits": logits, "labels": labels}
def predict_step(self, batch, batch_idx, dataloader_idx):
ids, mask, token_type_ids, id = batch['ids'], batch['mask'], batch['token_type_ids'], batch['id']
logits = self.predict(ids, mask, token_type_ids)
return {'id': id.cpu().numpy().tolist(), 'logits': logits.cpu().numpy().tolist()}
| 13,899 | 41.638037 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue_sim/loss.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.nn import functional as F
class FocalLoss(torch.nn.Module):
"""Multi-class Focal loss implementation"""
def __init__(self, gamma=2, weight=None, ignore_index=-100):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.weight = weight
self.ignore_index = ignore_index
def forward(self, input, target):
"""
input: [N, C]
target: [N, ]
"""
logpt = F.log_softmax(input, dim=1)
pt = torch.exp(logpt)
logpt = (1-pt)**self.gamma * logpt
loss = F.nll_loss(logpt, target, self.weight, ignore_index=self.ignore_index)
return loss
# Label-smoothed cross entropy with an extra correction term, used to curb overfitting
class LabelSmoothingCorrectionCrossEntropy(torch.nn.Module):
def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothingCorrectionCrossEntropy, self).__init__()
self.eps = eps
self.reduction = reduction
self.ignore_index = ignore_index
def forward(self, output, target):
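        # Standard label smoothing (a uniform term weighted by eps plus
        # (1 - eps) * NLL) combined with a task-specific correction term that is
        # built below from the predicted and gold label indices using
        # dataset-tuned constants.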
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction == 'sum':
loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
# task specific
labels_hat = torch.argmax(output, dim=1)
lt_sum = labels_hat + target
abs_lt_sub = abs(labels_hat - target)
correction_loss = 0
for i in range(c):
if lt_sum[i] == 0:
pass
elif lt_sum[i] == 1:
if abs_lt_sub[i] == 1:
pass
else:
correction_loss -= self.eps*(0.5945275813408382)
else:
correction_loss += self.eps*(1/0.32447699714575207)
correction_loss /= c
# print(correction_loss)
return loss*self.eps/c + (1-self.eps) * \
F.nll_loss(log_preds, target, reduction=self.reduction, ignore_index=self.ignore_index) + correction_loss
| 2,693 | 33.538462 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/clue_sim/__init__.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/deepVAE/vae_pl_module.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """
import os
import torch
import numpy as np
from fengshen.models.deepVAE.deep_vae import DeepVAE
from pytorch_lightning.core.lightning import LightningModule
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.models.bert.tokenization_bert import BertTokenizer
from fengshen.models.deepVAE.latent_connector import GPT2ForDecoderLatentConnector, GPT2ForEncoderLatentConnector
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
class DeepVAEModule(LightningModule):
@classmethod
def add_module_specific_args(cls, parser):
group = parser.add_argument_group('vae', 'configurations')
group.add_argument("--checkpoint_path", type=str, default=None)
group.add_argument("--gpt2_model_path", type=str)
group.add_argument("--beta_kl_constraints_start", default=1, type=float,
help="min beta for all the latent z posterior vs prior kl loss")
group.add_argument("--beta_kl_constraints_stop", default=1, type=float,
help="max beta for all the latent z posterior vs prior kl loss")
group.add_argument("--beta_n_cycles", default=30, type=int,
help="number of cycles for kl loss ratio within an epoch")
group.add_argument("--freebit_kl_constraints", default=.1, type=float,
help="free bit for all the latent z kl loss")
group.add_argument("--latent_dim", default=256, type=int,
help="latent dimension of deepVAE Z")
group.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
group.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
group.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
group.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
group.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
group.add_argument("--CVAE", action='store_true',
help="specify this argument if finetuning CVAE, otherwise ignore this argument")
return parser
@classmethod
def load_model(cls, args, labels_dict=None):
checkpoint = torch.load(os.path.join(args.checkpoint_path, 'mp_rank_00_model_states.pt'))
latent_dim = checkpoint['latent_dim'] if ('latent_dim' in checkpoint.keys()) else args.latent_dim
labels_dict = checkpoint['label_dict'] if ('label_dict' in checkpoint.keys()) else labels_dict
enc_config = GPT2Config.from_pretrained(args.gpt2_model_path)
tokenizer = BertTokenizer.from_pretrained(args.gpt2_model_path)
special_tokens_dict = {'bos_token': '<BOS>', 'eos_token': '<EOS>'}
# special_tokens_dict = {'bos_token': '<BOS>', 'eos_token': '<EOS>', 'additional_special_tokens': ['<ENT>', '<ENS>']}
tokenizer.add_special_tokens(special_tokens_dict)
encoder_model = GPT2ForEncoderLatentConnector(config=enc_config)
encoder_model.resize_token_embeddings(len(tokenizer))
dec_config = GPT2Config.from_pretrained(args.gpt2_model_path)
decoder_model = GPT2ForDecoderLatentConnector(config=dec_config, latent_dim=latent_dim)
decoder_model.resize_token_embeddings(len(tokenizer))
vae_model = DeepVAE(encoder_model, decoder_model, latent_dim=latent_dim,
hidden_dim=enc_config.hidden_size, layer_num=enc_config.num_hidden_layers,
pad_token_id=tokenizer.pad_token_id, unk_token_id=tokenizer.unk_token_id,
bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id,
CVAE=args.CVAE)
# TODO: all the related params should be loaded here! Including latent_nets, posterior_nets, prior_nets, pooling, decoder.transformer.Wv, decoder.transformer.Wz
anchor = 'module.model.'
start = len(anchor)
vae_dict = {key[start:]: val for key, val in checkpoint['module'].items() if anchor in key}
# comment out if not initialized from VAE
# if args.CVAE:
# # manually load prior and posterior if initialize CVAE model for the first time because of dim mismatch
# prior_post_dict = {key: vae_dict.pop(key) for key in list(vae_dict) if ('posterior_nets' in key or 'prior_nets' in key)}
# for idx in range(enc_config.num_hidden_layers):
# vae_model.posterior_nets[idx].weight.data[:, enc_config.hidden_size:] = prior_post_dict[f"posterior_nets.{idx}.weight"]
# vae_model.prior_nets[idx].weight.data[:, enc_config.hidden_size:] = prior_post_dict[f"prior_nets.{idx}.weight"]
# enc_wte_shape, dec_wte_shape = vae_dict['encoder.transformer.wte.weight'].shape[0], vae_dict['decoder.transformer.wte.weight'].shape[0]
# vae_model.encoder.transformer.wte.weight.data[:enc_wte_shape, :] = vae_dict.pop('encoder.transformer.wte.weight')
# vae_model.decoder.transformer.wte.weight.data[:dec_wte_shape, :] = vae_dict.pop('decoder.transformer.wte.weight')
# vae_model.decoder.lm_head.weight.data[:dec_wte_shape, :] = vae_dict.pop('decoder.lm_head.weight')
missing_keys, unexpected_keys = vae_model.load_state_dict(vae_dict, strict=False)
print(f"Vae model loading process: missing keys {missing_keys}, unexpected keys {unexpected_keys}")
return vae_model, tokenizer
def __init__(
self,
args,
train_steps=0,
labels_dict=None
):
super().__init__()
# self.save_hyperparameters()
self.args = args
if args.checkpoint_path is not None:
            # load_model returns (vae_model, tokenizer); the same tokenizer is shared
            # by the encoder and the decoder when restoring from a checkpoint
            self.model, self.encoder_tokenizer = DeepVAEModule.load_model(self.args, labels_dict=labels_dict)
            self.decoder_tokenizer = self.encoder_tokenizer
            self.latent_dim = self.args.latent_dim
            self.labels_dict = labels_dict
else:
self.encoder_tokenizer = BertTokenizer.from_pretrained(self.args.encoder_model_path)
encoder_config = GPT2Config.from_pretrained(self.args.encoder_model_path)
special_tokens_dict = {'bos_token': '<BOS>', 'eos_token': '<EOS>', 'additional_special_tokens': ['<ENT>', '<ENS>']}
self.encoder_tokenizer.add_special_tokens(special_tokens_dict)
self.latent_dim = self.args.latent_dim
encoder = GPT2ForEncoderLatentConnector.from_pretrained(self.args.encoder_model_path, config=encoder_config)
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
encoder.resize_token_embeddings(len(self.encoder_tokenizer))
self.decoder_tokenizer = BertTokenizer.from_pretrained(self.args.decoder_model_path)
self.decoder_tokenizer.add_special_tokens(special_tokens_dict)
decoder_config = GPT2Config.from_pretrained(self.args.decoder_model_path)
self.labels_dict = labels_dict
decoder = GPT2ForDecoderLatentConnector.from_pretrained(self.args.decoder_model_path, config=decoder_config,
latent_dim=self.latent_dim)
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
decoder.resize_token_embeddings(len(self.decoder_tokenizer))
self.model = DeepVAE(encoder, decoder, latent_dim=self.args.latent_dim,
hidden_dim=encoder_config.hidden_size, layer_num=encoder_config.num_hidden_layers,
pad_token_id=self.decoder_tokenizer.pad_token_id, unk_token_id=self.decoder_tokenizer.unk_token_id,
bos_token_id=self.decoder_tokenizer.bos_token_id, eos_token_id=self.decoder_tokenizer.eos_token_id,
CVAE=args.CVAE)
self.train_steps = train_steps
# TODO: adjust the cyclic schedule
self.beta_kl_constraints_list = self.get_cyclic_linear_beta_list(self.train_steps,
start=args.beta_kl_constraints_start, stop=args.beta_kl_constraints_stop, n_cycle=args.beta_n_cycles)
# self.mlm_probability_list = self.get_decoder_beta_list(self.train_steps,
# start=0., stop=1., n_cycle=args.beta_n_cycles)
# self.beta_kl_constraints_list = self.get_constant_ratio(self.train_steps, args.beta_kl_constraints)
self.mlm_probability_list = self.get_constant_ratio(self.train_steps, 0.)
# self.freebit_kl_constraints = args.freebit_kl_constraints
def get_constant_ratio(self, n_steps, ratio):
L = np.ones(n_steps)
L *= ratio
return L
def get_decoder_beta_list(self, n_steps, start=0., stop=1.0, n_cycle=4):
L = np.ones(n_steps)
t_range = int(n_steps / n_cycle)
for t_cur in range(n_steps):
if t_cur > t_range:
L[t_cur] = 0.
else:
ratio = t_cur / t_range
value = stop - ratio * (stop-start)
L[t_cur] = value
return L
def get_cyclic_linear_beta_list(self, n_steps, start=0.5, stop=1.0, n_cycle=4):
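        # Cyclical KL annealing: within each of the n_cycle windows beta is held at
        # `start` for the first half, follows a linear ramp during the third
        # quarter, and is held at `stop` for the final quarter.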
L = np.ones(n_steps)
t_range = int(n_steps / n_cycle)
for t_cur in range(n_steps):
loc = t_cur % t_range
split_range = int(t_range * 0.25)
if loc <= 2*split_range:
value = start
elif loc <= 3*split_range:
ratio = (loc % split_range) / split_range
value = ratio * (stop-start)
else:
value = stop
L[t_cur] = value
return L
#####
# Torch lightning
#####
def on_save_checkpoint(self, checkpoint) -> None:
checkpoint['label_dict'] = self.labels_dict
checkpoint['latent_dim'] = self.latent_dim
def training_step(self, batch, batch_idx):
if batch is None:
loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
loss.requires_grad = True
return loss
inputs, cond_inputs = batch, None
if self.args.CVAE:
inputs, cond_inputs = batch
total_loss, rec_loss, total_kl_loss, layer_kl_loss = \
self.model(inputs, self.beta_kl_constraints_list[batch_idx], cond_inputs)
# the logging interval are set by the trainer_args log_every_n_steps
for idx, pg in enumerate(self.optimizers().param_groups):
self.log(f"learning_rate_{idx}", pg['lr'])
unscaled_kl_constraint_loss = 0. if self.beta_kl_constraints_list[batch_idx] == 0. else total_kl_loss/self.beta_kl_constraints_list[batch_idx]
self.log("total_loss", total_loss)
self.log("total_kl_constraint_loss", total_kl_loss)
self.log("unscaled_kl_constraint_loss", unscaled_kl_constraint_loss)
self.log("beta_kl_constraints", self.beta_kl_constraints_list[batch_idx])
self.log("beta_mlm_probability", self.mlm_probability_list[batch_idx])
self.log("rec_loss", rec_loss)
for idx, kl_loss in enumerate(layer_kl_loss):
self.log(f"layer_{idx}_kl_loss", kl_loss.mean())
return total_loss
def training_step_end(self, batch_parts):
pass
def training_epoch_end(self, outputs):
pass
def validation_step(self, batch, batch_idx):
if batch is None:
loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
loss.requires_grad = True
return loss
inputs, cond_inputs = batch, None
if self.args.CVAE:
inputs, cond_inputs = batch
total_loss, rec_loss, total_kl_loss, layer_kl_loss = self.model(inputs, 1., cond_inputs)
# the logging interval are set by the trainer_args log_every_n_steps
self.log("val_total_loss", total_loss)
self.log("val_kl_constraint_loss", total_kl_loss)
self.log("val_recon_loss", rec_loss)
for idx, kl_loss in enumerate(layer_kl_loss):
self.log(f"layer_{idx}_kl_loss", kl_loss.mean())
return total_loss
def validation_epoch_end(self, outputs):
pass
def test_step(self, batch, batch_idx):
if batch is None:
loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
loss.requires_grad = True
return loss
inputs, cond_inputs = batch, None
if self.args.CVAE:
inputs, cond_inputs = batch
total_loss, rec_loss, total_kl_loss, layer_kl_loss = self.model(inputs, 1., cond_inputs)
self.log("test_total_loss", total_loss)
self.log("test_recon_loss", rec_loss)
self.log("test_kl_constraint_loss", total_kl_loss)
for idx, kl_loss in enumerate(layer_kl_loss):
self.log(f"layer_{idx}_kl_loss", kl_loss.mean())
return total_loss
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': self.args.weight_decay},
{'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.train_steps)
return {'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}
| 14,840 | 52.193548 | 175 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/deepVAE/pretrain_deep_vae.py
|
import torch
import os
import random
import math
import argparse
from fengshen.data.fs_datasets.fs_datamodule import FSDataModule
from fengshen.examples.deepVAE.vae_pl_module import DeepVAEModule
from pytorch_lightning import (
Trainer,
loggers,
)
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from torch.nn.utils.rnn import pad_sequence
class NER_RE_Collator:
def __init__(self, bos_token, eos_token, sep_token) -> None:
self.bos_token = bos_token
self.eos_token = eos_token
self.sep_token = sep_token
def __call__(self, samples, max_len=128):
# when len(samples) is larger than one, we need to save the sentence length info
inputs_tensors, entity_tensors = [], []
for sp in samples:
# NOTE: in TD-VAE, both encoder and decoder are gpt2, thus use decoder sent twice !
input_entities, input_ids = sp['decoder_entities'], sp['decoder_target']
input_entities = input_entities[:max_len] + [self.sep_token]
# shorten input_ids, based on the fact that sentence must be longer than the entities
input_ids = [self.bos_token] + input_ids[:max_len] + [self.eos_token]
entity_tensors.append(torch.tensor(input_entities, dtype=torch.long))
inputs_tensors.append(torch.tensor(input_ids, dtype=torch.long))
if not inputs_tensors or not entity_tensors:
return None # if all the examples in the batch exceed max_length sentence
inputs_tensors = pad_sequence(inputs_tensors, batch_first=True, padding_value=0)
entity_tensors = pad_sequence(entity_tensors, batch_first=True, padding_value=0)
return inputs_tensors, entity_tensors
class TDVAECollator:
def __init__(self, bos_token, eos_token) -> None:
self.bos_token = bos_token
self.eos_token = eos_token
def __call__(self, samples, max_len=120):
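        # For each paragraph, randomly pick one sentence shorter than max_len,
        # wrap it with BOS/EOS and pad the batch; paragraphs without any
        # short-enough sentence are skipped.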
# when len(samples) is larger than one, we need to save the sentence length info
inputs = []
for sp in samples:
# NOTE: in TD-VAE, both encoder and decoder are gpt2, thus use decoder sent twice !
sent_lengths, input_ids = sp['decoder_sent_lengths'], sp['decoder_target']
potential_indices = [idx for idx, slen in enumerate(sent_lengths) if slen < max_len]
if len(potential_indices) == 0:
continue # we ignore paragraphs with only one sentence split
selected_idx = random.choice(potential_indices)
start_pos, end_pos = sum(sent_lengths[:selected_idx]), sum(sent_lengths[:selected_idx])+sent_lengths[selected_idx]
selected_input_ids = [self.bos_token] + input_ids[start_pos:end_pos] + [self.eos_token]
inputs.append(torch.tensor(selected_input_ids, dtype=torch.long))
if not inputs:
return None # if all the examples in the batch exceed max_length sentence
inputs = pad_sequence(inputs, batch_first=True, padding_value=0)
return inputs
class ZH_Fin_Collator:
def __init__(self, bos_token, eos_token) -> None:
self.bos_token = bos_token
self.eos_token = eos_token
def __call__(self, samples, max_len=120):
inputs = []
for sp in samples:
# NOTE: in TD-VAE, both encoder and decoder are gpt2, thus use decoder sent twice !
input_ids = sp['input_ids']
if len(input_ids) == 0:
continue # we ignore paragraphs with empty string
selected_input_ids = [self.bos_token] + input_ids + [self.eos_token]
inputs.append(torch.tensor(selected_input_ids, dtype=torch.long))
if not inputs:
return None
inputs = pad_sequence(inputs, batch_first=True, padding_value=0)
return inputs
class VAEModelCheckpoint:
    @staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='total_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument('--filename', default='model-{epoch:2d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=-1, type=int)
parser.add_argument('--every_n_train_steps', default=1000, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
@staticmethod
def get_callback(args):
return ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = FSDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = DeepVAEModule.add_module_specific_args(args_parser)
args_parser = VAEModelCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
# TODO: update this to be tokenizer specific
# collator = NER_RE_Collator(bos_token=21128, eos_token=21129, sep_token=102)
# collator = TDVAECollator(bos_token=21128, eos_token=21129)
collator = ZH_Fin_Collator(bos_token=21128, eos_token=21129)
data_module = FSDataModule(args=args, collate_fn=collator)
train_steps = math.ceil(len(data_module.train_dataset)*args.max_epochs /
args.train_batchsize / args.num_nodes / args.gpus)
model = DeepVAEModule(args, train_steps)
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'), name='deepvae_lightning')
save_cpt_callback = VAEModelCheckpoint.get_callback(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = Trainer.from_argparse_args(args,
callbacks=[save_cpt_callback, lr_monitor],
logger=logger)
trainer.fit(model, data_module)
| 6,369 | 43.859155 | 126 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/unimc/example.py
|
import argparse
from fengshen.pipelines.multiplechoice import UniMCPipelines
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser = UniMCPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
pretrained_model_path = 'IDEA-CCNL/Erlangshen-UniMC-RoBERTa-110M-Chinese'
args.learning_rate = 2e-5
args.max_length = 512
args.max_epochs = 3
args.batchsize = 8
args.train = 'train'
args.default_root_dir = './'
model = UniMCPipelines(args, model_path=pretrained_model_path)
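    # Each example pairs the input text (texta/textb) with a natural-language
    # question and a list of candidate choices; `answer` is the gold choice string
    # and `label` is its index in `choice`.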
    train_data = [  # training data
{
"texta": "凌云研发的国产两轮电动车怎么样,有什么惊喜?",
"textb": "",
"question": "下面新闻属于哪一个类别?",
"choice": [
"教育",
"科技",
"军事",
"旅游",
"国际",
"股票",
"农业",
"电竞"
],
"answer": "科技",
"label": 1,
"id": 0
}
]
    dev_data = [  # validation data
{
"texta": "我四千一个月,老婆一千五一个月,存款八万且有两小孩,是先买房还是先买车?",
"textb": "",
"question": "下面新闻属于哪一个类别?",
"choice": [
"故事",
"文化",
"娱乐",
"体育",
"财经",
"房产",
"汽车"
],
"answer": "汽车",
"label": 6,
"id": 0
}
]
    test_data = [  # test data
{"texta": "街头偶遇2018款长安CS35,颜值美炸!或售6万起,还买宝骏510?",
"textb": "",
"question": "下面新闻属于哪一个类别?",
"choice": [
"房产",
"汽车",
"教育",
"军事"
],
"answer": "汽车",
"label": 1,
"id": 7759}
]
if args.train:
model.train(train_data, dev_data)
result = model.predict(test_data)
for line in result:
print(line)
if __name__ == "__main__":
main()
| 1,963 | 22.380952 | 77 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/PPVAE/generate.py
|
import torch
from transformers import BertTokenizer, T5Tokenizer
from fengshen.models.PPVAE.pluginVAE import PPVAEModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Randeng-PPVAE-1.2B-Augmentation-Chinese")
decoder_tokenizer = T5Tokenizer.from_pretrained("IDEA-CCNL/Randeng-PPVAE-1.2B-Augmentation-Chinese", eos_token = '<|endoftext|>', pad_token = '<pad>',extra_ids=0)
decoder_tokenizer.add_special_tokens({'bos_token':'<bos>'})
ppvae_model = PPVAEModel.from_pretrained("IDEA-CCNL/Randeng-PPVAE-1.2B-Augmentation-Chinese").to(device)
input_texts = [
"非常好的一个博物馆,是我所有去过的博物馆里感觉最正规的一家,凭有效证件可以入馆,可以自助免费存小件物品,讲解员和馆内外的工作人员也非常认真,其他的服务人员也很热情,非常好的!馆内的藏品也让人非常震撼!希望继续保持~",
"这是我来长沙最最期待的一定要去的地方,总算今天特地去瞻仰千古遗容了,开车到门口大屏幕显示着门票已发完的字样,心里一惊以为今天是白来了。但进了停车场才知道凭停车卡和有效身份证里面也能领,停车还不花钱,真好。",
"地方很大 很气派~~可以逛很久~~~去的时候是免费的~不过要安检~~~里面的马王堆~幸追夫人~还是很不错的~~~~去的时候有一个吴越文化特别展~~~东西也很多~~~~~很好看",
"我们到达的时候是下午3点,门票已经发完了。当时正焦虑的不知道怎么办才好,门卫大哥给我们俩补办了门票,这才得以入馆。非常感谢!绝对不虚此行!相当震撼的展览!原来古人也化妆,还有假发。记忆最深的是那个藕汤。可惜真颜已不得见。",
"去过三次,个人认为这是长沙最值得去的地方,博物馆的重点就是辛追,遗憾的是,每次去我都会感到悲哀,虽然我三次去的时候都要门票,但是每次看到辛追,都觉得现代的人类不应该挖她出来,除了第一次我觉得辛追像刚死去一样,后来两次我觉得太惨不忍睹了。建议大家要去就早去,以后肯定越来越腐烂",
"上大学时候去的,当时学生证是半价25,后来凭有效证件就不要钱了。非常喜欢的一家博物馆,里面可看的东西很多,当然最吸引我的就是那个辛追夫人和“素纱单衣”,果然不是盖的~里面的讲解员大部分都是师大学历史类的,非常专业和有耐心。虽然不在长沙了,不过对那里还是很有感情的,赞~~~",
"这两年也有很多机会去博物馆。。。不过还是想说湖南省博物馆是非常有特色的。。。应该说整个展览分成两个部分吧。。。一个部分是马王堆的主体展。。。另一个就是湖南的一些考古发现。。。其实来省博大部分的游客还是冲着马王堆来的吧。。。博物馆也很有心的为每一批游客安排了讲解员。。。从马王堆的发现到马王堆出土文物的介绍再到最后棺木和辛追的介绍。。。真是上了一节很生动的历史课。",
"网上订票去的,还是很顺利的就进去了,里面挺清净的,外围的环境也不错,还有鸽子可以喂。那天不是很闹,兜了一圈感觉还是很顺畅的,老娘娘和金缕玉衣挺震撼的。到此一游还是挺需要的",
]
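# Train the plug-in VAE on the positive example sentences only
# (negative_samples=None), then decode n new sentences from the learned latent
# region (behaviour as provided by PPVAEModel).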
ppvae_model.train_plugin(encoder_tokenizer,decoder_tokenizer,input_texts,negative_samples=None)
# n: number of samples to generate
texts = ppvae_model.generate(n=5)
print(texts)
| 1,807 | 74.333333 | 188 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_bert/pretrain_bert.py
|
from data.bert_dataloader.load import BertDataModule
from transformers import (
BertTokenizer,
BertConfig,
BertForPreTraining,
BertModel,
BertForMaskedLM
)
from pytorch_lightning import (
LightningDataModule,
LightningModule,
loggers,
Trainer,
)
from pytorch_lightning.callbacks import (
ModelCheckpoint,
LearningRateMonitor,
)
from typing import Optional
from torch.utils.data import DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
import argparse
import sys
import torch
import os
import re
import jieba
import numpy as np
# If the fengshen module is not installed, add Fengshenbang-LM/fengshen to the system path (PYTHONPATH)
sys.path.insert(0, '../../../fengshen')
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
class DataCollate(object):
def __init__(self, tokenizer, max_length, mask_rate=0.15, max_ngram=3, if_padding=True) -> None:
self.tokenizer = tokenizer
self.max_length = max_length
self.word_cuter = jieba.cut
self.vocab_length = len(tokenizer)
self.mask_rate = mask_rate
self.ignore_labels = -100
self.ngrams = np.arange(1, max_ngram + 1, dtype=np.int64)
pvals = 1. / np.arange(1, max_ngram + 1)
pvals /= pvals.sum(keepdims=True) # p(n) = 1/n / sigma(1/k)
self.pvals = pvals
self.padding = if_padding
def token_process(self, token_id):
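        # BERT-style replacement for a position selected for masking:
        # 80% -> [MASK], 10% -> keep the original token, 10% -> a random vocab id.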
rand = np.random.random()
if rand <= 0.8:
return self.tokenizer.mask_token_id
elif rand <= 0.9:
return token_id
else:
return np.random.randint(1, self.vocab_length)
def __call__(self, samples):
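        # Whole-word masking with n-gram spans: the text is segmented with jieba;
        # a word is copied through unchanged (labels -100) when the random draw
        # exceeds mask_rate and the word has fewer than 4 characters, otherwise an
        # n-gram of consecutive words starting there is masked (n drawn from
        # self.pvals) and the original token ids of those positions become labels.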
input_ids = []
attention_mask = []
token_type_ids = []
batch_labels = []
# print('^-^ batch size :',len(samples))
for sample in samples:
word_list = list(self.word_cuter(sample['text']))
mask_ids, labels = [], []
record = []
for i in range(len(word_list)):
rands = np.random.random()
if i in record:
continue
word = word_list[i]
if rands > self.mask_rate and len(word) < 4:
word = word_list[i]
                    word_encode = self.tokenizer.encode(word, add_special_tokens=False)
for token in word_encode:
mask_ids.append(token)
labels.append(self.ignore_labels)
record.append(i)
else:
n = np.random.choice(self.ngrams, p=self.pvals)
for index in range(n):
ind = index + i
if ind in record or ind >= len(word_list):
continue
record.append(ind)
word = word_list[ind]
                        word_encode = self.tokenizer.encode(word, add_special_tokens=False)
for token in word_encode:
mask_ids.append(self.token_process(token))
labels.append(token)
if self.padding:
if len(mask_ids) > self.max_length:
input_ids.append(mask_ids[:self.max_length])
batch_labels.append(labels[:self.max_length])
else:
                    length = len(mask_ids)
                    mask_ids.extend([0]*(self.max_length-length))
                    labels.extend([-100]*(self.max_length-length))
input_ids.append(mask_ids)
batch_labels.append(labels)
attention_mask.append([1]*self.max_length)
token_type_ids.append([0]*self.max_length)
# print('sentence:',sample['text'])
# print('input_ids:',mask_ids)
# print('decode inputids:',self.tokenizer.decode(mask_ids))
# print('labels',labels)
# print('decode labels:',self.tokenizer.decode(labels))
# print('*'*20)
return {
'input_ids': torch.tensor(input_ids),
'labels': torch.tensor(batch_labels),
'attention_mask': torch.tensor(attention_mask),
'token_type_ids': torch.tensor(token_type_ids)
}
class Bert(LightningModule):
@staticmethod
def add_module_specific_args(args_parser):
parser = args_parser.add_argument_group('Bert')
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return args_parser
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.bertconfig = BertConfig.from_pretrained(args.model_path)
# self.model = BertForPreTraining(self.bertconfig)
self.model = BertForMaskedLM(self.bertconfig)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) // tb_size) // ab_size
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.hparams.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.hparams.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_steps * self.hparams.warmup),
self.total_steps)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def training_step(self, batch, batch_idx):
output = self.model(**batch)
# print(output)
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
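        # Masked-LM accuracy: positions with a negative (ignored) label are masked
        # out before comparing argmax predictions with the labels.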
ones = torch.ones_like(labels)
zero = torch.zeros_like(labels)
mask = torch.where(labels < 0, zero, ones)
mask = mask.view(size=(-1,)).float()
# y_true=labels.view(size=(-1,)).float()
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
corr = torch.multiply(corr.float(), mask)
acc = torch.sum(corr.float()) / torch.sum(mask)
return acc
def validation_step(self, batch, batch_idx):
output = self.model(**batch)
# print(output)
acc = self.comput_metrix(output.logits, batch['labels'])
print('val_loss ', output.loss)
self.log('val_loss', output.loss)
self.log('val_acc', acc)
# pass
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.prediction_logits
class CustomCKPT:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('ckpt call back')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', action='store_true', default=False)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = BertDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = Bert.add_module_specific_args(args_parser)
args_parser = CustomCKPT.add_argparse_args(args_parser)
args_parser.add_argument('--deepspeed')
args_parser.add_argument('--seq_max_length')
args = args_parser.parse_args()
tokenizer = BertTokenizer.from_pretrained(args.model_path)
collate_fn = DataCollate(tokenizer, 512)
data_module = BertDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = Bert(args)
print('model load complete')
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'),
name=os.path.basename(os.path.dirname(args.model_path)))
checkpoint_callback = CustomCKPT(args).callbacks
if args.resume_from_checkpoint is not None and \
not os.path.exists(args.resume_from_checkpoint):
        print('-------- warning: no checkpoint found, removing resume_from_checkpoint --------')
del args.resume_from_checkpoint
# autotuning
if args.deepspeed is not None:
os.environ['PL_DEEPSPEED_CONFIG_PATH'] = args.deepspeed
trainer = Trainer.from_argparse_args(args, logger=logger,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, data_module)
| 10,636 | 37.125448 | 100 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/metric/utils_ner.py
|
import csv
import json
import torch
from transformers import BertTokenizer
class CNerTokenizer(BertTokenizer):
def __init__(self, vocab_file, do_lower_case=True):
super().__init__(vocab_file=str(vocab_file), do_lower_case=do_lower_case)
self.vocab_file = str(vocab_file)
self.do_lower_case = do_lower_case
def tokenize(self, text):
_tokens = []
for c in text:
if self.do_lower_case:
c = c.lower()
if c in self.vocab:
_tokens.append(c)
else:
_tokens.append('[UNK]')
return _tokens
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_text(self, input_file):
lines = []
with open(input_file, 'r') as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
lines.append({"words": words, "labels": labels})
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
lines.append({"words": words, "labels": labels})
return lines
@classmethod
def _read_json(self, input_file):
lines = []
with open(input_file, 'r', encoding='utf8') as f:
for line in f:
line = json.loads(line.strip())
text = line['text']
label_entities = line.get('label', None)
words = list(text)
labels = ['O'] * len(words)
if label_entities is not None:
for key, value in label_entities.items():
for sub_name, sub_index in value.items():
for start_index, end_index in sub_index:
assert ''.join(words[start_index:end_index+1]) == sub_name
if start_index == end_index:
labels[start_index] = 'S-'+key
else:
if end_index - start_index == 1:
labels[start_index] = 'B-' + key
labels[end_index] = 'E-' + key
else:
labels[start_index] = 'B-' + key
labels[start_index + 1:end_index] = ['I-' + key] * (len(sub_name) - 2)
labels[end_index] = 'E-' + key
lines.append({"words": words, "labels": labels})
return lines
def get_entity_bios(seq, id2label, middle_prefix='I-'):
"""Gets entities from sequence.
note: BIOS
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
# >>> seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
# >>> get_entity_bios(seq)
[['PER', 0,1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("S-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[2] = indx
chunk[0] = tag.split('-')[1]
chunks.append(chunk)
            chunk = [-1, -1, -1]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
elif tag.startswith(middle_prefix) and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks
def get_entity_bio(seq, id2label, middle_prefix='I-'):
"""Gets entities from sequence.
note: BIO
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
get_entity_bio(seq)
#output
[['PER', 0,1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
elif tag.startswith(middle_prefix) and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks
def get_entity_bioes(seq, id2label, middle_prefix='I-'):
"""Gets entities from sequence.
    note: BIOES
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
# >>> seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
        # >>> get_entity_bioes(seq)
[['PER', 0,1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("S-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[2] = indx
chunk[0] = tag.split('-')[1]
chunks.append(chunk)
            chunk = [-1, -1, -1]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
elif (tag.startswith(middle_prefix) or tag.startswith("E-")) and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks
def get_entities(seq, id2label, markup='bio', middle_prefix='I-'):
'''
:param seq:
:param id2label:
:param markup:
:return:
'''
assert markup in ['bio', 'bios', 'bioes']
if markup == 'bio':
return get_entity_bio(seq, id2label, middle_prefix)
elif markup == 'bios':
return get_entity_bios(seq, id2label, middle_prefix)
else:
return get_entity_bioes(seq, id2label, middle_prefix)
def bert_extract_item(start_logits, end_logits):
S = []
start_pred = torch.argmax(start_logits, -1).cpu().numpy()[0][1:-1]
end_pred = torch.argmax(end_logits, -1).cpu().numpy()[0][1:-1]
for i, s_l in enumerate(start_pred):
if s_l == 0:
continue
for j, e_l in enumerate(end_pred[i:]):
if s_l == e_l:
S.append((s_l, i, i + j))
break
return S
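# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal, hypothetical check of the BIO decoder above: the tag strings are
# hand-written, so id2label is unused and an empty dict is enough.
def _demo_get_entities():
    seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
    print(get_entities(seq, id2label={}, markup='bio'))  # [['PER', 0, 1], ['LOC', 3, 3]]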
| 8,822 | 32.675573 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/metric/metric.py
|
# coding=utf-8
from collections import Counter
import torch
from torch import nn
# import seqeval
from .utils_ner import get_entities
class metrics_mlm_acc(nn.Module):
def __init__(self):
super().__init__()
def forward(self, logits, labels, masked_lm_metric):
# if len(list(logits.shape))==3:
mask_label_size = 0
for i in masked_lm_metric:
for j in i:
if j > 0:
mask_label_size += 1
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,))
masked_lm_metric = masked_lm_metric.view(size=(-1,))
corr = torch.eq(y_pred, y_true)
corr = torch.multiply(masked_lm_metric, corr)
acc = torch.sum(corr.float())/mask_label_size
return acc
class EntityScore(object):
def __init__(self):
self.reset()
def reset(self):
self.origins = []
self.founds = []
self.rights = []
def compute(self, origin, found, right):
recall = 0 if origin == 0 else (right / origin)
precision = 0 if found == 0 else (right / found)
f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
return recall, precision, f1
def result(self):
class_info = {}
origin_counter = Counter([x[0] for x in self.origins])
found_counter = Counter([x[0] for x in self.founds])
right_counter = Counter([x[0] for x in self.rights])
for type_, count in origin_counter.items():
origin = count
found = found_counter.get(type_, 0)
right = right_counter.get(type_, 0)
recall, precision, f1 = self.compute(origin, found, right)
class_info[type_] = {"acc": round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
origin = len(self.origins)
found = len(self.founds)
right = len(self.rights)
recall, precision, f1 = self.compute(origin, found, right)
return {'acc': precision, 'recall': recall, 'f1': f1}, class_info
def update(self, true_subject, pred_subject):
self.origins.extend(true_subject)
self.founds.extend(pred_subject)
self.rights.extend([pre_entity for pre_entity in pred_subject if pre_entity in true_subject])
class SeqEntityScore(object):
def __init__(self, id2label, markup='bios', middle_prefix='I-'):
self.id2label = id2label
self.markup = markup
self.middle_prefix = middle_prefix
self.reset()
def reset(self):
self.origins = []
self.founds = []
self.rights = []
def compute(self, origin, found, right):
recall = 0 if origin == 0 else (right / origin)
precision = 0 if found == 0 else (right / found)
f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
return recall, precision, f1
def result(self):
class_info = {}
origin_counter = Counter([x[0] for x in self.origins])
found_counter = Counter([x[0] for x in self.founds])
right_counter = Counter([x[0] for x in self.rights])
for type_, count in origin_counter.items():
origin = count
found = found_counter.get(type_, 0)
right = right_counter.get(type_, 0)
# print('origin:', origin, ' found:', found, ' right:', right)
recall, precision, f1 = self.compute(origin, found, right)
class_info[type_] = {"acc": round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
origin = len(self.origins)
found = len(self.founds)
right = len(self.rights)
recall, precision, f1 = self.compute(origin, found, right)
return {'acc': precision, 'recall': recall, 'f1': f1}, class_info
def update(self, label_paths, pred_paths):
'''
labels_paths: [[],[],[],....]
pred_paths: [[],[],[],.....]
:param label_paths:
:param pred_paths:
:return:
Example:
>>> labels_paths = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> pred_paths = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
'''
for label_path, pre_path in zip(label_paths, pred_paths):
label_entities = get_entities(label_path, self.id2label, self.markup, self.middle_prefix)
pre_entities = get_entities(pre_path, self.id2label, self.markup, self.middle_prefix)
# print('label:', label_path, ',label_entities: ', label_entities)
# print('pred:', pre_path, ',pre_entities: ', pre_entities)
self.origins.extend(label_entities)
self.founds.extend(pre_entities)
self.rights.extend([pre_entity for pre_entity in pre_entities if pre_entity in label_entities])
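# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal, hypothetical example: feed one gold/pred tag path into
# SeqEntityScore and read back precision ('acc'), recall and F1.
def _demo_seq_entity_score():
    scorer = SeqEntityScore(id2label={}, markup='bio')
    gold = [['B-PER', 'I-PER', 'O', 'B-LOC']]
    pred = [['B-PER', 'I-PER', 'O', 'O']]
    scorer.update(gold, pred)
    overall, per_class = scorer.result()
    print(overall)  # {'acc': 1.0, 'recall': 0.5, 'f1': ~0.667}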
| 4,970 | 37.238462 | 111 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/model_utils.py
|
from pytorch_lightning import LightningModule
from pytorch_lightning.strategies import DeepSpeedStrategy
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
from transformers.optimization import AdamW, TYPE_TO_SCHEDULER_FUNCTION
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from transformers.trainer_utils import SchedulerType
from typing import Optional, Union
import warnings
import types
def add_module_args(parent_args):
parser = parent_args.add_argument_group('Basic Module')
parser.add_argument('--learning_rate', default=5e-5, type=float)
parser.add_argument('--min_learning_rate', default=1e-7, type=float)
parser.add_argument('--lr_decay_steps', default=0, type=int)
    # lr decay depends on total_steps; this ratio sets the fraction of total_steps used for decay,
    # e.g. set ratio to 0.5 to decay only over the first 50% of the steps
parser.add_argument('--lr_decay_ratio', default=1.0, type=float)
parser.add_argument('--warmup_steps', default=0, type=int)
parser.add_argument('--warmup_ratio', default=0.1, type=float)
parser.add_argument('--weight_decay', default=1e-1, type=float)
parser.add_argument('--adam_beta1', default=0.9, type=float)
parser.add_argument('--adam_beta2', default=0.999, type=float)
parser.add_argument('--adam_epsilon', default=1e-8, type=float)
parser.add_argument('--model_path', default=None, type=str)
parser.add_argument('--scheduler_type', default='polynomial', type=str)
return parent_args
def add_inverse_square_args(parent_args):
parser = parent_args.add_argument_group('Basic Module')
parser.add_argument('--warmup_min_lr', default=1e-9, type=float)
parser.add_argument('--warmup_max_lr', default=1e-4, type=float)
return parent_args
def get_default_update_params(pl_model: LightningModule):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'layer_norm.', 'layernorm.']
optimizer_grouped_params = [
{'params': [p for n, p in pl_model.named_parameters() if not any(
nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': pl_model.hparams.weight_decay},
{'params': [p for n, p in pl_model.named_parameters() if any(
nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}
]
return optimizer_grouped_params
def configure_optimizers(pl_model: LightningModule, model_params=None):
'''
Args:
pl_model: lightning module
        model_params: the model parameters to be optimized
'''
# get params that optimizer need
if model_params is None:
optimizer_grouped_params = get_default_update_params(pl_model)
else:
optimizer_grouped_params = model_params
# Configure optimizer.
if isinstance(pl_model.trainer.strategy, DeepSpeedStrategy):
if 'offload_optimizer' in pl_model.trainer.strategy.config['zero_optimization']:
optimizer = DeepSpeedCPUAdam(
optimizer_grouped_params, adamw_mode=True,
lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
else:
optimizer = FusedAdam(
optimizer_grouped_params, adam_w_mode=True,
lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
# elif isinstance(pl_model.trainer.strategy, ColossalAIStrategy):
# from colossalai.nn.optimizer import HybridAdam
# optimizer = HybridAdam(
# optimizer_grouped_params,
# lr=pl_model.hparams.learning_rate,
# betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2),
# eps=pl_model.hparams.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_params, lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2),
eps=pl_model.hparams.adam_epsilon)
# Configure learning rate scheduler.
total_steps = pl_model.hparams.lr_decay_ratio * \
pl_model.total_steps if pl_model.hparams.lr_decay_steps == 0 else pl_model.hparams.lr_decay_steps
warmup_steps = pl_model.hparams.warmup_ratio * \
pl_model.total_steps if pl_model.hparams.warmup_steps == 0 else pl_model.hparams.warmup_steps
if pl_model.hparams.scheduler_type == "inverse_sqrt":
scheduler = inverse_square_root_schedule(optimizer=optimizer,
num_warmup_steps=warmup_steps, lr_min=pl_model.hparams.warmup_min_lr, lr_max=pl_model.hparams.warmup_max_lr)
else:
scheduler = get_scheduler(name=pl_model.hparams.scheduler_type, optimizer=optimizer,
num_warmup_steps=warmup_steps, num_training_steps=total_steps,
lr_end=pl_model.hparams.min_learning_rate)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
def inverse_square_root_schedule(
optimizer: Optimizer,
num_warmup_steps: int = 4000,
lr_min=1e-9,
lr_max=1e-4,
power=0.5,
last_epoch: int = -1):
lr_init = optimizer.defaults["lr"]
if (lr_min > lr_max):
raise ValueError(f"lr_min ({lr_min}) must be be smaller than lr_max ({lr_max})")
lr_step = (lr_max - lr_init) / num_warmup_steps
decay_factor = lr_max * num_warmup_steps**power
def lr_lambda(current_step: int):
        # custom schedule: linear warmup followed by inverse square-root decay
if current_step < num_warmup_steps:
return lr_step * current_step
return decay_factor * current_step ** (-power)
    return Direct_LR(optimizer, lr_lambda, last_epoch, warmup_steps=num_warmup_steps, verbose=True)
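# --- Illustrative sketch (added for clarity; parameter values are hypothetical) ---
# The schedule climbs linearly towards lr_max during warmup, then decays
# proportionally to step**-0.5. A throwaway parameter/optimizer is enough to see it.
def _demo_inverse_sqrt_schedule():
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.SGD([param], lr=1e-7)
    sched = inverse_square_root_schedule(opt, num_warmup_steps=10, lr_max=1e-4)
    for _ in range(20):
        opt.step()
        sched.step()
    print(sched.get_last_lr())  # lr after 20 steps, already in the decay phase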
class Direct_LR(_LRScheduler):
"""
Modified from LambdaLR
"""
def __init__(self, optimizer, lr_lambda, last_epoch=-1, warmup_steps=4000, verbose=False):
self.optimizer = optimizer
self.warmup_steps = warmup_steps
if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
raise ValueError("Expected {} lr_lambdas, but got {}".format(
len(optimizer.param_groups), len(lr_lambda)))
self.lr_lambdas = list(lr_lambda)
super(Direct_LR, self).__init__(optimizer, last_epoch, verbose)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
"""
state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
return state_dict
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop('lr_lambdas')
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
state_dict['lr_lambdas'] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.")
if self._step_count < self.warmup_steps:
return [base_lr + lmbda(self.last_epoch)
for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]
return [lmbda(self.last_epoch) for lmbda in self.lr_lambdas]
def get_total_steps(trainer, hparams):
train_loader = trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if trainer.max_epochs > 0:
if hasattr(hparams, 'use_mpu'):
from fengshen.models.megatron import mpu
world_size = mpu.get_data_parallel_world_size() if hparams.use_mpu else trainer.world_size
else:
world_size = trainer.world_size
tb_size = hparams.train_batchsize * max(1, world_size)
ab_size = trainer.accumulate_grad_batches
total_steps = (len(train_loader.dataset) *
trainer.max_epochs // tb_size) // ab_size
else:
total_steps = trainer.max_steps
return total_steps
def get_scheduler(
name: Union[str, SchedulerType],
optimizer: Optimizer,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
lr_end: Optional[float] = None
):
"""
Unified API to get any scheduler from its name.
Args:
name (`str` or `SchedulerType`):
The name of the scheduler to use.
optimizer (`torch.optim.Optimizer`):
The optimizer that will be used during training.
num_warmup_steps (`int`, *optional*):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_training_steps (`int`, *optional*):
The number of training steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
"""
name = SchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
if name == SchedulerType.POLYNOMIAL:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps, lr_end=lr_end)
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
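# --- Illustrative sketch (added for clarity; step counts are hypothetical) ---
# Builds a polynomial-decay-with-warmup schedule through the unified helper and
# steps it a few times on a throwaway parameter/optimizer.
def _demo_get_scheduler():
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    opt = AdamW([param], lr=5e-5)
    sched = get_scheduler('polynomial', opt, num_warmup_steps=100,
                          num_training_steps=1000, lr_end=1e-7)
    for _ in range(10):
        opt.step()
        sched.step()
    print(sched.get_last_lr())  # still inside the warmup ramp at step 10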
| 11,270 | 43.2 | 157 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transformer_utils.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/__init__.py
|
# coding=utf-8
| 15 | 7 | 14 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/bart/modeling_bart.py
|
import warnings
from pytorch_lightning import LightningModule
from fengshen.models import transformer_utils
import torch
import torch.utils.checkpoint
from torch import nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Tuple
from transformers.file_utils import *
from transformers.modeling_outputs import *
from transformers.models.bart import *
from transformers.models.bart.modeling_bart import BartClassificationHead
_CONFIG_FOR_DOC = "BartConfig"
# ------------------------ ZZ: CBart addition ------------------------
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
BART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
Mask filling example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
"""
@dataclass
class CBartLMOutput(ModelOutput):
"""
Base class for CBart specific language models outputs.
Args:
....
"""
loss: Optional[torch.FloatTensor] = None
encoder_loss: Optional[torch.FloatTensor] = None
decoder_loss: Optional[torch.FloatTensor] = None
encoder_logits: torch.FloatTensor = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
class BartForTextInfill(BartPretrainedModel):
"""
this class is designed for text infilling.
During training, the encoder is used to predict replace, insert,
and the decoder is used to generate original input.
Compared with BartForConditionalGeneration class,
we add a module over the encoder and add a new loss for the encoder.
"""
base_model_prefix = "model"
authorized_missing_keys = [r"final_logits_bias",
r"encoder\.version", r"decoder\.version"]
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModel(config)
self.model = base_model
self.register_buffer("final_logits_bias", torch.zeros(
(1, self.model.shared.num_embeddings)))
# print( config.encoder_loss_type, config.num_labels)
# add a new attribute into BartConfig class (revise BartConfig)
self.encoder_loss_type = config.encoder_loss_type
self.num_labels = config.num_labels
if self.encoder_loss_type == 0: # 0 is classification loss, 1 is regression loss
# add a classification module for the encoder
self.classification_head = BartClassificationHead(
config.d_model, config.d_model, config.num_labels, config.classif_dropout,
)
else:
# add a regression module for the encoder
self.classification_head = BartClassificationHead(
config.d_model, config.d_model, 1, config.classif_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
self.loss_weight = config.loss_weight
self.register_buffer("label_weights", torch.zeros((self.num_labels)))
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
old_num_tokens = self.model.shared.num_embeddings
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self.model.shared = new_embeddings
self._resize_final_logits_bias(new_num_tokens, old_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens),
device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BART_GENERATION_EXAMPLE)
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
past_key_values=None,
encoder_labels=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=True,
**unused,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels in ``[0, ..., config.vocab_size]``.
Returns:
Conditional generation example::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids).logits
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
if "lm_labels" in unused:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = unused.pop("lm_labels")
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `decoder_past_key_values` instead.",
FutureWarning,
)
decoder_past_key_values = unused.pop("decoder_cached_states")
return_dict = return_dict if return_dict is not None else False
if labels is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# logits and loss for the encoder
# last hidden state
encoder_last_hidden_state = outputs['encoder_last_hidden_state']
# eos_mask = input_ids.eq(self.config.eos_token_id)
# if len(torch.unique(eos_mask.sum(1))) > 1:
# raise ValueError("All examples must have the same number of <eos> tokens.")
# sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]
encoder_logits = self.classification_head(encoder_last_hidden_state)
encoder_loss = None
if encoder_labels is not None:
# classification loss
if self.encoder_loss_type == 0:
# ZZ: seems like MSE loss does not support weighting, so only CEL has weighting applied for now
loss_fct = nn.CrossEntropyLoss(weight=self.label_weights)
encoder_loss = loss_fct(
encoder_logits.view(-1, self.config.num_labels), encoder_labels.view(-1))
# regression loss
else:
encoder_logits = encoder_logits.view(
encoder_logits.size(0), -1)
encoder_logits = torch.sigmoid(
encoder_logits) * self.num_labels - 0.5
loss_fct = nn.MSELoss(reduction='none')
_loss = loss_fct(encoder_logits, encoder_labels)
encoder_loss = torch.mean(_loss[encoder_labels >= 0])
# encoder_loss =_loss[encoder_labels>=0]
# logits and loss for the decoder
lm_logits = F.linear(
outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
masked_lm_loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# TODO(SS): do we need to ignore pad tokens in labels?
masked_lm_loss = loss_fct(
lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
loss = None
if masked_lm_loss is not None and encoder_loss is not None:
loss = encoder_loss * self.loss_weight + masked_lm_loss
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return CBartLMOutput(
loss=loss,
encoder_loss=encoder_loss,
decoder_loss=masked_lm_loss,
encoder_logits=encoder_logits,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs):
assert past is not None, "past has to be defined for encoder_outputs"
encoder_outputs, past_key_values = past
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
# change this to avoid caching (presumably for debugging)
"use_cache": use_cache,
}
def adjust_logits_during_generation(self, logits, cur_len, max_length):
if cur_len == 1:
self._force_token_ids_generation(logits, self.config.bos_token_id)
if cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(logits, self.config.eos_token_id)
return logits
def _force_token_ids_generation(self, scores, token_ids) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0"""
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(
scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
((enc_out, enc_mask), past_key_values) = past
reordered_past = []
for layer_past in past_key_values:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
new_enc_out = enc_out if enc_out is None else enc_out.index_select(
0, beam_idx)
new_enc_mask = enc_mask if enc_mask is None else enc_mask.index_select(
0, beam_idx)
past = ((new_enc_out, new_enc_mask), reordered_past)
return past
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return _make_linear_from_emb(self.model.shared) # make it on the fly
def get_encoder_logits(self, input_ids, attention_mask=None):
# print(input_ids, attention_mask)
# encoder_outputs = self.model.get_encoder_outputs(
# self,
# input_ids,
# attention_mask=attention_mask,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
# )
encoder_outputs = self.model.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=True
)
# logits and loss for the encoder
# last hidden state
encoder_last_hidden_state = encoder_outputs['last_hidden_state']
encoder_logits = self.classification_head(encoder_last_hidden_state)
# classification
if self.encoder_loss_type == 0:
# probs = torch.softmax(encoder_logits,dim=-1)
pass
# regression
else:
encoder_logits = encoder_logits.view(encoder_logits.size(0), -1)
encoder_logits = torch.sigmoid(
encoder_logits) * self.num_labels - 0.5
return encoder_outputs, encoder_logits
class CBartLightning(LightningModule):
@staticmethod
def add_module_specific_args(parent_args):
parser = parent_args.add_argument_group("CBart specific parameters")
parser.add_argument('--num_labels', type=int, default=3)
parser.add_argument('--encoder_loss_type', type=int, default=0)
parser.add_argument('--loss_weight', type=float, default=1.0)
parser.add_argument('--label_weights', type=float, nargs='+', default=[1.0, 1.0, 1.0])
parser.add_argument('--masked_lm', type=float, default=0)
return parent_args
def __init__(
self,
args,
**kwargs,
):
super().__init__()
self.save_hyperparameters(args)
self.model = BartForTextInfill.from_pretrained(args.model_path, num_labels=self.hparams.num_labels,
encoder_loss_type=self.hparams.encoder_loss_type,
loss_weight=self.hparams.loss_weight,)
self.model.label_weights = torch.tensor(
self.hparams.label_weights, dtype=torch.half)
def forward(self, **inputs):
return self.model(**inputs)
def training_step(self, batch, batch_idx):
outputs = self(**batch)
return outputs
def validation_step(self, batch, batch_idx, dataloader_idx=0):
outputs = self(**batch)
val_loss = outputs["loss"]
return {"loss": val_loss}
def setup(self, stage=None) -> None:
if stage != "fit":
return
# Get dataloader by calling it - train_dataloader() is called after setup() by default
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
tb_size = self.hparams.train_batchsize * max(1, self.trainer.gpus)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) // tb_size) // ab_size
def configure_optimizers(self):
        return transformer_utils.configure_optimizers(self)
| 17,777 | 40.929245 | 149 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/DAVAE/run_latent_generation.py
|
import re
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import numpy as np
import json
import jsonlines
from tqdm import tqdm, trange
# module-level device used by the sampling helpers below
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def filter_noise(text):
space_pattern = '([\u4e00-\u9fa5|0-9|,|。|?|!|@|¥|……|——|《|》|“|”|、|;|:|‘|’|(|)|「|」|【|】|·|~|-|+])\s+([\u4e00-\u9fa5|0-9|,|。|?|!|@|¥|……|——|《|》|“|”|、|;|:|‘|’|(|)|「|」|【|】|·|~|-|+])'
text = re.sub(space_pattern, r'\1\2', text)
text = re.sub(space_pattern, r'\1\2', text)
patterns = ['引用日期.*$', '参考资料.*$', '\[.*\]', '【.*】', '原文地址:', '原文转载:', '本文转自:', '本文摘要:', '<unk>']
for pattern in patterns:
text = re.sub(pattern, "", text)
return text.strip()
def get_raw_data(raw_data):
train_data = {}
with open(raw_data, 'r', encoding='utf8') as fh:
for line in fh:
line = json.loads(line)
for key in line.keys():
if key not in train_data.keys():
train_data[key] = [line[key]]
else:
train_data[key].append(line[key])
return train_data
def save_output(input_text, output, output_file):
with jsonlines.open(output_file, mode='a') as writer:
for text_in,text_out in zip(input_text, output):
otc = {}
otc['text_a'] = str(text_in)
otc['text_b'] = str(text_out)
writer.write(otc)
def enforce_repetition_penalty(lprobs, prev_output_tokens, repetition_penalty = 1.5):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for i in range(len(prev_output_tokens)):
for previous_token in set(prev_output_tokens[i]):
# if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
# assert logits.dim() == 1# batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size()[0]):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
# indices_to_remove = sorted_indices[sorted_indices_to_remove]
# logits[indices_to_remove] = filter_value
return logits
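# --- Illustrative sketch (added for clarity; the logits below are random) ---
# Typical use of the filter above: mask the tail of the distribution, then
# sample one token per batch row from the renormalised probabilities.
def _demo_top_k_top_p():
    logits = torch.randn(2, 100)  # (batch, vocab)
    filtered = top_k_top_p_filtering(logits.clone(), top_k=10, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)
    next_token = torch.multinomial(probs, num_samples=1)
    print(next_token.shape)  # torch.Size([2, 1])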
def sample_sequence_conditional(model, length, context, latent_z=None, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0)
generated = context
with torch.no_grad():
for i in trange(length):
if i == 2:
generated[generated[:, 1] == 127, 1] = 0
attention_mask = model.get_attn_mask(generated.shape[1]).to(device)
inputs = {'input_ids': generated, 'latent_state': latent_z, 'attention_mask':attention_mask, 'mems':None}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][:, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
log_probs = F.softmax(filtered_logits, dim=-1)
if repetition_penalty != 1.0:
enforce_repetition_penalty(log_probs, generated, repetition_penalty)
next_token = torch.multinomial(log_probs, num_samples=1)
generated = torch.cat((generated, next_token), dim=1)
# pdb.set_trace()
# if next_token[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
if next_token[0, 0] == 50000: # end of token 50000
break
return generated
def latent_code_from_text(text, tokenizer_encoder, model_vae, args, scale=1.0):
tokenized1 = tokenizer_encoder.encode(text)
coded = torch.Tensor([tokenized1]).long()
with torch.no_grad():
coded = coded.to(device)
outputs = model_vae.encoder(coded, attention_mask=(coded > 0).float())
pooled_hidden_fea = outputs[1]
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
std = logvar.mul(0.5).exp()
eps = torch.zeros_like(std).normal_()
return mean + torch.mul(eps, std)*scale
def text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder, prompt=None):
bos_token = tokenizer_decoder.convert_tokens_to_ids(tokenizer_decoder.bos_token)
context_tokens = [bos_token]
if prompt is not None:
        context_tokens.extend(tokenizer_decoder.encode(prompt)[:-1])  # remove eos token
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
latent_z=latent_z,
length= args.max_out_length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
repetition_penalty=args.repetition_penalty,
device=device
)
out_tokens = out[0, :].tolist()
out_tokens = out_tokens[1:out_tokens.index(50000)] if 50000 in out_tokens else out_tokens # remove bos and eos
text_x1 = tokenizer_decoder.decode(out_tokens, clean_up_tokenization_spaces=True)
return text_x1
def simulate(model_vae, tokenizer_encoder, tokenizer_decoder, args, sent_input, prompt=None):
    latent_z = latent_code_from_text(sent_input, tokenizer_encoder, model_vae, args)
text_analogy = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder, prompt=prompt)
return text_analogy
def switch(next_value, init, is_update):
is_update = is_update.type_as(next_value)
return (1-is_update)*init + is_update*next_value
def sample_sequence_conditional_batch(model, max_out_length, context_tokens_tensor, context_length_tensor, latent_z=None, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0, device='cpu', end_token=50000):
org_context_length = torch.min(context_length_tensor).item()
batch_size = context_tokens_tensor.shape[0]
generated = context_tokens_tensor[:,:org_context_length]
counter = org_context_length
output_tokens_lists = []
output_order = []
orig_order = torch.LongTensor(list(range(batch_size)))
with torch.no_grad():
while counter < max_out_length:
if counter == org_context_length+2:
generated[generated[:,org_context_length] == 127, org_context_length] = 0
attention_mask = model.get_attn_mask(generated.shape[1]).to(device)
inputs = {'input_ids': generated, 'latent_state': latent_z, 'attention_mask': attention_mask}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][:, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# if counter == org_context_length:
# filtered_logits[:, 43488] = -float('Inf') # forbid starting with '《'
log_probs = F.softmax(filtered_logits, dim=-1)
if repetition_penalty != 1.0:
enforce_repetition_penalty(log_probs, generated, repetition_penalty)
            if (log_probs.sum(dim=-1) <= 0.0).any():
break
next_token = torch.multinomial(log_probs, num_samples=1).view(-1)
next_token = switch(next_token, context_tokens_tensor[:, counter], context_length_tensor<=counter)
if torch.all(next_token == end_token).item():
break
stop_idx = next_token == end_token
output_order.extend(orig_order[stop_idx].tolist())
finished = generated[stop_idx]
output_tokens_lists.extend(finished.detach().cpu().tolist())
# continue with non-ending tokens
conti_idx = next_token != end_token
orig_order = orig_order[conti_idx]
generated = generated[conti_idx]
latent_z = latent_z[conti_idx]
next_token = next_token[conti_idx]
context_tokens_tensor = context_tokens_tensor[conti_idx]
context_length_tensor = context_length_tensor[conti_idx]
batch_size = generated.shape[0]
generated = torch.cat((generated, next_token.view(batch_size, 1)), dim=-1)
counter += 1
output_order.extend(orig_order.tolist())
generated = generated.detach().cpu().tolist()
output_tokens_lists.extend(generated)
output_tokens_lists = [tokens[:tokens.index(end_token)] if end_token in tokens else tokens for tokens in output_tokens_lists]
output_tokens_lists = [tokens for _,tokens in sorted(zip(output_order, output_tokens_lists))]
return output_tokens_lists
def latent_code_from_text_batch(texts, tokenizer_encoder, model_vae, args):
tokens_tensor_list = []
for text in texts:
tokens = tokenizer_encoder.encode(text)[:510]
tokens_tensor_list.append(torch.tensor([101]+tokens+[102]))
coded = pad_sequence(tokens_tensor_list, batch_first=True, padding_value=0).long()
with torch.no_grad():
coded = coded.to(device)
pooled_hidden_fea = model_vae.encoder(coded, attention_mask=(coded > 0).float())[1]
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
std = logvar.mul(0.5).exp()
eps = torch.zeros_like(std).normal_()
latent_z = mean + torch.mul(eps, std)*args.std_scale
return latent_z
def text_from_latent_code_batch(latent_z, model_vae, args, tokenizer_decoder, prompt=None):
past = latent_z
batch_size = latent_z.shape[0]
bos_token = tokenizer_decoder.convert_tokens_to_ids(tokenizer_decoder.bos_token)
end_token = tokenizer_decoder.convert_tokens_to_ids(tokenizer_decoder.eos_token)
if prompt is not None:
prompt = [[bos_token] + tokenizer_decoder.encode(text)[:-1] for text in prompt]
else:
prompt = [[bos_token]]*batch_size
context_tokens_tensor = torch.tensor([[end_token]*args.max_out_length]*batch_size).to(device) # 2-d tensor
context_length_tensor = torch.tensor([1]*batch_size).to(device)
for i in range(batch_size):
context_tokens_tensor[i,:len(prompt[i])] = torch.tensor(prompt[i]).long().to(device)
context_length_tensor[i] = len(prompt[i])
# length = 128 # maximum length, but not used
out = sample_sequence_conditional_batch(
model=model_vae.decoder,
max_out_length= args.max_out_length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
context_tokens_tensor=context_tokens_tensor,
context_length_tensor=context_length_tensor,
latent_z=latent_z,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
repetition_penalty=args.repetition_penalty,
device=device
)
out_text = []
for i, tokens in enumerate(out):
tokens = tokens[len(prompt[i]):]
tokens = tokens[:tokens.index(end_token)] if end_token in tokens else tokens
text = tokenizer_decoder.decode(tokens, clean_up_tokenization_spaces=True)
out_text.append(filter_noise(text))
return out_text
def simulate_batch(model_vae, tokenizer_encoder, tokenizer_decoder, args, sent_inputs, prompt=None):
latent_z = latent_code_from_text_batch(sent_inputs, tokenizer_encoder, model_vae, args)
text_analogy = text_from_latent_code_batch(latent_z, model_vae, args, tokenizer_decoder, prompt=prompt)
return text_analogy
def simulate_bz(model_vae, tokenizer_encoder, tokenizer_decoder, args, sent_inputs, prompt=None):
latent_z = latent_code_from_text_batch(sent_inputs, tokenizer_encoder, model_vae, args)
return latent_z
def my_shuffle(x, index):
result = []
for field in index:
result.append(x[field])
return result
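# --- Illustrative wiring sketch (added for clarity; every name passed in is assumed
# to be built elsewhere, e.g. a DAVAE model plus its encoder/decoder tokenizers) ---
def _demo_simulate_batch(model_vae, tokenizer_encoder, tokenizer_decoder, args):
    sents = ["今天天气很好", "我喜欢这本书"]
    outputs = simulate_batch(model_vae, tokenizer_encoder, tokenizer_decoder,
                             args, sents, prompt=None)
    for src, gen in zip(sents, outputs):
        print(src, "->", gen)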
| 13,632 | 43.993399 | 212 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/DAVAE/BertForLatentConnector.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import pdb
import torch
from torch import nn
from transformers import BertConfig,BertPreTrainedModel
from transformers.models.bert.modeling_bert import BertEmbeddings,BertEncoder,BertPooler
class BertForLatentConnector(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, latent_size):
super(BertForLatentConnector, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.linear = nn.Linear(config.hidden_size, 2 * latent_size, bias=False)
self.init_weights()
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, emb_noise=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
if emb_noise is not None:
embedding_output = embedding_output + emb_noise(embedding_output).to(embedding_output.dtype)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
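# --- Illustrative sketch (added for clarity; `model` and `input_ids` are assumed to exist) ---
# How the pooled output is typically turned into a diagonal-Gaussian latent code:
# the extra `linear` head emits [mean, logvar], which are split and reparameterised.
def _demo_latent_from_pooled(model, input_ids):
    sequence_output, pooled_output = model(input_ids)[:2]
    mean, logvar = model.linear(pooled_output).chunk(2, -1)
    z = mean + torch.randn_like(mean) * (0.5 * logvar).exp()
    return z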
| 7,251 | 51.550725 | 134 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/DAVAE/DAVAEModel.py
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertConfig,TransfoXLConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import cached_path,hf_bucket_url
from fengshen.models.DAVAE.GPT2ModelForLatent import GPT2ModelForLatent
from fengshen.models.DAVAE.BertForLatentConnector import BertForLatentConnector
from fengshen.models.DAVAE.run_latent_generation import *
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
return m + torch.log(sum_exp)
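# --- Illustrative sketch (added for clarity; the tensor below is random) ---
# Quick numerical check that the stabilised computation matches the naive one.
def _demo_log_sum_exp():
    value = torch.randn(4, 8)
    stable = log_sum_exp(value, dim=-1)
    naive = value.exp().sum(dim=-1).log()
    print(torch.allclose(stable, naive, atol=1e-5))  # True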
class VAEPretrainedModel(PreTrainedModel):
def _init_weights(self, module):
""" Initialize the weights """
pass # to bypass the not implement error
class DAVAEModel(VAEPretrainedModel):
config_class = PretrainedConfig
def __init__(self, config:PretrainedConfig,*model_args, **model_kwargs):
super().__init__(config=config)
self.config = config
self.vae_model = DAVAEModel.load_model(self.config)
@classmethod
def load_model(cls, config):
encoder_config = BertConfig.from_dict(config.encoder)
encoder_model = BertForLatentConnector(config=encoder_config, latent_size=config.latent_size)
dec_config = TransfoXLConfig.from_dict(config.decoder)
dec_config.latent_size = config.latent_size
decoder_model = GPT2ModelForLatent(config=dec_config)
vae_model = EncDecAAE(config,encoder_model, decoder_model, dec_config.latent_size, pad_token_id=50000)
return vae_model
def set_tokenizers(self,encoder_tokenizer,decoder_tokenizer):
if not hasattr(self, 'encoder_tokenizer'):
self.encoder_tokenizer = encoder_tokenizer
if not hasattr(self, 'decoder_tokenizer'):
self.decoder_tokenizer = decoder_tokenizer
def simulate_batch(self,encoder_tokenizer,decoder_tokenizer, sent_inputs, prompt=None):
self.set_tokenizers(encoder_tokenizer,decoder_tokenizer)
        # generate paraphrases (similar sentences)
latent_z = self.latent_code_from_text_batch(sent_inputs)
text_analogy = self.text_from_latent_code_batch(latent_z,prompt=prompt)
return text_analogy
def latent_code_from_text_batch(self,texts):
# texts->latents
tokens_tensor_list = []
for text in texts:
tokens = self.encoder_tokenizer.encode(text)[:510]
tokens_tensor_list.append(torch.tensor([101]+tokens+[102]))
coded = pad_sequence(tokens_tensor_list, batch_first=True, padding_value=0).long()
device = next(self.vae_model.decoder.parameters()).device
with torch.no_grad():
coded = coded.to(device)
pooled_hidden_fea = self.vae_model.encoder(coded, attention_mask=(coded > 0).float())[1]
mean, logvar = self.vae_model.encoder.linear(pooled_hidden_fea).chunk(2, -1)
std = logvar.mul(0.5).exp()
eps = torch.zeros_like(std).normal_()
latent_z = mean + torch.mul(eps, std)*self.config.std_scale
return latent_z
def text_from_latent_code_batch(self,latent_z, prompt=None):
# latents->texts
device = next(self.vae_model.decoder.parameters()).device
past = latent_z
batch_size = latent_z.shape[0]
bos_token = self.decoder_tokenizer.convert_tokens_to_ids(self.decoder_tokenizer.bos_token)
end_token = self.decoder_tokenizer.convert_tokens_to_ids(self.decoder_tokenizer.eos_token)
if prompt is not None:
prompt = [[bos_token] + self.decoder_tokenizer.encode(text)[:-1] for text in prompt]
else:
prompt = [[bos_token]]*batch_size
context_tokens_tensor = torch.tensor([[end_token]*self.config.max_out_length]*batch_size).to(device) # 2-d tensor
context_length_tensor = torch.tensor([1]*batch_size).to(device)
for i in range(batch_size):
context_tokens_tensor[i,:len(prompt[i])] = torch.tensor(prompt[i]).long().to(device)
context_length_tensor[i] = len(prompt[i])
out = sample_sequence_conditional_batch(
model=self.vae_model.decoder,
max_out_length= self.config.max_out_length,
context_tokens_tensor=context_tokens_tensor,
context_length_tensor=context_length_tensor,
latent_z=latent_z,
temperature=self.config.temperature,
top_k=self.config.top_k,
top_p=self.config.top_p,
repetition_penalty=self.config.repetition_penalty,
device=device
)
out_text = []
for i, tokens in enumerate(out):
tokens = tokens[len(prompt[i]):]
tokens = tokens[:tokens.index(end_token)] if end_token in tokens else tokens
text = self.decoder_tokenizer.decode(tokens, clean_up_tokenization_spaces=True)
out_text.append(filter_noise(text))
return out_text
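# Hedged usage sketch (illustration only, hypothetical helper): how DAVAEModel.simulate_batch
# is typically driven. The tokenizer choices are assumptions — the encoder side expects a
# BERT-style tokenizer matching the encoder vocabulary, and the decoder tokenizer must expose
# bos_token/eos_token, since text_from_latent_code_batch looks both up.
def _demo_davae_paraphrase(model, encoder_tokenizer, decoder_tokenizer):
    sentences = ["今天天气很好"]  # any batch of input sentences ("the weather is nice today")
    return model.simulate_batch(encoder_tokenizer, decoder_tokenizer, sentences)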
class EncDecAAE(nn.Module):
"""Adversarial Auto-Encoder"""
def __init__(self,config, encoder, decoder, latent_size, pad_token_id):
super(EncDecAAE, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.config = config
self.pad_token_id = pad_token_id
self.Disc = nn.Sequential(nn.Linear(latent_size, 4*latent_size), nn.ReLU(),
nn.Linear(4*latent_size, 1))
# Standard Normal prior
loc = torch.zeros(latent_size)
scale = torch.ones(latent_size)
self.prior = torch.distributions.normal.Normal(loc, scale)
def connect(self, bert_fea, nsamples=1, fb_mode=0):
"""
Returns: Tensor1, Tensor2
Tensor1: the tensor latent z with shape [batch, nsamples, nz]
            Tensor2: the tensor of KL for each x with shape [batch]
"""
# (batch_size, nz)
mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1)
z = self.reparameterize(mean, logvar, nsamples)
if fb_mode == 0:
KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1)
elif fb_mode == 1:
kl_loss = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1)
kl_mask = (kl_loss > self.config.dim_target_kl).float()
KL = (kl_mask * kl_loss).sum(dim=1)
return z, KL
def connect_deterministic(self, bert_fea, nsamples=1):
"""
Returns: Tensor1, Tensor2
Tensor1: the tensor latent z with shape [batch, nsamples, nz]
            Tensor2: the tensor of KL for each x with shape [batch]
"""
# (batch_size, nz)
mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1)
logvar = torch.zeros_like(logvar)
z = self.reparameterize(mean, logvar, nsamples)
KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1)
return z, KL
def reparameterize(self, mu, logvar, nsamples=1):
"""sample from posterior Gaussian family
Args:
mu: Tensor
Mean of gaussian distribution with shape (batch, nz)
logvar: Tensor
                logvar of gaussian distribution with shape (batch, nz)
Returns: Tensor
Sampled z with shape (batch, nsamples, nz)
"""
batch_size, nz = mu.size()
std = logvar.mul(0.5).exp()
mu_expd = mu.unsqueeze(1).expand(batch_size, nsamples, nz)
std_expd = std.unsqueeze(1).expand(batch_size, nsamples, nz)
eps = torch.zeros_like(std_expd).normal_()
return mu_expd + torch.mul(eps, std_expd)
def loss_adv(self, z):
zn = torch.randn_like(z)
zeros = torch.zeros(len(z), 1, device=z.device).half()
ones = torch.ones(len(z), 1, device=z.device).half()
loss_d = F.binary_cross_entropy_with_logits(self.Disc(z.detach().half()), zeros) + \
F.binary_cross_entropy_with_logits(self.Disc(zn.half()), ones)
loss_g = F.binary_cross_entropy_with_logits(self.Disc(z.half()), ones)
return loss_d, loss_g
def forward(self, inputs, labels, beta=0.0, iw=None, fb_mode=0, emb_noise=None):
attention_mask = (inputs > 0).float()
reconstrution_mask = (labels != self.pad_token_id).float() # the padding token for GPT2
sent_length = torch.sum(reconstrution_mask, dim=1)
outputs = self.encoder(inputs, attention_mask, emb_noise=emb_noise)
pooled_hidden_fea = outputs[1]
seq_length = labels.size(1)
dec_attn_mask = self.decoder.get_attn_mask(seq_length).to(labels.device)
if fb_mode in [0,1]:
latent_z, loss_kl = self.connect(pooled_hidden_fea, fb_mode=fb_mode)
latent_z = latent_z.squeeze(1)
outputs = self.decoder(input_ids=labels, attention_mask=dec_attn_mask, latent_state=latent_z, labels=labels, label_ignore=self.pad_token_id) # ignore loss over padding tokens
loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
elif fb_mode==2:
latent_z, loss_kl = self.connect_deterministic(pooled_hidden_fea)
latent_z = latent_z.squeeze(1)
outputs = self.decoder(input_ids=labels, attention_mask=dec_attn_mask, latent_state=latent_z, labels=labels, label_ignore=self.pad_token_id)
loss_rec = outputs[0] # model outputs are always tuple
if self.config.length_weighted_loss:
loss = loss_rec / sent_length + beta * loss_kl
else:
loss = loss_rec + beta * loss_kl
        if iw is not None:
total_loss = torch.sum(loss*iw)/torch.sum(iw)
else:
total_loss = torch.sum(loss)
return (loss_rec/sent_length).mean(), loss_kl.mean(), total_loss
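# Illustrative sketch (assumption-labelled helper, not part of the training code): the
# closed-form term used in EncDecAAE.connect is the diagonal-Gaussian KL against N(0, I),
#   KL = 0.5 * sum(mu^2 + exp(logvar) - logvar - 1),
# and reparameterize draws z = mu + exp(0.5 * logvar) * eps. The helper below compares the
# analytic KL with a Monte-Carlo estimate; the two agree up to sampling error.
def _demo_gaussian_kl(batch=4, nz=8, nsamples=20000):
    mu = torch.randn(batch, nz)
    logvar = torch.randn(batch, nz).clamp(-2, 2)
    analytic = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(dim=1)
    std = (0.5 * logvar).exp()
    eps = torch.randn(batch, nsamples, nz)
    z = mu.unsqueeze(1) + std.unsqueeze(1) * eps  # reparameterization trick
    posterior = torch.distributions.Normal(mu.unsqueeze(1), std.unsqueeze(1))
    prior = torch.distributions.Normal(torch.zeros(nz), torch.ones(nz))
    monte_carlo = (posterior.log_prob(z) - prior.log_prob(z)).sum(-1).mean(1)
    return analytic, monte_carlo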
| 10,280 | 42.563559 | 186 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/DAVAE/GPT2ModelForLatent.py
|
# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import torch
import torch.nn.functional as F
import math
import torch.nn as nn
from torch.nn import CrossEntropyLoss
# from ......configuration_transfo_xl import TransfoXLConfig
from transformers import TransfoXLConfig
from transformers.modeling_utils import (
PreTrainedModel
)
class PositionalEmbedding(torch.nn.Module):
def __init__(self, hidden_size):
super(PositionalEmbedding, self).__init__()
self.hidden_size = hidden_size
inv_freq = 1 / (10000 ** (torch.arange(0.0, hidden_size, 2.0) / hidden_size))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
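# Illustration (hypothetical helper): PositionalEmbedding builds the Transformer-XL style
# sinusoidal table, concatenating sin and cos of pos / 10000^(2i/h); for a length-L
# position sequence the result has shape [1, L, hidden_size] (or [bsz, L, hidden_size]).
def _demo_positional_embedding(hidden_size=16, length=5):
    pe = PositionalEmbedding(hidden_size)
    pos_seq = torch.arange(length - 1, -1, -1.0)  # descending positions, as used further below
    emb = pe(pos_seq)
    assert emb.shape == (1, length, hidden_size)
    return emb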
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(
numerator, denominator)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def scaled_init_method(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def unscaled_init_method(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
@torch.jit.script
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
(1.0 + 0.044715 * x * x)))
def gelu(x):
return gelu_impl(x)
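# Illustration (hypothetical helper): gelu_impl above is the tanh approximation of GELU,
#   0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
# and should coincide with PyTorch's built-in tanh approximation (the `approximate`
# keyword needs PyTorch >= 1.12).
def _demo_gelu():
    x = torch.linspace(-5.0, 5.0, 101)
    assert torch.allclose(gelu(x), F.gelu(x, approximate="tanh"), atol=1e-5)
    return gelu(x)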
class GPT2SelfAttention(torch.nn.Module):
"""Parallel self-attention layer for GPT2.
Self-attention layer takes input with size [b, s, h] where b is
    the batch size, s is the sequence length, and h is the hidden size
and creates output of the same size.
Arguments:
hidden_size: total hidden size of the layer (h).
num_attention_heads: number of attention heads (n). Note that we
require n to be divisible by number of GPUs
used to parallelize the model. Also, we
require hidden size to be divisible by n.
dropout_prob: dropout probability for the attention scores.
init_method: weight initialization.
output_layer_init_method: output layer initialization. If None, use
`init_method`.
We use the following notation:
h: hidden_size
n: num_attention_heads
p: number of partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
"""
def __init__(self, hidden_size, num_attention_heads,
attention_dropout_prob, output_dropout_prob,
init_method, output_layer_init_method=None, relative_encoding=False):
super(GPT2SelfAttention, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Per attention head and per partition values.
self.hidden_size_per_partition = hidden_size
self.hidden_size_per_attention_head = divide(hidden_size,
num_attention_heads)
self.num_attention_heads_per_partition = num_attention_heads
self.relative_encoding = relative_encoding
# Strided linear layer.
self.query_key_value = torch.nn.Linear(hidden_size, 3*hidden_size, bias=True)
if relative_encoding:
self.relative = torch.nn.Linear(hidden_size, hidden_size, bias=True)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(attention_dropout_prob)
# Output.
self.dense = torch.nn.Linear(hidden_size, hidden_size, bias=True)
self.output_dropout = torch.nn.Dropout(output_dropout_prob)
def _transpose_for_scores(self, tensor):
"""Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
size [b, np, s, hn].
"""
new_tensor_shape = tensor.size()[:-1] + \
(self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head)
tensor = tensor.view(*new_tensor_shape)
return tensor.permute(0, 2, 1, 3)
@staticmethod
def _rel_shift(x, zero_triu=False):
# ql x kl x bsz x h
# bsz x h x ql x kl
zero_pad = torch.zeros((*x.size()[:-2], x.size(-2), 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:-2], x.size(-1) + 1, x.size(-2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
@staticmethod
def _rel_shift_latest(x: torch.Tensor):
ndims = x.dim()
x_shape = x.size()
row_dim = 2
col_dim = row_dim + 1
assert col_dim < ndims
tgt_shape_1, tgt_shape_2 = [], []
for i in range(ndims):
if i == row_dim:
tgt_shape_1.append(x_shape[col_dim])
tgt_shape_2.append(x_shape[row_dim])
elif i == col_dim:
tgt_shape_1.append(x_shape[row_dim])
tgt_shape_2.append(x_shape[col_dim] - 1)
else:
tgt_shape_1.append(x_shape[i])
tgt_shape_2.append(x_shape[i])
x = x.view(*tgt_shape_1)
x = x[:, :, 1:, :]
x = x.view(*tgt_shape_2)
return x
def forward(self, hidden_states, ltor_mask, position_embeddings=None, r_w_bias=None, r_r_bias=None, mem=None):
# hidden_states: [b, s, h]
# ltor_mask: [1, 1, s, s]
# Attention heads. [b, s, hp]
query_length = hidden_states.size(1)
if mem is None:
mixed_x_layer = self.query_key_value(hidden_states)
(mixed_query_layer,
mixed_key_layer,
mixed_value_layer) = torch.chunk(mixed_x_layer, 3, dim=-1)
else:
cat = torch.cat((mem, hidden_states), 1)
mixed_x_layer = self.query_key_value(cat)
(mixed_query_layer,
mixed_key_layer,
mixed_value_layer) = torch.chunk(mixed_x_layer, 3, dim=-1)
mixed_query_layer = mixed_query_layer[:, -query_length:]
# Reshape and transpose [b, np, s, hn]
query_layer = self._transpose_for_scores(mixed_query_layer)
key_layer = self._transpose_for_scores(mixed_key_layer)
value_layer = self._transpose_for_scores(mixed_value_layer)
if self.relative_encoding:
relative_layer = self.relative(position_embeddings)
relative_layer = self._transpose_for_scores(relative_layer) # 1 (bsz) x n_head x klen x d_head
# Raw attention scores. [b, np, qs, ks]
rw_head_q = query_layer + r_w_bias.unsqueeze(1)
ac_score = torch.matmul(rw_head_q, key_layer.transpose(-1, -2))
rr_head_q = query_layer + r_r_bias.unsqueeze(1)
bd_score = torch.matmul(rr_head_q, relative_layer.transpose(-1, -2))
bd_score = self._rel_shift(bd_score) # qlen x klen x bsz x n_head
# bd_score = bd_score.permute(2, 3, 0, 1) # bsz n_head qlen klen
attention_scores = ac_score + bd_score
else:
# Raw attention scores. [b, np, s, s]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(
self.hidden_size_per_attention_head)
# Apply the left to right attention mask.
attention_scores = torch.mul(attention_scores, ltor_mask) - \
10000.0 * (1.0 - ltor_mask)
# Attention probabilities. [b, np, s, s]
attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
# with get_cuda_rng_tracker().fork():
# attention_probs = self.attention_dropout(attention_probs)
# Context layer.
# [b, np, s, hn]
# print(f'attn_probs {attention_probs}, value_layer {value_layer}')
context_layer = torch.matmul(attention_probs, value_layer.float())
# [b, s, np, hn]
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + \
(self.hidden_size_per_partition,)
# [b, s, hp]
context_layer = context_layer.view(*new_context_layer_shape)
# Output. [b, s, h]
output = self.dense(context_layer)
output = self.output_dropout(output)
return output
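# Illustration (hypothetical helper): the Transformer-XL relative-shift trick used above.
# _rel_shift pads and reshapes the [b, n_head, qlen, klen] score tensor so each entry is
# read at its relative offset; the output keeps the input shape.
def _demo_rel_shift():
    scores = torch.randn(2, 4, 5, 5)
    shifted = GPT2SelfAttention._rel_shift(scores)
    assert shifted.shape == scores.shape
    return shifted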
class GPT2MLP(torch.nn.Module):
"""MLP for GPT2.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform gelu transformation, and project the
state back into h hidden dimension. At the end, dropout is also
applied.
Arguments:
hidden_size: The hidden size of the self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
init_method: initialization method used for the weights. Note
that all biases are initialized to zero and
layernorm weight are initialized to one.
output_layer_init_method: output layer initialization. If None,
use `init_method`.
"""
def __init__(self, hidden_size, output_dropout_prob, init_method,
output_layer_init_method=None):
super(GPT2MLP, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Project to 4h.
self.dense_h_to_4h = torch.nn.Linear(hidden_size, 4*hidden_size)
# Project back to h.
self.dense_4h_to_h = torch.nn.Linear(4*hidden_size, hidden_size)
self.dropout = torch.nn.Dropout(output_dropout_prob)
def forward(self, hidden_states):
# [b, s, 4hp]
intermediate_parallel = self.dense_h_to_4h(hidden_states)
intermediate_parallel = gelu(intermediate_parallel)
# [b, s, h]
output = self.dense_4h_to_h(intermediate_parallel)
output = self.dropout(output)
return output
class GPT2TransformerLayer(torch.nn.Module):
"""A single layer transformer for GPT2.
We use the following notation:
h: hidden size
n: number of attention heads
b: batch size
s: sequence length
    Transformer layer takes input with size [b, s, h] and returns an
output of the same size.
Arguments:
hidden_size: The hidden size of the self attention.
num_attention_heads: number of attention head in the self
attention.
attention_dropout_prob: dropout probability of the attention
score in self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
layernorm_epsilon: epsilon used in layernorm to avoid
division by zero.
init_method: initialization method used for the weights. Note
that all biases are initialized to zero and
layernorm weight are initialized to one.
output_layer_init_method: output layers (attention output and
mlp output) initialization. If None,
use `init_method`.
"""
def __init__(self,
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
layernorm_epsilon,
init_method,
output_layer_init_method=None,
relative_encoding=False):
super(GPT2TransformerLayer, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Layernorm on the input data.
self.input_layernorm = torch.nn.LayerNorm(hidden_size, eps=layernorm_epsilon)
# Self attention.
self.attention = GPT2SelfAttention(
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
init_method,
output_layer_init_method=output_layer_init_method,
relative_encoding=relative_encoding)
# Layernorm on the input data.
self.post_attention_layernorm = torch.nn.LayerNorm(hidden_size,
eps=layernorm_epsilon)
# MLP
self.mlp = GPT2MLP(
hidden_size,
output_dropout_prob,
init_method,
output_layer_init_method=output_layer_init_method)
def forward(self, hidden_states, ltor_mask, position_embeddings=None, r_w_bias=None, r_r_bias=None, mem=None):
# hidden_states: [b, s, h]
# ltor_mask: [1, 1, s, s]
        # Layer norm at the beginning of the transformer layer.
layernorm_output = self.input_layernorm(hidden_states)
mem = self.input_layernorm(mem) if mem is not None else None
# Self attention.
attention_output = self.attention(layernorm_output, ltor_mask, position_embeddings, r_w_bias, r_r_bias, mem)
# Residual connection.
# print(f'hz {hidden_states.shape}, attn {attention_output.shape}')
layernorm_input = hidden_states + attention_output
# Layer norm post the self attention.
layernorm_output = self.post_attention_layernorm(layernorm_input)
# MLP.
mlp_output = self.mlp(layernorm_output)
# Second residual connection.
output = layernorm_input + mlp_output
return output
class GPT2TransformerForLatent(torch.nn.Module):
"""GPT-2 transformer.
This module takes input from embedding layer and it's output can
be used directly by a logit layer. It consists of L (num-layers)
blocks of:
layer norm
self attention
residual connection
layer norm
mlp
residual connection
followed by a final layer norm.
Arguments:
num_layers: Number of transformer layers.
hidden_size: The hidden size of the self attention.
num_attention_heads: number of attention head in the self
attention.
attention_dropout_prob: dropout probability of the attention
score in self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
checkpoint_activations: if True, checkpoint activations.
checkpoint_num_layers: number of layers to checkpoint. This
                               is basically the chunk size in checkpointing.
layernorm_epsilon: epsilon used in layernorm to avoid
division by zero.
init_method_std: standard deviation of the init method which has
the form N(0, std).
        use_scaled_init_for_output_weights: If True use 1/sqrt(2*num_layers)
scaling for the output weights (
output of self attention and mlp).
"""
def __init__(self,
num_layers,
hidden_size,
num_attention_heads,
max_sequence_length,
max_memory_length,
embedding_dropout_prob,
attention_dropout_prob,
output_dropout_prob,
checkpoint_activations,
latent_size = 64,
checkpoint_num_layers=1,
layernorm_epsilon=1.0e-5,
init_method_std=0.02,
use_scaled_init_for_output_weights=True,
relative_encoding=False):
super(GPT2TransformerForLatent, self).__init__()
        # Store activation checkpointing flag.
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
self.max_memory_length = max_memory_length
self.latent_size = latent_size
# self.linear = nn.Linear(self.latent_size, hidden_size * num_layers, bias=False).float() # different latent vector for each layer
# self.linear_emb = nn.Linear(self.latent_size, hidden_size * num_layers, bias=False).float()
self.linear_emb = nn.Linear(self.latent_size, hidden_size, bias=False).float()
# torch.nn.init.normal_(self.linear.weight, mean=0.0, std=init_method_std)
torch.nn.init.normal_(self.linear_emb.weight, mean=0.0, std=init_method_std)
output_layer_init_method = None
if use_scaled_init_for_output_weights:
output_layer_init_method = scaled_init_method(init_method_std,
num_layers)
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
self.relative_encoding = relative_encoding
if relative_encoding:
# Relative position embedding
self.position_embeddings = PositionalEmbedding(hidden_size)
# Per attention head and per partition values.
self.hidden_size_per_attention_head = divide(hidden_size,
num_attention_heads)
self.num_attention_heads_per_partition = num_attention_heads
self.r_w_bias = torch.nn.Parameter(
torch.Tensor(self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
self.r_r_bias = torch.nn.Parameter(
torch.Tensor(self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
# Always initialize bias to zero.
with torch.no_grad():
self.r_w_bias.zero_()
self.r_r_bias.zero_()
else:
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(max_sequence_length,
hidden_size)
# Initialize the position embeddings.
torch.nn.init.normal_(self.position_embeddings.weight, mean=0.0, std=init_method_std)
def get_layer():
return GPT2TransformerLayer(
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
layernorm_epsilon,
unscaled_init_method(init_method_std),
output_layer_init_method=output_layer_init_method,
relative_encoding=relative_encoding)
# Transformer layers.
self.layers = torch.nn.ModuleList(
[get_layer() for _ in range(num_layers)])
# Final layer norm before output.
self.final_layernorm = torch.nn.LayerNorm(hidden_size, eps=layernorm_epsilon)
def forward(self, hidden_states, attention_mask, latent_state, mems):
batch_size, query_length, hidden_size = hidden_states.size()
# memory_length = self.latent_size
memory_length = mems[0].size(1) if mems else 0
# key_length = query_length + memory_length+1
# attention_mask = attention_mask[:, :, :, -query_length-memory_length-1:]
key_length = query_length + memory_length
attention_mask = attention_mask[:, :, :, -query_length - memory_length:]
if latent_state is not None:
latent_emb = self.linear_emb(latent_state)
# latent_emb = torch.split(latent_emb.unsqueeze(1), hidden_size, dim=2)
latent_emb = latent_emb.unsqueeze(1)
# print(f'latent_state {latent_state.half()}\n linear_emb {self.linear_emb.weight} \n latent_emb {latent_emb}')
# torch.save(latent_state, '/cognitive_comp/wanghao/experiments/fengshen/latent_state.pt')
# torch.save(self.linear_emb, '/cognitive_comp/wanghao/experiments/fengshen/weight.pt')
position_sequence = torch.arange(key_length - 1, -1, -1.0, device=hidden_states.device,
dtype=hidden_states.dtype)
position_embeddings = self.position_embeddings(position_sequence)
# print(f'pos {position_embeddings.shape}, latent {latent_emb.shape}')
# if latent_state is not None:
# position_embeddings += latent_emb.unsqueeze(0)
# Apply dropout
position_embeddings = self.embedding_dropout(position_embeddings)
# print(f'latent_emb {latent_emb.shape}, {hidden_states.shape}')
if latent_state is not None:
hidden_states = hidden_states + latent_emb
hidden_states = self.embedding_dropout(hidden_states)
# latent_mem = self.linear(latent_state.half())
# latent_mem = torch.split(latent_mem.unsqueeze(1), hidden_size, dim=2)
if self.max_memory_length > 0:
mem_layers = [hidden_states.detach()]
else:
mem_layers = []
for i, layer in enumerate(self.layers):
args = [hidden_states, attention_mask]
if self.relative_encoding:
args += [position_embeddings, self.r_w_bias, self.r_r_bias]
mem_i = mems[i] if mems else None
# print(f'mems {len(mems)} {mems[0].shape}')
# mem_i = torch.cat((latent_mem[i], mems[i]), 1) if mems else latent_mem[i]
# print(f'mem_i {mem_i.shape}, {mem_i}')
hidden_states = layer(*args, mem=mem_i)
if latent_state is not None:
hidden_states = hidden_states + latent_emb
if self.max_memory_length > 0:
mem_layers.append(hidden_states.detach())
# print(f'mem_layers {len(mem_layers)} mems {len(mems)}')
# Final layer norm.
output = self.final_layernorm(hidden_states)
if self.max_memory_length > 0:
mem_layers = self.update_mems(mem_layers, mems)
return (output, mem_layers)
def update_mems(self, hiddens, mems):
memory_length = mems[0].size(1) if mems else 0
query_length = hiddens[0].size(1)
new_memory_length = min(self.max_memory_length, memory_length + query_length)
new_mems = []
with torch.no_grad():
for i in range(len(hiddens)):
if new_memory_length <= query_length:
new_mems.append(hiddens[i][:, -new_memory_length:])
else:
new_mems.append(torch.cat((mems[i][:, -new_memory_length+query_length:], hiddens[i]), dim=1))
return new_mems
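# Illustration (hypothetical helper, equivalent formulation): update_mems keeps at most
# max_memory_length of the most recent hidden states per layer by concatenating the old
# memory with the new hiddens and truncating from the left.
def _demo_update_mems(max_memory_length=6):
    mems = [torch.zeros(1, 4, 8)]    # 4 cached positions from previous segments
    hiddens = [torch.ones(1, 3, 8)]  # 3 freshly computed positions
    new_len = min(max_memory_length, mems[0].size(1) + hiddens[0].size(1))
    new_mem = torch.cat((mems[0], hiddens[0]), dim=1)[:, -new_len:]
    assert new_mem.shape == (1, 6, 8)
    return new_mem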
class GPT2ModelForLatent(PreTrainedModel):
"""GPT-2 Language model.
    The output of the forward method is the logits (parallel or
    serial, depending on the `parallel_output` flag).
"""
def _init_weights(self, module):
""" Initialize the weights """
pass # to bypass the not implement error
def __init__(self, config:TransfoXLConfig):
super().__init__(config)
self.config = config
self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size)
# Transformer
self.transformer = GPT2TransformerForLatent(config.num_layers,
config.hidden_size,
config.num_attention_heads,
config.max_sequence_length,
config.max_memory_length,
config.embedding_dropout_prob,
config.attention_dropout_prob,
config.output_dropout_prob,
config.checkpoint_activations,
config.latent_size,
config.checkpoint_num_layers,
relative_encoding=config.relative_encoding)
def forward(self, input_ids, attention_mask, latent_state, mems=None, labels=None, label_ignore=None):
embeddings = self.word_embeddings(input_ids)
# Transformer.
logits, hidden_layers = self.transformer(embeddings, attention_mask, latent_state, mems)
lm_logits = F.linear(logits,
self.word_embeddings.weight)
outputs = (lm_logits, hidden_layers) # (bz, sql, vocab), ()
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=label_ignore, reduction="none")
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
loss = torch.sum(loss.view(-1, shift_labels.shape[-1]), -1)
outputs = (loss,) + outputs
return outputs
def get_attn_mask(self, seq_length):
# mem_length = self.config.max_memory_length + 1
mem_length = self.config.max_memory_length
attention_mask = torch.ones((1, seq_length, seq_length + mem_length))
attention_mask = torch.tril(torch.triu(attention_mask, 1 - seq_length + mem_length), mem_length)
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
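# Illustration (hypothetical helper): the mask built by get_attn_mask. With
# max_memory_length == 0 it reduces to an ordinary lower-triangular causal mask; with
# memory, each query attends to a causal window of seq_length keys over the concatenated
# [memory, current] sequence.
def _demo_attn_mask(seq_length=4, mem_length=2):
    mask = torch.ones((1, seq_length, seq_length + mem_length))
    mask = torch.tril(torch.triu(mask, 1 - seq_length + mem_length), mem_length)
    assert mask.shape == (1, seq_length, seq_length + mem_length)
    return mask.unsqueeze(1)  # [1, 1, seq_length, seq_length + mem_length]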
| 27,514 | 41.925117 | 139 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/DAVAE/__init__.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DAVAE model. """
| 665 | 40.625 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/clip/configuration_taiyi_clip.py
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CLIP model configuration"""
# from transformers import MegatronBertConfig as BertConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.clip.configuration_clip import CLIPVisionConfig
import copy
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
if TYPE_CHECKING:
from transformers.processing_utils import ProcessorMixin
from transformers.utils import TensorType
from transformers.configuration_utils import PretrainedConfig
from transformers.onnx import OnnxConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class TaiyiCLIPConfig(PretrainedConfig):
r"""
[`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import CLIPConfig, CLIPModel
>>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPConfig()
>>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
>>> # Initializing a CLIPText and CLIPVision configuration
>>> config_text = CLIPTextConfig()
>>> config_vision = CLIPVisionConfig()
>>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
```"""
model_type = "clip"
is_composition = True
def __init__(
self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
):
super().__init__(**kwargs)
# If `_config_dict` exist, we use them for the backward compatibility.
text_config_dict = kwargs.pop("text_config_dict", None)
vision_config_dict = kwargs.pop("vision_config_dict", None)
if text_config_dict is not None:
text_config = text_config_dict
if vision_config_dict is not None:
vision_config = vision_config_dict
if text_config is None:
text_config = {}
logger.info("text_config is None. Initializing the CLIPTextConfig with default values.")
if vision_config is None:
vision_config = {}
logger.info("vision_config is None. initializing the CLIPVisionConfig with default values.")
self.text_config = BertConfig(**text_config)
self.vision_config = CLIPVisionConfig(**vision_config)
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
@classmethod
def from_text_vision_configs(cls, text_config: BertConfig, vision_config: CLIPVisionConfig, **kwargs):
r"""
Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model
configuration.
Returns:
[`CLIPConfig`]: An instance of a configuration object
"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
class CLIPOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
]
)
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-4
def generate_dummy_inputs(
self,
processor: "ProcessorMixin",
batch_size: int = -1,
seq_length: int = -1,
framework: Optional["TensorType"] = None,
) -> Mapping[str, Any]:
text_input_dict = super().generate_dummy_inputs(
processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
)
image_input_dict = super().generate_dummy_inputs(
processor.feature_extractor, batch_size=batch_size, framework=framework
)
return {**text_input_dict, **image_input_dict}
@property
def default_onnx_opset(self) -> int:
return 14
| 7,069 | 37.423913 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/clip/processing_taiyi_clip.py
|
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Taiyi-CLIP
"""
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import BatchEncoding
class TaiyiCLIPProcessor(ProcessorMixin):
r"""
Constructs a Taiyi-CLIP processor which wraps a Taiyi-CLIP feature extractor and a Taiyi-CLIP tokenizer into
a single processor.
[`TaiyiCLIPProcessor`] offers all the functionalities of [`CLIPFeatureExtractor`] and
[`BertTokenizerFast`]. See the [`~TaiyiCLIPProcessor.__call__`] and [`~TaiyiCLIPProcessor.decode`] for more
information.
Args:
feature_extractor ([`CLIPFeatureExtractor`]):
The feature extractor is a required input.
tokenizer ([`BertTokenizerFast`]):
The tokenizer is a required input.
"""
feature_extractor_class = "CLIPFeatureExtractor"
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
"""
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPFeatureExtractor's [`~CLIPFeatureExtractor.__call__`] if `images` is not `None`. Please refer to the
        docstring of the above two methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
if images is not None:
image_features = self.feature_extractor(images, return_tensors=return_tensors, **kwargs)
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 5,784 | 48.87069 | 136 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/clip/modeling_taiyi_clip.py
|
import torch
from torch import nn
from transformers import BertTokenizer
from transformers.models.clip.modeling_clip import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
CLIP_START_DOCSTRING,
CLIP_TEXT_INPUTS_DOCSTRING,
CLIP_VISION_INPUTS_DOCSTRING,
CLIP_INPUTS_DOCSTRING,
replace_return_docstrings,
CLIPVisionConfig,
CLIPPreTrainedModel,
CLIPVisionTransformer,
CLIPOutput,
CLIPConfig,
clip_loss,
)
from typing import Optional, Tuple, Union
# from transformers import MegatronBertConfig as BertConfig
# from transformers import MegatronBertModel as BertModel
from transformers.models.bert.modeling_bert import BertModel
from transformers.models.bert.configuration_bert import BertConfig
from .configuration_taiyi_clip import TaiyiCLIPConfig
@add_start_docstrings(CLIP_START_DOCSTRING)
class TaiyiCLIPModel(CLIPPreTrainedModel):
config_class = TaiyiCLIPConfig
def __init__(self, config: TaiyiCLIPConfig):
super().__init__(config)
if not isinstance(config.text_config, BertConfig):
raise ValueError(
"config.text_config is expected to be of type CLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, CLIPVisionConfig):
raise ValueError(
"config.vision_config is expected to be of type CLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = BertModel(text_config)
self.vision_model = CLIPVisionTransformer(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
def get_text_features(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`CLIPTextModel`].
Examples:
```python
>>> from transformers import CLIPTokenizer, CLIPModel
>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```"""
# Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
token_type_ids=token_type_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# pooled_output = text_outputs[1]
pooled_output = text_outputs[0][:, 0, :]
text_features = self.text_projection(pooled_output)
return text_features
@add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
def get_image_features(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`CLIPVisionModel`].
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import CLIPProcessor, CLIPModel
>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**inputs)
```"""
# Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = vision_outputs[1] # pooled_output
image_features = self.visual_projection(pooled_output)
return image_features
@add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CLIPOutput]:
r"""
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import CLIPProcessor, CLIPModel
>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = clip_loss(logits_per_text)
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds,
image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return CLIPOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
# use by webui
class TaiyiCLIPEmbedder(torch.nn.Module):
"""Uses the Taiyi CLIP transf ormer encoder for text (from Hugging Face)"""
def __init__(self, version="IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1", device="cuda", max_length=512):
super().__init__()
self.tokenizer = BertTokenizer.from_pretrained(version, subfolder="tokenizer")
self.transformer = BertModel.from_pretrained(version+"/text_encoder")
self.device = device
self.max_length = max_length
self.freeze()
def freeze(self):
self.transformer = self.transformer.eval()
for param in self.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
def encode(self, text):
return self(text)
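# Hedged usage sketch (hypothetical helper): scoring Chinese captions against an image with
# TaiyiCLIPModel. Loading the model/processor is left to the caller (no checkpoint name is
# asserted here); the processor is assumed to pair a Chinese BERT tokenizer with a CLIP
# feature extractor as in processing_taiyi_clip.py, and the config is assumed to return
# dict outputs (the default), so logits_per_image is available on the output object.
def _demo_taiyi_clip_similarity(model, processor, image):
    # "一只猫的照片" / "一只狗的照片" = "a photo of a cat" / "a photo of a dog"
    inputs = processor(text=["一只猫的照片", "一只狗的照片"], images=image,
                       return_tensors="pt", padding=True)
    outputs = model(**inputs)
    return outputs.logits_per_image.softmax(dim=-1)  # per-image caption probabilities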
| 11,583 | 39.362369 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/clip/__init__.py
|
from .modeling_taiyi_clip import TaiyiCLIPModel, TaiyiCLIPEmbedder
from .processing_taiyi_clip import TaiyiCLIPProcessor
__all__ = ['TaiyiCLIPModel', 'TaiyiCLIPProcessor', 'TaiyiCLIPEmbedder']
| 194 | 38 | 71 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/longformer/modeling_longformer.py
|
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Longformer model. """
import math
from dataclasses import dataclass
from typing import Optional, Tuple
from numpy.lib.function_base import kaiser
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN, gelu
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers import LongformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
_CONFIG_FOR_DOC = "LongformerConfig"
_TOKENIZER_FOR_DOC = "LongformerTokenizer"
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"allenai/longformer-base-4096",
"allenai/longformer-large-4096",
"allenai/longformer-large-4096-finetuned-triviaqa",
"allenai/longformer-base-4096-extra.pos.embd.only",
"allenai/longformer-large-4096-extra.pos.embd.only",
# See all Longformer models at https://huggingface.co/models?filter=longformer
]
@dataclass
class LongformerBaseModelOutput(ModelOutput):
"""
Base class for Longformer's outputs, with potential hidden states, local and global attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
:obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerBaseModelOutputWithPooling(ModelOutput):
"""
Base class for Longformer's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering Longformer models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice Longformer models.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerTokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention
mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first ``x`` values) and to every token in the attention window (remaining
``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in
the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the
attention weight of a token to itself is located at index ``x + attention_window / 2`` and the
``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window
/ 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the
attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x``
attention weights. If a token has global attention, the attention weights to all other tokens in
            :obj:`attentions` are set to 0; the values should be accessed from :obj:`global_attentions`.
global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
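# A small worked example of the local-attention index layout described in the docstrings above
# (illustrative only; the numbers are hypothetical):
#
#     # with attention_window = 512 and x = 1 token carrying global attention, the last
#     # dimension of `attentions` has 1 + 512 + 1 = 514 entries per (layer, head, token):
#     #   index 0              -> weight to the single global-attention token
#     #   indices 1 .. 256     -> weights to the 256 preceding tokens
#     #   index 1 + 256 = 257  -> the token's attention weight to itself
#     #   indices 258 .. 513   -> weights to the 256 succeeding tokens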
def _get_question_end_index(input_ids, sep_token_id):
"""
    Computes, for each sample in the batch, the index of the first occurrence of `sep_token_id` (the end of the question).
"""
sep_token_indices = (input_ids == sep_token_id).nonzero()
batch_size = input_ids.shape[0]
assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
assert (
sep_token_indices.shape[0] == 3 * batch_size
), f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
"""
    Computes the global attention mask by putting global attention on all tokens before `sep_token_id` if
    `before_sep_token` is True, else on all tokens after `sep_token_id`.
"""
question_end_index = _get_question_end_index(input_ids, sep_token_id)
question_end_index = question_end_index.unsqueeze(
dim=1) # size: batch_size x 1
# bool attention mask with True in locations of global attention
attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
if before_sep_token is True:
attention_mask = (attention_mask.expand_as(input_ids)
< question_end_index).to(torch.uint8)
else:
        # the last token is a separator and should not be counted; there are two separator tokens in the middle
attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * (
attention_mask.expand_as(input_ids) < input_ids.shape[-1]
).to(torch.uint8)
return attention_mask
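# Minimal usage sketch for the helper above (hypothetical ids; 102 stands in for the sep token id):
#
#     >>> import torch
#     >>> input_ids = torch.tensor([[7, 8, 102, 102, 9, 10, 102]])
#     >>> _compute_global_attention_mask(input_ids, sep_token_id=102, before_sep_token=True)
#     tensor([[1, 1, 0, 0, 0, 0, 0]], dtype=torch.uint8)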
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids (torch.Tensor): tensor of token ids in which padding positions equal `padding_idx`.
        padding_idx (int): id of the padding token.
    Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
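# Worked example for the helper above (values are illustrative):
#
#     >>> import torch
#     >>> create_position_ids_from_input_ids(torch.tensor([[5, 6, 1, 1]]), padding_idx=1)
#     tensor([[2, 3, 1, 1]])
#
# mask = [1, 1, 0, 0], cumsum * mask = [1, 2, 0, 0], plus padding_idx gives [2, 3, 1, 1].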
class LongformerEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
# Modify
# self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
# self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# self.padding_idx = config.pad_token_id
# self.position_embeddings = nn.Embedding(
# config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
# )
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
# if position_ids is None:
# if input_ids is not None:
# # Create the position ids from the input token ids. Any padded tokens remain padded.
# position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
# else:
# position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# if position_ids is None:
# position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
            # `self.position_ids` is no longer registered (the absolute position-embedding code
            # above is disabled), so take the device from the actual inputs instead.
            token_type_ids = torch.zeros(
                input_shape, dtype=torch.long,
                device=input_ids.device if input_ids is not None else inputs_embeds.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# Modify
# position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
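    # Note: unlike the stock Longformer embeddings, absolute position embeddings are not added
    # here; positional information is injected later via rotary embeddings (`RoPEmbedding`)
    # inside `LongformerSelfAttention`.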
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
            inputs_embeds (torch.Tensor): the directly provided input embeddings.
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
class RoPEmbedding(nn.Module):
def __init__(self, d_model):
super(RoPEmbedding, self).__init__()
self.d_model = d_model
div_term = torch.exp(torch.arange(
0, d_model, 2).float() * (-math.log(10000.0) / d_model))
self.register_buffer('div_term', div_term)
def forward(self, x, seq_dim=0):
        # x: [seq_len, num_heads, batch_size, per_head_hidden_size]
t = torch.arange(x.size(seq_dim), device=x.device).type_as(
self.div_term)
sinusoid_inp = torch.outer(t, self.div_term)
sin, cos = sinusoid_inp.sin(), sinusoid_inp.cos() # [s, hn]
o_shape = (sin.size(0), 1, 1, sin.size(1))
sin, cos = sin.view(*o_shape), cos.view(*o_shape) # [s, 1, 1, hn]
sin = torch.repeat_interleave(sin, 2, dim=-1)
cos = torch.repeat_interleave(cos, 2, dim=-1)
x2 = torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x)
x = cos * x + sin * x2
return x
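# A brief sketch of what `RoPEmbedding.forward` computes (notation only, not an exact trace):
# for each position t and each feature pair (x_{2i}, x_{2i+1}), with theta_i = 10000^(-2i/d_model),
# the output pair is
#     ( x_{2i}   * cos(t * theta_i) - x_{2i+1} * sin(t * theta_i),
#       x_{2i+1} * cos(t * theta_i) + x_{2i}   * sin(t * theta_i) )
# i.e. the standard rotary position embedding applied to the query/key vectors.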
class LongformerSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
# separate projection layers for tokens with global attention
# self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
# self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
# self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
self.rope_emb = RoPEmbedding(self.head_dim)
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
"""
:class:`LongformerSelfAttention` expects `len(hidden_states)` to be multiple of `attention_window`. Padding to
`attention_window` happens in :meth:`LongformerModel.forward` to avoid redoing the padding on each layer.
The `attention_mask` is changed in :meth:`LongformerModel.forward` from 0, 1, 2 to:
* -10000: no attention
* 0: local attention
* +10000: global attention
"""
# print(attention_mask.shape)
        if not self.config.use_sparse_attention:  # if sparse attention is disabled, fall back to standard full attention
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
# query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# print('query_vectors',query_vectors.shape)
query_vectors = query_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(1, 2)
key_vectors = key_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(1, 2)
query_vectors = self.rope_emb(query_vectors)
key_vectors = self.rope_emb(key_vectors)
query_vectors = query_vectors.transpose(0, 2) # [b,mh,s,hd]
key_vectors = key_vectors.transpose(0, 2).transpose(2, 3)
# print('query_vectors',query_vectors.shape)
query_vectors /= math.sqrt(self.head_dim)
attention_mask = self.get_extended_attention_mask(
attention_mask, attention_mask.shape, attention_mask.device)
attn_scores = torch.matmul(
query_vectors, key_vectors)+attention_mask
attn_scores = torch.nn.functional.softmax(attn_scores, dim=-1)
value_vectors = value_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1).transpose(1, 2)
outputs = torch.matmul(attn_scores, value_vectors).transpose(
1, 2).contiguous().view(batch_size, seq_len, self.num_heads*self.head_dim)
# print('output',outputs.shape)
outputs = (outputs,)
return outputs+(attn_scores,)
# print('hidden.shape',hidden_states.shape)
# print('attention_mask.shape',attention_mask.shape)
# print('att_mask:',attention_mask)
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
# query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
query_vectors = query_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(1, 2)
key_vectors = key_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(1, 2)
query_vectors = self.rope_emb(query_vectors)
key_vectors = self.rope_emb(key_vectors)
query_vectors = query_vectors.transpose(1, 2).transpose(0, 1)
key_vectors = key_vectors.transpose(1, 2).transpose(0, 1)
query_vectors /= math.sqrt(self.head_dim)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# print('att:',attn_scores.shape)
# values to pad for attention probs
remove_from_windowed_attention_mask = (
attention_mask != 0)[:, :, None, None]
# cast to fp32/fp16 then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, -10000.0
)
# diagonal mask with zeros everywhere and -inf inplace of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_mask.new_ones(size=float_mask.size()
), float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
        # compute local attention probs from global attention keys and concat over window dim
if is_global_attn:
# compute global attn indices required through out forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn)
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=key_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to local_attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = torch.cat(
(global_key_attn_scores, attn_scores), dim=-1)
# free memory
del global_key_attn_scores
attn_probs = nn.functional.softmax(
attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_probs = torch.masked_fill(
attn_probs, is_index_masked[:, :, None, None], 0.0)
attn_probs = attn_probs.type_as(attn_scores)
# free memory
del attn_scores
# apply dropout
attn_probs = nn.functional.dropout(
attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(
seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads,
self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(
seq_len, batch_size, embed_dim).contiguous()
# compute value for global attention and overwrite to attention output
# TODO: remove the redundant computation
if is_global_attn:
global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
global_query_vectors=query_vectors,
global_key_vectors=key_vectors,
global_value_vectors=value_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
layer_head_mask=layer_head_mask,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
)
# print('global_attn_output',global_attn_output.shape)
# get only non zero global attn output
nonzero_global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
# print('nonzero_global_attn_output',nonzero_global_attn_output.shape)
# overwrite values with global attention
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
len(is_local_index_global_attn_nonzero[0]), -1
)
# The attention weights for tokens with global attention are
# just filler values, they were never used to compute the output.
# Fill with 0 now, the correct values are in 'global_attn_probs'.
attn_probs[is_index_global_attn_nonzero] = 0
outputs = (attn_output.transpose(0, 1),)
if output_attentions:
outputs += (attn_probs,)
return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = nn.functional.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = nn.functional.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap*window_overlap
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
hidden_states.size(1) // (window_overlap * 2),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
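    # Worked example for `_chunk` (illustrative): with seq_len = 8 and window_overlap = 2,
    # the non-overlapping view has 8 // 4 = 2 chunks of length 4; halving the chunk stride
    # then yields 2 * 2 - 1 = 3 overlapping chunks covering positions
    # [0:4], [2:6] and [4:8], each sharing `window_overlap` positions with its neighbour.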
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
beginning_mask_2d = input_tensor.new_ones(
affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:,
:affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
# `== 1` converts to bool or uint8
beginning_input.masked_fill_(beginning_mask == 1, -float("inf"))
ending_input = input_tensor[:, -
affected_seq_len:, :, -(affected_seq_len + 1):]
ending_mask = ending_mask.expand(ending_input.size())
# `== 1` converts to bool or uint8
ending_input.masked_fill_(ending_mask == 1, -float("inf"))
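    # `_mask_invalid_locations` fills with -inf the diagonal positions at the very beginning
    # (attention to "previous" tokens that do not exist) and at the very end (attention to
    # "next" tokens past the sequence), so they contribute nothing after the softmax.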
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
        Matrix multiplication of query and key tensors using a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
overlap of size window_overlap
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(
batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(
batch_size * num_heads, seq_len, head_dim)
query = self._chunk(query, window_overlap)
key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
diagonal_chunked_attention_scores = torch.einsum(
"bcxd,bcyd->bcxy", (query, key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1,
window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1): -1, window_overlap + 1:
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap:
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
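    # Shape sketch for `_sliding_chunks_query_key_matmul` (illustrative): query and key of shape
    # (batch_size, seq_len, num_heads, head_dim) produce scores of shape
    # (batch_size, seq_len, num_heads, 2 * window_overlap + 1), i.e. one score per token for each
    # of its `window_overlap` predecessors, itself, and its `window_overlap` successors.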
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(
batch_size * num_heads, seq_len, head_dim)
# pad seq_len with w at the beginning of the sequence and another window overlap at the end
padded_value = nn.functional.pad(
value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads,
chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(
size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum(
"bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(
as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(
as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (
is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
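    # Worked example for `_get_global_attn_indices` (hypothetical mask): with
    #     is_index_global_attn = [[True, False, True],
    #                             [True, False, False]]
    # num_global_attn_indices = [2, 1], so max_num_global_attn_indices = 2;
    # is_index_global_attn_nonzero  -> batch/seq indices of the three global tokens;
    # is_local_index_global_attn    -> [[True, True], [True, False]], whose zero entry marks the
    # padding slot that `_concat_with_global_key_attn_probs` later fills with -10000.0.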
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum(
"blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = -10000.0
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(
-1, 0, max_num_global_attn_indices)
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(
1, 2), value_vectors_only_global.transpose(1, 2)
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
global_query_vectors,
global_key_vectors,
global_value_vectors,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
):
global_query_vectors = global_query_vectors.transpose(0, 1)
seq_len, batch_size, _, _ = global_query_vectors.shape
global_query_vectors_only_global = global_query_vectors.new_zeros(
max_num_global_attn_indices, batch_size, self.num_heads, self.head_dim)
global_query_vectors_only_global[is_local_index_global_attn_nonzero[::-1]] = global_query_vectors[
is_index_global_attn_nonzero[::-1]
]
seq_len_q, batch_size_q, _, _ = global_query_vectors_only_global.shape
# print('global_query_vectors_only_global',global_query_vectors_only_global.shape)
global_query_vectors_only_global = global_query_vectors_only_global.view(
seq_len_q, batch_size_q, self.num_heads, self.head_dim)
global_key_vectors = global_key_vectors.transpose(0, 1)
global_value_vectors = global_value_vectors.transpose(0, 1)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads,
self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads,
self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(
global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
seq_len,
], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}."
global_attn_scores = global_attn_scores.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = -10000.0
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
-10000.0,
)
global_attn_scores = global_attn_scores.view(
batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = nn.functional.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
# apply layer head masking
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs_float = global_attn_probs_float.view(
batch_size * self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs = nn.functional.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
self.head_dim,
], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}."
global_attn_probs = global_attn_probs.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
ones = torch.ones_like(attention_mask)
zero = torch.zeros_like(attention_mask)
attention_mask = torch.where(attention_mask < 0, zero, ones)
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
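# Worked example for `LongformerSelfAttention.get_extended_attention_mask` (illustrative values):
# a 2D mask [[-10000., 0., 10000.]] (masked / local / global, as prepared in
# `LongformerModel.forward`) is first collapsed to [[0., 1., 1.]], broadcast to
# [batch, 1, 1, seq_len], and returned as [[[[-10000., 0., 0.]]]], i.e. an additive bias that
# removes only the masked position from the softmax.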
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.self = LongformerSelfAttention(config, layer_id)
self.output = LongformerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_heads, self.self.head_dim, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        # (LongformerSelfAttention stores these as `num_heads` / `head_dim` / `embed_dim`)
        self.self.num_heads = self.self.num_heads - len(heads)
        self.self.embed_dim = self.self.head_dim * self.self.num_heads
        self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
attn_output = self.output(self_outputs[0], hidden_states)
outputs = (attn_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongformerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongformerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = LongformerAttention(config, layer_id)
self.intermediate = LongformerIntermediate(config)
self.output = LongformerOutput(config)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
self_attn_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
attn_output = self_attn_outputs[0]
outputs = self_attn_outputs[1:]
layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
)
outputs = (layer_output,) + outputs
return outputs
def ff_chunk(self, attn_output):
intermediate_output = self.intermediate(attn_output)
layer_output = self.output(intermediate_output, attn_output)
return layer_output
class LongformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
all_hidden_states = () if output_hidden_states else None
# All local attentions.
all_attentions = () if output_attentions else None
all_global_attentions = () if (output_attentions and is_global_attn) else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layer)
), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}."
for idx, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, is_global_attn, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
is_index_masked,
is_index_global_attn,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=head_mask[idx] if head_mask is not None else None,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
# bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
all_attentions = all_attentions + \
(layer_outputs[1].transpose(1, 2),)
if is_global_attn:
# bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
all_global_attentions = all_global_attentions + \
(layer_outputs[2].transpose(2, 3),)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
)
return LongformerBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
global_attentions=all_global_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LongformerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer
class LongformerLMHead(nn.Module):
"""Longformer Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
class LongformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LongformerConfig
base_model_prefix = "longformer"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
LONGFORMER_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.LongformerConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LONGFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LongformerTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to decide the attention given on each token, local attention or global attention. Tokens with global
            attention attend to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the `Longformer paper <https://arxiv.org/abs/2004.05150>`__ for more
details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Longformer Model outputting raw hidden-states without any specific head on top.",
LONGFORMER_START_DOCSTRING,
)
class LongformerModel(LongformerPreTrainedModel):
"""
    This class copies code from :class:`~transformers.RobertaModel` and overrides standard self-attention with
    Longformer self-attention to provide the ability to process long sequences following the self-attention approach
described in `Longformer: the Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy,
Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global
attention to extend to long documents without the O(n^2) increase in memory and compute.
The self-attention module :obj:`LongformerSelfAttention` implemented here supports the combination of local and
global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and
dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks.
    A future release will add support for autoregressive attention, but support for dilated attention requires a
custom CUDA kernel to be memory and compute efficient.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [
config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
self.embeddings = LongformerEmbeddings(config)
self.encoder = LongformerEncoder(config)
self.pooler = LongformerPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len %
attention_window) % attention_window
if padding_len > 0:
logger.info(
f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
f"`config.attention_window`: {attention_window}"
)
if input_ids is not None:
input_ids = nn.functional.pad(
input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = nn.functional.pad(
position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat(
[inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = nn.functional.pad(
attention_mask, (0, padding_len), value=False
) # no attention on the padding tokens
token_type_ids = nn.functional.pad(
token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
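    # Worked example for the padding above (illustrative numbers): with attention_window = 512
    # and seq_len = 1000, padding_len = (512 - 1000 % 512) % 512 = 24, so all inputs are
    # right-padded to 1024 = 2 * 512; the outer modulo makes padding_len 0 when seq_len is
    # already a multiple of the window.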
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
# => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
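    # Tiny example of the merge above (illustrative values): for one sequence,
    #   attention_mask        = [1, 1, 1, 0]   (last position is padding)
    #   global_attention_mask = [1, 0, 0, 0]   (global attention on the first token)
    #   merged attention_mask = [2, 1, 1, 0]   (0 = no attention, 1 = local, 2 = global)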
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
>>> import torch
>>> from transformers import LongformerModel, LongformerTokenizer
>>> model = LongformerModel.from_pretrained('allenai/longformer-base-4096')
>>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
>>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to local attention
        >>> global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) # initially, global attention is deactivated for all tokens
>>> global_attention_mask[:, [1, 4, 21,]] = 1 # Set global attention to random tokens for the sake of this example
... # Usually, set global attention based on the task. For example,
... # classification: the <s> token
... # QA: question tokens
... # LM: potentially on the beginning of sentences and paragraphs
>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
>>> sequence_output = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device)
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(
attention_mask, global_attention_mask)
if self.config.use_sparse_attention:
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)[
:, 0, 0, :
]
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(
sequence_output) if self.pooler is not None else None
# undo padding
if self.config.use_sparse_attention:
if padding_len > 0:
# unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
sequence_output = sequence_output[:, :-padding_len]
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return LongformerBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
global_attentions=encoder_outputs.global_attentions,
)
@add_start_docstrings("""Longformer Model with a `language modeling` head on top. """, LONGFORMER_START_DOCSTRING)
class LongformerForMaskedLM(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.lm_head = LongformerLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> import torch
>>> from transformers import LongformerForMaskedLM, LongformerTokenizer
>>> model = LongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096')
>>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
>>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = None # default is local attention everywhere, which is a good choice for MaskedLM
... # check ``LongformerModel.forward`` for more details how to set `attention_mask`
>>> outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)
>>> loss = outputs.loss
        >>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LongformerMaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@add_start_docstrings(
"""
Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForSequenceClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.classifier = LongformerClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LongformerSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
logger.info("Initializing global attention on CLS token...")
global_attention_mask = torch.zeros_like(input_ids)
# global attention on cls token
global_attention_mask[:, 0] = 1
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
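    # Loss selection above in short: num_labels == 1 -> "regression" (MSELoss on a single score);
    # num_labels > 1 with integer labels -> "single_label_classification" (CrossEntropyLoss);
    # otherwise, e.g. float multi-hot labels of shape (batch_size, num_labels) ->
    # "multi_label_classification" (BCEWithLogitsLoss).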
class LongformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
# take <s> token (equiv. to [CLS])
hidden_states = hidden_states[:, 0, :]
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
output = self.out_proj(hidden_states)
return output
@add_start_docstrings(
"""
Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
TriviaQA (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForQuestionAnswering(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
Returns:
Examples::
>>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering
>>> import torch
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> encoding = tokenizer(question, text, return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> # default is local attention everywhere
>>> # the forward method will automatically set global attention on question tokens
>>> attention_mask = encoding["attention_mask"]
>>> outputs = model(input_ids, attention_mask=attention_mask)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
>>> answer_tokens = all_tokens[torch.argmax(start_logits) :torch.argmax(end_logits)+1]
        >>> answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # decode the answer span back into a string
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
if input_ids is None:
logger.warning(
"It is not possible to automatically generate the `global_attention_mask` because input_ids is None. Please make sure that it is correctly set."
)
else:
# set global attention on question tokens automatically
global_attention_mask = _compute_global_attention_mask(
input_ids, self.config.sep_token_id)
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may have an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return LongformerQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@add_start_docstrings(
"""
Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForTokenClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LongformerTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(
loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@add_start_docstrings(
"""
Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForMultipleChoice(LongformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
LONGFORMER_INPUTS_DOCSTRING.format(
"batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LongformerMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
global_attention_mask=None,
head_mask=None,
labels=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# set global attention on question tokens
if global_attention_mask is None and input_ids is not None:
logger.info("Initializing global attention on multiple choice...")
# put global attention on all tokens after `config.sep_token_id`
global_attention_mask = torch.stack(
[
_compute_global_attention_mask(
input_ids[:, i], self.config.sep_token_id, before_sep_token=False)
for i in range(num_choices)
],
dim=1,
)
flat_input_ids = input_ids.view(-1, input_ids.size(-1)
) if input_ids is not None else None
flat_position_ids = position_ids.view(
-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(
-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(
-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_global_attention_mask = (
global_attention_mask.view(-1, global_attention_mask.size(-1))
if global_attention_mask is not None
else None
)
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2),
inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.longformer(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
global_attention_mask=flat_global_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
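    # Shape walk-through of the multiple-choice head above (illustrative sizes): input_ids of
    # shape (batch=2, num_choices=4, seq_len=128) is flattened to (8, 128) before the encoder;
    # the classifier maps each pooled output to one score, giving logits of shape (8, 1), which
    # are reshaped to (2, 4) so CrossEntropyLoss can pick one choice per example.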
| 118,901 | 46.82864 | 222 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/longformer/tokenization_longformer.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertTokenizer as LongformerTokenizer
| 677 | 38.882353 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/longformer/configuration_longformer.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import LongformerConfig
| 657 | 37.705882 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/longformer/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_torch_available
_import_structure = {
"configuration_longformer": ["LongformerConfig"],
"tokenization_longformer": ["LongformerTokenizer"],
}
if is_torch_available():
_import_structure["modeling_longformer"] = [
"LongformerModel",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerPreTrainedModel",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
]
if TYPE_CHECKING:
from .configuration_longformer import LongformerConfig
from .tokenization_longformer import LongformerTokenizer
if is_torch_available():
from .modeling_longformer import (
LongformerModel,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerPreTrainedModel,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
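# Usage sketch for the lazy module above (assuming the package is importable as
# `fengshen.models.longformer`, as the file path suggests):
#   from fengshen.models.longformer import LongformerModel, LongformerTokenizer
# The torch-dependent `modeling_longformer` submodule is only imported when one of its
# symbols is first accessed.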
| 1,823 | 31.571429 | 91 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/bert_for_tagging.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers.crf import CRF
from .layers.bert_output import BiaffineClassifierOutput, TokenClassifierOutput, SpanClassifierOutput
from transformers import BertPreTrainedModel
from transformers import BertModel
from .layers.linears import PoolerEndLogits, PoolerStartLogits, Biaffine
from torch.nn import CrossEntropyLoss
from .losses.focal_loss import FocalLoss
from .losses.label_smoothing import LabelSmoothingCrossEntropy
PRETRAINED_MODEL_ARCHIVE_MAP = {
'IDEA-CCNL/BertCrf': '/cognitive_comp/lujunyu/NER/outputs/ccks_crf/bert/best_checkpoint/pytorch_model.bin',
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'IDEA-CCNL/BertCrf': '/cognitive_comp/lujunyu/NER/outputs/ccks_crf/bert/best_checkpoint/config.json',
}
class BertLinear(BertPreTrainedModel):
def __init__(self, config, num_labels, loss_type):
super(BertLinear, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.loss_type = loss_type
def forward(self, input_ids, attention_mask=None, token_type_ids=None, input_len=None,
position_ids=None, head_mask=None, labels=None):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss=None
if labels is not None:
assert self.loss_type in ['lsr', 'focal', 'ce']
if self.loss_type == 'lsr':
loss_fct = LabelSmoothingCrossEntropy(ignore_index=0)
elif self.loss_type == 'focal':
loss_fct = FocalLoss(ignore_index=0)
else:
loss_fct = CrossEntropyLoss(ignore_index=0)
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss, logits) # (loss), scores, (hidden_states), (attentions)
class BertCrf(BertPreTrainedModel):
def __init__(self, config, num_labels, loss_type):
super(BertCrf, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.crf = CRF(num_tags=num_labels, batch_first=True)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, input_len=None):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss=None
if labels is not None:
            loss = -1 * self.crf(emissions=logits, tags=labels, mask=attention_mask)
return TokenClassifierOutput(loss, logits)
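    # Hedged usage sketch (not part of the original file): at inference time the emissions in
    # `logits` are typically decoded with the CRF layer, e.g.
    #   output = model(input_ids, attention_mask=mask)            # TokenClassifierOutput
    #   best_paths = model.crf.decode(output.logits, mask=mask)   # (nbest, batch, seq_len) tag ids
    # `CRF.decode` is defined in layers/crf.py (see below).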
class BertBiaffine(BertPreTrainedModel):
def __init__(self, config, num_labels, loss_type):
super(BertBiaffine, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.start_layer = torch.nn.Sequential(torch.nn.Linear(in_features=config.hidden_size, out_features=128), torch.nn.ReLU())
        self.end_layer = torch.nn.Sequential(torch.nn.Linear(in_features=config.hidden_size, out_features=128), torch.nn.ReLU())
        self.biaffne_layer = Biaffine(128, self.num_labels)
self.lstm = nn.LSTM(config.hidden_size, config.hidden_size//2, num_layers=2, dropout=0.5,
batch_first=True, bidirectional=True)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.loss_type = loss_type
def forward(self, input_ids, token_type_ids=None, attention_mask=None, span_labels=None, span_mask=None, input_len=None):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        sequence_output = outputs[0]
        sequence_output = self.dropout(self.lstm(sequence_output)[0])
        start_logits = self.start_layer(sequence_output)
        end_logits = self.end_layer(sequence_output)
        span_logits = self.biaffne_layer(start_logits, end_logits)
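        # Shape note (added for clarity): `span_logits` has shape (batch, seq_len, seq_len,
        # num_labels); entry [b, i, j, :] scores the span starting at token i and ending at
        # token j, and `span_mask` / `span_labels` are expected to be laid out the same way.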
# breakpoint()
span_loss=None
if span_labels is not None:
assert self.loss_type in ['lsr', 'focal', 'ce']
if self.loss_type == 'lsr':
loss_fct = LabelSmoothingCrossEntropy(ignore_index=0)
elif self.loss_type == 'focal':
loss_fct = FocalLoss(ignore_index=0)
else:
loss_fct = CrossEntropyLoss(ignore_index=0)
            span_logits = span_logits.contiguous()
            active_loss = span_mask.view(-1) == 1
            active_logits = span_logits.view(-1, self.num_labels)[active_loss]
            active_labels = span_labels.view(-1)[active_loss]
            span_loss = 10 * loss_fct(active_logits, active_labels)
return BiaffineClassifierOutput(loss=span_loss,span_logits=span_logits)
class BertSpan(BertPreTrainedModel):
def __init__(self, config, num_labels, loss_type, soft_label=True):
super(BertSpan, self).__init__(config)
self.soft_label = soft_label
self.num_labels = num_labels
self.loss_type = loss_type
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.start_fc = PoolerStartLogits(config.hidden_size, self.num_labels)
if self.soft_label:
self.end_fc = PoolerEndLogits(config.hidden_size + self.num_labels, self.num_labels)
else:
self.end_fc = PoolerEndLogits(config.hidden_size + 1, self.num_labels)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, subjects=None, input_len=None):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
start_logits = self.start_fc(sequence_output)
if start_positions is not None and self.training:
if self.soft_label:
batch_size = input_ids.size(0)
seq_len = input_ids.size(1)
label_logits = torch.FloatTensor(batch_size, seq_len, self.num_labels)
label_logits.zero_()
label_logits = label_logits.to(input_ids.device)
label_logits.scatter_(2, start_positions.unsqueeze(2), 1)
else:
label_logits = start_positions.unsqueeze(2).float()
else:
label_logits = F.softmax(start_logits, -1)
if not self.soft_label:
label_logits = torch.argmax(label_logits, -1).unsqueeze(2).float()
end_logits = self.end_fc(sequence_output, label_logits)
total_loss=None
if start_positions is not None and end_positions is not None:
assert self.loss_type in ['lsr', 'focal', 'ce']
            if self.loss_type == 'lsr':
loss_fct = LabelSmoothingCrossEntropy()
elif self.loss_type == 'focal':
loss_fct = FocalLoss()
else:
loss_fct = CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_start_logits = start_logits.view(-1, self.num_labels)[active_loss]
active_end_logits = end_logits.view(-1, self.num_labels)[active_loss]
active_start_labels = start_positions.view(-1)[active_loss]
active_end_labels = end_positions.view(-1)[active_loss]
start_loss = loss_fct(active_start_logits, active_start_labels)
end_loss = loss_fct(active_end_logits, active_end_labels)
total_loss = (start_loss + end_loss) / 2
return SpanClassifierOutput(loss=total_loss,start_logits=start_logits,end_logits=end_logits)
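    # Note on the soft-label branch above (added for clarity): during training with
    # soft_label=True, `scatter_(2, start_positions.unsqueeze(2), 1)` turns each token's gold
    # start label into a one-hot vector over the label set (e.g. label 3 with num_labels=5
    # becomes [0, 0, 0, 1, 0]); these one-hot (or softmaxed, at inference) start distributions
    # are concatenated to the hidden states inside PoolerEndLogits when predicting end labels.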
# class BertLstmCrf(BertPreTrainedModel):
# def __init__(self, config):
# super(BertLstmCrf, self).__init__(config)
# self.bert = BertModel(config)
# self.lstm = nn.LSTM(config.hidden_size, config.hidden_size//2, 2,
# batch_first=True, bidirectional=True)
# self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# self.dropout1 = nn.Dropout(config.hidden_dropout_prob)
# self.dropout2 = nn.Dropout(config.hidden_dropout_prob)
# self.crf = CRF(num_tags=config.num_labels, batch_first=True)
# self.layernorm = nn.LayerNorm(config.hidden_size)
# def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, input_lens=None):
# outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
# sequence_output =outputs[0]
# sequence_output = self.dropout1(sequence_output)
# sequence_output = self.dropout2(self.lstm(sequence_output)[0])
# logits = self.classifier(self.layernorm(sequence_output))
# outputs = (logits,)
# if labels is not None:
# loss = self.crf(emissions=logits, tags=labels, mask=attention_mask)
# outputs = (-1 * loss,) + outputs
# return outputs
| 9,885 | 47.460784 | 147 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/layers/linears.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FeedForwardNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, dropout_rate=0):
super(FeedForwardNetwork, self).__init__()
self.dropout_rate = dropout_rate
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x_proj = F.dropout(F.relu(self.linear1(x)), p=self.dropout_rate, training=self.training)
x_proj = self.linear2(x_proj)
return x_proj
class PoolerStartLogits(nn.Module):
def __init__(self, hidden_size, num_classes):
super(PoolerStartLogits, self).__init__()
self.dense = nn.Linear(hidden_size, num_classes)
def forward(self, hidden_states, p_mask=None):
x = self.dense(hidden_states)
return x
class PoolerEndLogits(nn.Module):
def __init__(self, hidden_size, num_classes):
super(PoolerEndLogits, self).__init__()
self.dense_0 = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(hidden_size)
self.dense_1 = nn.Linear(hidden_size, num_classes)
def forward(self, hidden_states, start_positions=None, p_mask=None):
x = self.dense_0(torch.cat([hidden_states, start_positions], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x)
return x
class Biaffine(nn.Module):
def __init__(self, in_size, out_size, bias_x=True, bias_y=True):
super().__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.out_size = out_size
self.U = torch.nn.Parameter(torch.randn(in_size + int(bias_x),out_size,in_size + int(bias_y)))
# self.U1 = self.U.view(size=(in_size + int(bias_x),-1))
#U.shape = [in_size,out_size,in_size]
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), dim=-1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), dim=-1)
"""
batch_size,seq_len,hidden=x.shape
bilinar_mapping=torch.matmul(x,self.U)
bilinar_mapping=bilinar_mapping.view(size=(batch_size,seq_len*self.out_size,hidden))
y=torch.transpose(y,dim0=1,dim1=2)
bilinar_mapping=torch.matmul(bilinar_mapping,y)
bilinar_mapping=bilinar_mapping.view(size=(batch_size,seq_len,self.out_size,seq_len))
bilinar_mapping=torch.transpose(bilinar_mapping,dim0=2,dim1=3)
"""
bilinar_mapping = torch.einsum('bxi,ioj,byj->bxyo', x, self.U, y)
return bilinar_mapping
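    # Worked shape example for the einsum above (illustrative sizes): with in_size=128,
    # out_size=num_labels and both biases enabled, x and y become (batch, seq, 129) after the
    # ones column is appended, U is (129, num_labels, 129), and 'bxi,ioj,byj->bxyo' yields a
    # (batch, seq, seq, num_labels) tensor scoring every (start position, end position) pair.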
| 2,701 | 39.328358 | 102 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/layers/bert_output.py
|
import torch
from dataclasses import dataclass
from typing import Optional
@dataclass
class TokenClassifierOutput:
"""
Base class for outputs of token classification models.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
@dataclass
class SpanClassifierOutput:
"""
Base class for outputs of span classification models.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.LongTensor = None
end_logits: torch.LongTensor = None
@dataclass
class BiaffineClassifierOutput:
"""
Base class for outputs of span classification models.
"""
loss: Optional[torch.FloatTensor] = None
span_logits: torch.FloatTensor = None
| 721 | 22.290323 | 58 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/layers/crf.py
|
import torch
import torch.nn as nn
from typing import List, Optional
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(self, emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'mean') -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError(f'invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8, device=tags.device)
if mask.dtype != torch.uint8:
mask = mask.byte()
self._validate(emissions, tags=tags, mask=mask)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
return llh.sum() / mask.float().sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None,
nbest: Optional[int] = None,
pad_tag: Optional[int] = None) -> List[List[List[int]]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
nbest (`int`): Number of most probable paths for each sequence
pad_tag (`int`): Tag at padded positions. Often input varies in length and
the length will be padded to the maximum length in the batch. Tags at
the padded positions will be assigned with a padding tag, i.e. `pad_tag`
Returns:
A PyTorch tensor of the best tag sequence for each batch of shape
(nbest, batch_size, seq_length)
"""
if nbest is None:
nbest = 1
if mask is None:
mask = torch.ones(emissions.shape[:2], dtype=torch.uint8,
device=emissions.device)
if mask.dtype != torch.uint8:
mask = mask.byte()
self._validate(emissions, mask=mask)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
if nbest == 1:
return self._viterbi_decode(emissions, mask, pad_tag).unsqueeze(0)
return self._viterbi_decode_nbest(emissions, mask, nbest, pad_tag)
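    # Minimal usage sketch (illustrative, not from the original file):
    #   >>> crf = CRF(num_tags=5, batch_first=True)
    #   >>> emissions = torch.randn(2, 7, 5)    # (batch_size, seq_length, num_tags)
    #   >>> tags = torch.randint(0, 5, (2, 7))  # gold tag ids
    #   >>> nll = -crf(emissions, tags)         # negative mean log-likelihood (training loss)
    #   >>> best = crf.decode(emissions)        # (nbest=1, batch_size, seq_length) best tag ids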
def _validate(self, emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
f'expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(self, emissions: torch.Tensor,
tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
seq_length, batch_size = tags.shape
mask = mask.float()
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score

    def _compute_normalizer(self, emissions: torch.Tensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
seq_length = emissions.size(0)
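        # This is the forward algorithm in log space: it computes
        #   log Z(x) = logsumexp over all tag sequences y of score(x, y),
        # the log partition function used to normalize the sequence scores.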
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)

    def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor,
                        pad_tag: Optional[int] = None) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
# return: (batch_size, seq_length)
if pad_tag is None:
pad_tag = 0
device = emissions.device
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history_idx = torch.zeros((seq_length, batch_size, self.num_tags),
dtype=torch.long, device=device)
oor_idx = torch.zeros((batch_size, self.num_tags),
dtype=torch.long, device=device)
oor_tag = torch.full((seq_length, batch_size), pad_tag,
dtype=torch.long, device=device)
# - score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# - history_idx saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# - oor_idx saves the best tags candidate transitioned from at the positions
# where mask is 0, i.e. out of range (oor)
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
            # Find the maximum score over all possible current tags
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(-1), next_score, score)
indices = torch.where(mask[i].unsqueeze(-1), indices, oor_idx)
history_idx[i - 1] = indices
# End transition score
# shape: (batch_size, num_tags)
end_score = score + self.end_transitions
_, end_tag = end_score.max(dim=1)
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# insert the best tag at each sequence end (last position with mask == 1)
history_idx = history_idx.transpose(1, 0).contiguous()
history_idx.scatter_(1, seq_ends.view(-1, 1, 1).expand(-1, 1, self.num_tags),
end_tag.view(-1, 1, 1).expand(-1, 1, self.num_tags))
history_idx = history_idx.transpose(1, 0).contiguous()
# The most probable path for each sequence
best_tags_arr = torch.zeros((seq_length, batch_size),
dtype=torch.long, device=device)
best_tags = torch.zeros(batch_size, 1, dtype=torch.long, device=device)
for idx in range(seq_length - 1, -1, -1):
best_tags = torch.gather(history_idx[idx], 1, best_tags)
best_tags_arr[idx] = best_tags.data.view(batch_size)
return torch.where(mask, best_tags_arr, oor_tag).transpose(0, 1)

    def _viterbi_decode_nbest(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor,
nbest: int,
                              pad_tag: Optional[int] = None) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
# return: (nbest, batch_size, seq_length)
if pad_tag is None:
pad_tag = 0
device = emissions.device
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history_idx = torch.zeros((seq_length, batch_size, self.num_tags, nbest),
dtype=torch.long, device=device)
oor_idx = torch.zeros((batch_size, self.num_tags, nbest),
dtype=torch.long, device=device)
oor_tag = torch.full((seq_length, batch_size, nbest), pad_tag,
dtype=torch.long, device=device)
        # - score is a tensor of size (batch_size, num_tags) where for every batch,
        #   value at column j stores the score of the best tag sequence so far that ends
        #   with tag j
        # - history_idx saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# - oor_idx saves the best tags candidate transitioned from at the positions
# where mask is 0, i.e. out of range (oor)
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
if i == 1:
broadcast_score = score.unsqueeze(-1)
broadcast_emission = emissions[i].unsqueeze(1)
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
else:
broadcast_score = score.unsqueeze(-1)
broadcast_emission = emissions[i].unsqueeze(1).unsqueeze(2)
# shape: (batch_size, num_tags, nbest, num_tags)
next_score = broadcast_score + self.transitions.unsqueeze(1) + broadcast_emission
            # Find the top `nbest` maximum scores over all possible current tags
# shape: (batch_size, nbest, num_tags)
next_score, indices = next_score.view(batch_size, -1, self.num_tags).topk(nbest, dim=1)
if i == 1:
score = score.unsqueeze(-1).expand(-1, -1, nbest)
indices = indices * nbest
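                # At the first step there is only one hypothesis per previous tag, so the
                # flat index (prev_tag * nbest + hypothesis) reduces to prev_tag * nbest;
                # the backtrace below recovers the tag again with `// nbest`.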
# convert to shape: (batch_size, num_tags, nbest)
next_score = next_score.transpose(2, 1)
indices = indices.transpose(2, 1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags, nbest)
score = torch.where(mask[i].unsqueeze(-1).unsqueeze(-1), next_score, score)
indices = torch.where(mask[i].unsqueeze(-1).unsqueeze(-1), indices, oor_idx)
history_idx[i - 1] = indices
# End transition score shape: (batch_size, num_tags, nbest)
end_score = score + self.end_transitions.unsqueeze(-1)
_, end_tag = end_score.view(batch_size, -1).topk(nbest, dim=1)
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# insert the best tag at each sequence end (last position with mask == 1)
history_idx = history_idx.transpose(1, 0).contiguous()
history_idx.scatter_(1, seq_ends.view(-1, 1, 1, 1).expand(-1, 1, self.num_tags, nbest),
end_tag.view(-1, 1, 1, nbest).expand(-1, 1, self.num_tags, nbest))
history_idx = history_idx.transpose(1, 0).contiguous()
# The most probable path for each sequence
best_tags_arr = torch.zeros((seq_length, batch_size, nbest),
dtype=torch.long, device=device)
best_tags = torch.arange(nbest, dtype=torch.long, device=device) \
.view(1, -1).expand(batch_size, -1)
for idx in range(seq_length - 1, -1, -1):
best_tags = torch.gather(history_idx[idx].view(batch_size, -1), 1, best_tags)
best_tags_arr[idx] = best_tags.data.view(batch_size, -1) // nbest
return torch.where(mask.unsqueeze(-1), best_tags_arr, oor_tag).permute(2, 1, 0)
| 19,972 | 47.596107 | 99 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/losses/label_smoothing.py
|
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Module):
    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothingCrossEntropy, self).__init__()
self.eps = eps
self.reduction = reduction
self.ignore_index = ignore_index

    def forward(self, output, target):
c = output.size()[-1]
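        # Label smoothing: combine the cross entropy against a uniform distribution
        # over the c classes (weight eps) with the usual NLL on the target class
        # (weight 1 - eps), reduced according to self.reduction:
        #   loss = eps * (-sum_j log p_j) / c + (1 - eps) * nll_loss(log p, target)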
log_preds = F.log_softmax(output, dim=-1)
        if self.reduction == 'sum':
loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1)
        if self.reduction == 'mean':
loss = loss.mean()
        return loss * self.eps / c + (1 - self.eps) * F.nll_loss(
            log_preds, target, reduction=self.reduction, ignore_index=self.ignore_index)
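# Illustrative usage (an assumed example, not part of the original module):
#   criterion = LabelSmoothingCrossEntropy(eps=0.1)
#   logits = torch.randn(8, num_classes)   # raw scores, shape (batch_size, num_classes)
#   loss = criterion(logits, targets)      # targets: LongTensor of shape (batch_size,)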
| 841 | 39.095238 | 103 |
py
|