repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/tests/test_audio_dataset.py |
# -*- coding: utf-8 -*-
import unittest
from onmt.inputters.audio_dataset import AudioSeqField, AudioDataReader
import itertools
import os
import shutil
import torch
import torchaudio
from onmt.tests.utils_for_tests import product_dict
class TestAudioField(unittest.TestCase):
INIT_CASES = list(product_dict(
pad_index=[0, 32],
batch_first=[False, True],
include_lengths=[True, False]))
PARAMS = list(product_dict(
batch_size=[1, 17],
max_len=[23],
full_length_seq=[0, 5, 16],
nfeats=[1, 5]))
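# Note: ``product_dict`` (imported from onmt.tests.utils_for_tests) is assumed
# here to yield one dict per element of the cartesian product of the keyword
# lists, e.g. (illustrative only):
#   product_dict(pad_index=[0, 32], batch_first=[False])
#   -> [{"pad_index": 0, "batch_first": False}, {"pad_index": 32, "batch_first": False}]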
@classmethod
def degenerate_case(cls, init_case, params):
if params["batch_size"] < params["full_length_seq"]:
return True
return False
@classmethod
def pad_inputs(cls, params):
lengths = torch.randint(1, params["max_len"],
(params["batch_size"],)).tolist()
lengths[params["full_length_seq"]] = params["max_len"]
fake_input = [
torch.randn((params["nfeats"], lengths[b]))
for b in range(params["batch_size"])]
return fake_input, lengths
@classmethod
def numericalize_inputs(cls, init_case, params):
bs = params["batch_size"]
max_len = params["max_len"]
lengths = torch.randint(1, max_len, (bs,))
lengths[params["full_length_seq"]] = max_len
nfeats = params["nfeats"]
fake_input = torch.full(
(bs, 1, nfeats, max_len), init_case["pad_index"])
for b in range(bs):
fake_input[b, :, :, :lengths[b]] = torch.randn(
(1, nfeats, lengths[b]))
if init_case["include_lengths"]:
fake_input = (fake_input, lengths)
return fake_input, lengths
def test_pad_shape_and_lengths(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params):
field = AudioSeqField(**init_case)
fake_input, lengths = self.pad_inputs(params)
outp = field.pad(fake_input)
if init_case["include_lengths"]:
outp, _ = outp
expected_shape = (
params["batch_size"], 1, params["nfeats"],
params["max_len"])
self.assertEqual(outp.shape, expected_shape)
def test_pad_returns_correct_lengths(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params) and \
init_case["include_lengths"]:
field = AudioSeqField(**init_case)
fake_input, lengths = self.pad_inputs(params)
_, outp_lengths = field.pad(fake_input)
self.assertEqual(outp_lengths, lengths)
def test_pad_pads_right_places_and_uses_correct_index(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params):
field = AudioSeqField(**init_case)
fake_input, lengths = self.pad_inputs(params)
outp = field.pad(fake_input)
if init_case["include_lengths"]:
outp, _ = outp
for b in range(params["batch_size"]):
for s in range(lengths[b], params["max_len"]):
self.assertTrue(
outp[b, :, :, s].allclose(
torch.tensor(float(init_case["pad_index"]))))
def test_numericalize_shape(self):
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params):
field = AudioSeqField(**init_case)
fake_input, lengths = self.numericalize_inputs(
init_case, params)
outp = field.numericalize(fake_input)
if init_case["include_lengths"]:
outp, _ = outp
if init_case["batch_first"]:
expected_shape = (
params["batch_size"], 1,
params["nfeats"], params["max_len"])
else:
expected_shape = (
params["max_len"], params["batch_size"],
1, params["nfeats"])
self.assertEqual(expected_shape, outp.shape,
init_case.__str__())
def test_process_shape(self):
# tests pad and numericalize integration
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params):
field = AudioSeqField(**init_case)
fake_input, lengths = self.pad_inputs(params)
outp = field.process(fake_input)
if init_case["include_lengths"]:
outp, _ = outp
if init_case["batch_first"]:
expected_shape = (
params["batch_size"], 1,
params["nfeats"], params["max_len"])
else:
expected_shape = (
params["max_len"], params["batch_size"],
1, params["nfeats"])
self.assertEqual(expected_shape, outp.shape,
init_case.__str__())
def test_process_lengths(self):
# tests pad and numericalize integration
for init_case, params in itertools.product(
self.INIT_CASES, self.PARAMS):
if not self.degenerate_case(init_case, params):
if init_case["include_lengths"]:
field = AudioSeqField(**init_case)
fake_input, lengths = self.pad_inputs(params)
lengths = torch.tensor(lengths, dtype=torch.int)
_, outp_lengths = field.process(fake_input)
self.assertTrue(outp_lengths.eq(lengths).all())
class TestAudioDataReader(unittest.TestCase):
# this test touches the file system, so it could be considered an
# integration test
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
_AUDIO_DATA_DIRNAME = "test_audio_data"
_AUDIO_DATA_DIR = os.path.join(_THIS_DIR, _AUDIO_DATA_DIRNAME)
_AUDIO_DATA_FMT = "test_noise_{:d}.wav"
_AUDIO_DATA_PATH_FMT = os.path.join(_AUDIO_DATA_DIR, _AUDIO_DATA_FMT)
_AUDIO_LIST_DIR = "test_audio_filenames"
# file to hold full paths to audio data
_AUDIO_LIST_PATHS_FNAME = "test_files.txt"
_AUDIO_LIST_PATHS_PATH = os.path.join(
_AUDIO_LIST_DIR, _AUDIO_LIST_PATHS_FNAME)
# file to hold audio paths relative to _AUDIO_DATA_DIR (i.e. file names)
_AUDIO_LIST_FNAMES_FNAME = "test_fnames.txt"
_AUDIO_LIST_FNAMES_PATH = os.path.join(
_AUDIO_LIST_DIR, _AUDIO_LIST_FNAMES_FNAME)
# it's ok if non-audio files co-exist with audio files in the data dir
_JUNK_FILE = os.path.join(
_AUDIO_DATA_DIR, "this_is_junk.txt")
_N_EXAMPLES = 20
_SAMPLE_RATE = 48000
_N_CHANNELS = 2
@classmethod
def setUpClass(cls):
if not os.path.exists(cls._AUDIO_DATA_DIR):
os.makedirs(cls._AUDIO_DATA_DIR)
if not os.path.exists(cls._AUDIO_LIST_DIR):
os.makedirs(cls._AUDIO_LIST_DIR)
with open(cls._JUNK_FILE, "w") as f:
f.write("this is some garbage\nShould have no impact.")
with open(cls._AUDIO_LIST_PATHS_PATH, "w") as f_list_fnames, \
open(cls._AUDIO_LIST_FNAMES_PATH, "w") as f_list_paths:
lengths = torch.randint(int(.5e5), int(1.5e6), (cls._N_EXAMPLES,))
for i in range(cls._N_EXAMPLES):
# dividing gets the noise in [-1, 1]
white_noise = torch.randn((cls._N_CHANNELS, lengths[i])) / 10
f_path = cls._AUDIO_DATA_PATH_FMT.format(i)
torchaudio.save(f_path, white_noise, cls._SAMPLE_RATE)
f_name_short = cls._AUDIO_DATA_FMT.format(i)
f_list_fnames.write(f_name_short + "\n")
f_list_paths.write(f_path + "\n")
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls._AUDIO_DATA_DIR)
shutil.rmtree(cls._AUDIO_LIST_DIR)
def test_read_from_dir_and_data_file_containing_filenames(self):
rdr = AudioDataReader(self._SAMPLE_RATE, window="hamming",
window_size=0.02, window_stride=0.01)
i = 0 # initialize since there's a sanity check on i
for i, aud in enumerate(rdr.read(
self._AUDIO_LIST_FNAMES_PATH, "src", self._AUDIO_DATA_DIR)):
self.assertEqual(aud["src"].shape[0], 481)
self.assertEqual(aud["src_path"],
self._AUDIO_DATA_PATH_FMT.format(i))
self.assertGreater(i, 0, "No audio data was read.")
def test_read_from_dir_and_data_file_containing_paths(self):
rdr = AudioDataReader(self._SAMPLE_RATE, window="hamming",
window_size=0.02, window_stride=0.01)
i = 0 # initialize since there's a sanity check on i
for i, aud in enumerate(rdr.read(
self._AUDIO_LIST_PATHS_PATH, "src", self._AUDIO_DATA_DIR)):
self.assertEqual(aud["src"].shape[0], 481)
self.assertEqual(aud["src_path"],
self._AUDIO_DATA_FMT.format(i))
self.assertGreater(i, 0, "No audio data was read.")
| 9,704 | 41.565789 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/tests/__init__.py | | 0 | 0 | 0 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/tests/test_structured_attention.py |
import unittest
from onmt.modules.structured_attention import MatrixTree
import torch
class TestStructuredAttention(unittest.TestCase):
def test_matrix_tree_marg_pdfs_sum_to_1(self):
dtree = MatrixTree()
q = torch.rand(1, 5, 5)
marg = dtree.forward(q)
self.assertTrue(
marg.sum(1).allclose(torch.tensor(1.0)))
| 361 | 24.857143 | 56 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/tests/test_embeddings.py |
import unittest
from onmt.modules.embeddings import Embeddings
import itertools
from copy import deepcopy
import torch
from onmt.tests.utils_for_tests import product_dict
class TestEmbeddings(unittest.TestCase):
INIT_CASES = list(product_dict(
word_vec_size=[172],
word_vocab_size=[319],
word_padding_idx=[17],
position_encoding=[False, True],
feat_merge=["first", "concat", "sum", "mlp"],
feat_vec_exponent=[-1, 1.1, 0.7],
feat_vec_size=[0, 200],
feat_padding_idx=[[], [29], [0, 1]],
feat_vocab_sizes=[[], [39], [401, 39]],
dropout=[0, 0.5],
fix_word_vecs=[False, True]
))
PARAMS = list(product_dict(
batch_size=[1, 14],
max_seq_len=[23]
))
@classmethod
def case_is_degenerate(cls, case):
no_feats = len(case["feat_vocab_sizes"]) == 0
if case["feat_merge"] != "first" and no_feats:
return True
if case["feat_merge"] == "first" and not no_feats:
return True
if case["feat_merge"] == "concat" and case["feat_vec_exponent"] != -1:
return True
if no_feats and case["feat_vec_exponent"] != -1:
return True
if len(case["feat_vocab_sizes"]) != len(case["feat_padding_idx"]):
return True
if case["feat_vec_size"] == 0 and case["feat_vec_exponent"] <= 0:
return True
if case["feat_merge"] == "sum":
if case["feat_vec_exponent"] != -1:
return True
if case["feat_vec_size"] != 0:
return True
if case["feat_vec_size"] != 0 and case["feat_vec_exponent"] != -1:
return True
return False
@classmethod
def cases(cls):
for case in cls.INIT_CASES:
if not cls.case_is_degenerate(case):
yield case
@classmethod
def dummy_inputs(cls, params, init_case):
max_seq_len = params["max_seq_len"]
batch_size = params["batch_size"]
fv_sizes = init_case["feat_vocab_sizes"]
n_words = init_case["word_vocab_size"]
voc_sizes = [n_words] + fv_sizes
pad_idxs = [init_case["word_padding_idx"]] + \
init_case["feat_padding_idx"]
lengths = torch.randint(0, max_seq_len, (batch_size,))
lengths[0] = max_seq_len
inps = torch.empty((max_seq_len, batch_size, len(voc_sizes)),
dtype=torch.long)
for f, (voc_size, pad_idx) in enumerate(zip(voc_sizes, pad_idxs)):
for b, len_ in enumerate(lengths):
inps[:len_, b, f] = torch.randint(0, voc_size-1, (len_,))
inps[len_:, b, f] = pad_idx
return inps
@classmethod
def expected_shape(cls, params, init_case):
wvs = init_case["word_vec_size"]
fvs = init_case["feat_vec_size"]
nf = len(init_case["feat_vocab_sizes"])
size = wvs
if init_case["feat_merge"] not in {"sum", "mlp"}:
size += nf * fvs
return params["max_seq_len"], params["batch_size"], size
def test_embeddings_forward_shape(self):
for params, init_case in itertools.product(self.PARAMS, self.cases()):
emb = Embeddings(**init_case)
dummy_in = self.dummy_inputs(params, init_case)
res = emb(dummy_in)
expected_shape = self.expected_shape(params, init_case)
self.assertEqual(res.shape, expected_shape, init_case.__str__())
def test_embeddings_trainable_params(self):
for params, init_case in itertools.product(self.PARAMS,
self.cases()):
emb = Embeddings(**init_case)
trainable_params = {n: p for n, p in emb.named_parameters()
if p.requires_grad}
# first check there's nothing unexpectedly not trainable
for key in emb.state_dict():
if key not in trainable_params:
if key.endswith("emb_luts.0.weight") and \
init_case["fix_word_vecs"]:
# ok: word embeddings shouldn't be trainable
# if word vecs are fixed
continue
if key.endswith(".pe.pe"):
# ok: positional encodings shouldn't be trainable
assert init_case["position_encoding"]
continue
else:
self.fail("Param {:s} is unexpectedly not "
"trainable.".format(key))
# then check nothing unexpectedly trainable
if init_case["fix_word_vecs"]:
self.assertFalse(
any(trainable_param.endswith("emb_luts.0.weight")
for trainable_param in trainable_params),
"Word embedding is trainable but word vecs are fixed.")
if init_case["position_encoding"]:
self.assertFalse(
any(trainable_p.endswith(".pe.pe")
for trainable_p in trainable_params),
"Positional encoding is trainable.")
def test_embeddings_trainable_params_update(self):
for params, init_case in itertools.product(self.PARAMS, self.cases()):
emb = Embeddings(**init_case)
trainable_params = {n: p for n, p in emb.named_parameters()
if p.requires_grad}
if len(trainable_params) > 0:
old_weights = deepcopy(trainable_params)
dummy_in = self.dummy_inputs(params, init_case)
res = emb(dummy_in)
pretend_loss = res.sum()
pretend_loss.backward()
dummy_optim = torch.optim.SGD(trainable_params.values(), 1)
dummy_optim.step()
for param_name in old_weights.keys():
self.assertTrue(
trainable_params[param_name]
.ne(old_weights[param_name]).any(),
param_name + " " + init_case.__str__())
| 6,207 | 40.66443 | 78 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/tests/test_attention.py |
"""
Here come the tests for attention types and their compatibility
"""
import unittest
import torch
from torch.autograd import Variable
import onmt
class TestAttention(unittest.TestCase):
def test_masked_global_attention(self):
source_lengths = torch.IntTensor([7, 3, 5, 2])
# illegal_weights_mask = torch.ByteTensor([
# [0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 1, 1, 1, 1],
# [0, 0, 0, 0, 0, 1, 1],
# [0, 0, 1, 1, 1, 1, 1]])
batch_size = source_lengths.size(0)
dim = 20
memory_bank = Variable(torch.randn(batch_size,
source_lengths.max(), dim))
hidden = Variable(torch.randn(batch_size, dim))
attn = onmt.modules.GlobalAttention(dim)
_, alignments = attn(hidden, memory_bank,
memory_lengths=source_lengths)
# TODO: fix for pytorch 0.3
# illegal_weights = alignments.masked_select(illegal_weights_mask)
# self.assertEqual(0.0, illegal_weights.data.sum())
| 1,072 | 28 | 74 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/hierarchical_transformer.py |
from onmt.modules.self_attention import MultiHeadSelfAttention
from onmt.encoders.encoder import EncoderBase
from onmt.utils.misc import nwise, aeq, sequence_mask
import torch, copy
import onmt
class ContainsNaN(Exception):
pass
def _check_for_nan(tensor, msg=''):
if (tensor!=tensor).any():
raise ContainsNaN(msg)
class FeedForward(torch.nn.Module):
def __init__(self, input_size, hidden_size, dropout):
super().__init__()
self.linear1 = torch.nn.Linear(input_size, hidden_size)
self.linear2 = torch.nn.Linear(hidden_size, input_size)
self.dropout = torch.nn.Dropout(dropout)
self.norm = torch.nn.LayerNorm(input_size)
def forward(self, src):
ret = self.linear1(self.norm(src))
ret = self.linear2(self.dropout(torch.nn.functional.relu(ret)))
return src + self.dropout(ret)  # residual connection
def update_dropout(self, dropout):
self.dropout.p = dropout
class TransformerEncoderLayer(torch.nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000–6010.
Users may modify or implement in a different way during application.
Args:
input_size: the number of expected features in the input (required).
heads: the number of heads in the multi-head self-attention (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
glu_depth: GLU depth passed to MultiHeadSelfAttention (default=-1).
dropout: the dropout value (default=0.1).
Examples::
>>> encoder_layer = TransformerEncoderLayer(input_size=512, heads=8)
"""
def __init__(self, input_size, heads, dim_feedforward=2048, glu_depth=-1, dropout=0.1):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadSelfAttention(input_size, heads,
dropout=dropout,
glu_depth=glu_depth)
self.norm = torch.nn.LayerNorm(input_size, dim_feedforward, dropout)
self.dropout = torch.nn.Dropout(dropout)
self.feedforward = FeedForward(input_size, dim_feedforward, dropout)
def forward(self, src, src_mask=None):
"""Pass the input through the layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
"""
src = src + self.dropout(self.self_attn(self.norm(src), attn_mask=src_mask)[0])
return self.feedforward(src)
def update_dropout(self, dropout):
self.feedforward.update_dropout(dropout)
self.dropout.p = dropout
class TransformerEncoder(torch.nn.Module):
"""TransformerEncoder is a stack of N transformer encoder layers
It is heavily inspired by pytorch's.
Args:
encoder_layer: an instance of the TransformerEncoderLayer class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
"""
def __init__(self, hidden_size, heads=8, num_layers=6, glu_depth=-1,
dim_feedforward=2048, dropout=0.1):
super().__init__()
self.layers = torch.nn.ModuleList([
TransformerEncoderLayer(input_size=hidden_size,
heads=heads,
dim_feedforward=dim_feedforward,
glu_depth=glu_depth,
dropout=dropout)
for _ in range(num_layers)
])
self.final_norm = torch.nn.LayerNorm(hidden_size)
def forward(self, src, mask=None):
r"""Pass the input through the all layers in turn.
Args:
src: the sequence to encode (required).
src_mask: the mask for the src sequence (optional).
"""
for encoder_layer in self.layers:
src = encoder_layer(src, mask)
return self.final_norm(src)
def update_dropout(self, dropout):
for layer in self.layers: layer.update_dropout(dropout)
def block_eye(n, size):
"""
Create a block-diagonal matrix of n blocks, where each block
is a (size, size) block of ones
"""
m1 = torch.ones(n, size, 1, size)
m2 = torch.eye(n).view(n, 1, n, 1)
return (m1*m2).view(n*size, n*size).to(torch.uint8)
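# Illustrative sketch of the construction above (toy values assumed, not from
# the original source); each diagonal block is an all-ones (size, size) block:
#   >>> block_eye(2, 2)
#   tensor([[1, 1, 0, 0],
#           [1, 1, 0, 0],
#           [0, 0, 1, 1],
#           [0, 0, 1, 1]], dtype=torch.uint8)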
def build_pad_mask(source, ent_size, pad_idx):
"""
[bsz, n_ents, ent_size]
To be used in attention mechanism in decoder
"""
mask = source[:, :, 0]
mask = (mask.transpose(0, 1)
.squeeze()
.contiguous()
.view(source.size(1), -1, ent_size)
.eq(pad_idx))
mask[:, :, 0] = 1 # we also mask the <ent> token
return mask
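# Illustrative sketch (assumed toy values): with ent_size=2, pad_idx=1 and a
# source whose first feature column over seq_len=4, bs=1 is [<ent>, w, <ent>, <pad>],
# the mask has shape [1, 2, 2] and equals [[[True, False], [True, True]]]:
# the leading <ent> slot of every entity and all padded slots are masked.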
def build_chunk_mask(lengths, ent_size):
"""
[bsz, n_ents, n_ents]
Filled with -inf where self-attention shouldn't attend, and zeros elsewhere.
"""
ones = lengths // ent_size
ones = sequence_mask(ones).unsqueeze(1).repeat(1, ones.max(), 1).to(lengths.device)
mask = torch.full(ones.shape, float('-inf')).to(lengths.device)
mask.masked_fill_(ones, 0)
return mask
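# Illustrative sketch (assuming sequence_mask marks positions < length as True):
# with lengths=[4, 2] and ent_size=2 there are 2 and 1 real entities, so the
# returned [bsz=2, 2, 2] mask is
#   [[[0, 0], [0, 0]],
#    [[0, -inf], [0, -inf]]]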
class HierarchicalTransformerEncoder(EncoderBase):
"""
Two encoders, one on the unit level and one on the chunk level
"""
def __init__(self, embeddings, units_layers=2, chunks_layers=2,
units_heads=2, chunks_heads=2, dim_feedforward=1000,
units_glu_depth=-1, chunks_glu_depth=-1, dropout=.5):
super().__init__()
self.embeddings = embeddings
self.ent_size = onmt.ENT_SIZE
self.unit_encoder = TransformerEncoder(hidden_size=embeddings.embedding_size,
heads=units_heads,
num_layers=units_layers,
dim_feedforward=dim_feedforward,
glu_depth=units_glu_depth,
dropout=dropout)
self.chunk_encoder = TransformerEncoder(hidden_size=embeddings.embedding_size,
heads=chunks_heads,
num_layers=chunks_layers,
dim_feedforward=dim_feedforward,
glu_depth=chunks_glu_depth,
dropout=dropout)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
dropout = opt.dropout[0] if type(opt.dropout) is list else opt.dropout
if isinstance(opt.enc_layers, int) and opt.enc_layers > 0:
print('opt.enc_layers is specified, over-riding units_layers and chunks_layers')
opt.units_layers = opt.enc_layers
opt.chunks_layers = opt.enc_layers
if isinstance(opt.heads, int) and opt.heads > 0:
print('opt.heads is specified, over-riding units_heads and chunks_heads')
opt.units_heads = opt.heads
opt.chunks_heads = opt.heads
if isinstance(opt.glu_depth, int) and opt.glu_depth > 0:
print('opt.glu_depth is specified, over-riding units_glu_depth and chunks_glu_depth')
opt.units_glu_depth = opt.glu_depth
opt.chunks_glu_depth = opt.glu_depth
return cls(
embeddings=embeddings,
units_layers=opt.units_layers,
chunks_layers=opt.chunks_layers,
units_heads=opt.units_heads,
chunks_heads=opt.chunks_heads,
dim_feedforward=opt.transformer_ff,
units_glu_depth=opt.units_glu_depth,
chunks_glu_depth=opt.chunks_glu_depth,
dropout=dropout
)
def forward(self, src, lengths=None):
"""
See :func:`EncoderBase.forward()`
src (tensor) [seq_len, bs, 2]
2 <-- (value, type)
"""
self._check_args(src, lengths)
seq_len, bsz, _ = src.shape
n_ents = seq_len // self.ent_size
# sanity check
assert seq_len % n_ents == 0
assert seq_len == lengths.max()
# We build the masks for self attention and decoding
eye = block_eye(n_ents, self.ent_size).to(src.device)
self_attn_mask = torch.full((seq_len, seq_len), float('-inf')).to(src.device)
self_attn_mask.masked_fill_(eye.to(src.device), 0)
unit_mask = build_pad_mask(src, self.ent_size, self.embeddings.word_padding_idx).to(src.device)
chunk_mask = build_chunk_mask(lengths, self.ent_size).to(src.device)
# embs [seq_len, bs, hidden_size]
embs, pos_embs = self.embeddings(src)
_check_for_nan(embs, 'after embedding layer')
_check_for_nan(pos_embs, 'after embedding layer')
# units [seq_len, bs, hidden_size]
units = self.unit_encoder(embs, mask=self_attn_mask)
# chunks & units_tokens [n_units, bs, hidden_size]
units_tokens = units[range(0, seq_len, self.ent_size), :, :]
chunks = self.chunk_encoder(units_tokens, mask=chunk_mask)
# memory bank: everything we want to pass to the decoder
# all tensors should have dim(1) be the batch size
memory_bank = (
chunks,
units,
pos_embs,
unit_mask.transpose(0, 1),
chunk_mask[:, 0, :].unsqueeze(0).eq(float('-inf'))
)
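# Shapes of the memory_bank elements (inferred from the construction above):
#   chunks:     [n_ents, bs, hidden]    entity-level representations
#   units:      [seq_len, bs, hidden]   token-level representations
#   pos_embs:   [seq_len, bs, hidden]   positional embeddings
#   unit_mask:  [n_ents, bs, ent_size]  True at <ent>/padded token slots
#   chunk_mask: [1, bs, n_ents]         True at padded entities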
# We average the units representation to give a final encoding
# and stay in line with the onmt framework
encoder_final = chunks.mean(dim=0).unsqueeze(0)
# encoder_final = (encoder_final, encoder_final)
return encoder_final, memory_bank, lengths
def update_dropout(self, dropout):
self.unit_encoder.update_dropout(dropout)
self.chunk_encoder.update_dropout(dropout)
| 10,534 | 39.833333 | 103 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/mean_encoder.py |
"""Define a minimal encoder."""
from onmt.encoders.encoder import EncoderBase
from onmt.utils.misc import sequence_mask
import torch
class MeanEncoder(EncoderBase):
"""A trivial non-recurrent encoder. Simply applies mean pooling.
Args:
num_layers (int): number of replicated layers
embeddings (onmt.modules.Embeddings): embedding module to use
"""
def __init__(self, num_layers, embeddings):
super(MeanEncoder, self).__init__()
self.num_layers = num_layers
self.embeddings = embeddings
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.enc_layers,
embeddings)
def forward(self, src, lengths=None):
"""See :func:`EncoderBase.forward()`"""
self._check_args(src, lengths)
emb = self.embeddings(src)
_, batch, emb_dim = emb.size()
if lengths is not None:
# we avoid padding while mean pooling
mask = sequence_mask(lengths).float()
mask = mask / lengths.unsqueeze(1).float()
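# mask.unsqueeze(1): (batch, 1, src_len); emb.transpose(0, 1): (batch, src_len, emb_dim);
# the bmm below therefore yields the mean over non-padded positions, shape (batch, emb_dim)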
mean = torch.bmm(mask.unsqueeze(1), emb.transpose(0, 1)).squeeze(1)
else:
mean = emb.mean(0)
mean = mean.expand(self.num_layers, batch, emb_dim)
memory_bank = emb
encoder_final = (mean, mean)
return encoder_final, memory_bank, lengths
| 1,404 | 29.543478 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/image_encoder.py |
"""Image Encoder."""
import torch.nn as nn
import torch.nn.functional as F
import torch
from onmt.encoders.encoder import EncoderBase
class ImageEncoder(EncoderBase):
"""A simple encoder CNN -> RNN for image src.
Args:
num_layers (int): number of encoder layers.
bidirectional (bool): bidirectional encoder.
rnn_size (int): size of hidden states of the rnn.
dropout (float): dropout probability.
"""
def __init__(self, num_layers, bidirectional, rnn_size, dropout,
image_chanel_size=3):
super(ImageEncoder, self).__init__()
self.num_layers = num_layers
self.num_directions = 2 if bidirectional else 1
self.hidden_size = rnn_size
self.layer1 = nn.Conv2d(image_chanel_size, 64, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer2 = nn.Conv2d(64, 128, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer3 = nn.Conv2d(128, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer4 = nn.Conv2d(256, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer5 = nn.Conv2d(256, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer6 = nn.Conv2d(512, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.batch_norm1 = nn.BatchNorm2d(256)
self.batch_norm2 = nn.BatchNorm2d(512)
self.batch_norm3 = nn.BatchNorm2d(512)
src_size = 512
dropout = dropout[0] if type(dropout) is list else dropout
self.rnn = nn.LSTM(src_size, int(rnn_size / self.num_directions),
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
self.pos_lut = nn.Embedding(1000, src_size)
@classmethod
def from_opt(cls, opt, embeddings=None):
"""Alternate constructor."""
if embeddings is not None:
raise ValueError("Cannot use embeddings with ImageEncoder.")
# why is the model_opt.__dict__ check necessary?
if "image_channel_size" not in opt.__dict__:
image_channel_size = 3
else:
image_channel_size = opt.image_channel_size
return cls(
opt.enc_layers,
opt.brnn,
opt.enc_rnn_size,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
image_channel_size
)
def load_pretrained_vectors(self, opt):
"""Pass in needed options only when modify function definition."""
pass
def forward(self, src, lengths=None):
"""See :func:`onmt.encoders.encoder.EncoderBase.forward()`"""
batch_size = src.size(0)
# (batch_size, 64, imgH, imgW)
# layer 1
src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)
# (batch_size, 64, imgH/2, imgW/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 128, imgH/2, imgW/2)
# layer 2
src = F.relu(self.layer2(src), True)
# (batch_size, 128, imgH/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer 3
# batch norm 1
src = F.relu(self.batch_norm1(self.layer3(src)), True)
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer4
src = F.relu(self.layer4(src), True)
# (batch_size, 256, imgH/2/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))
# (batch_size, 512, imgH/2/2/2, imgW/2/2)
# layer 5
# batch norm 2
src = F.relu(self.batch_norm2(self.layer5(src)), True)
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.max_pool2d(src, kernel_size=(2, 1), stride=(2, 1))
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.relu(self.batch_norm3(self.layer6(src)), True)
# # (batch_size, 512, H, W)
all_outputs = []
for row in range(src.size(2)):
inp = src[:, :, row, :].transpose(0, 2) \
.transpose(1, 2)
row_vec = torch.Tensor(batch_size).type_as(inp.data) \
.long().fill_(row)
pos_emb = self.pos_lut(row_vec)
with_pos = torch.cat(
(pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
outputs, hidden_t = self.rnn(with_pos)
all_outputs.append(outputs)
out = torch.cat(all_outputs, 0)
return hidden_t, out, lengths
def update_dropout(self, dropout):
self.rnn.dropout = dropout
| 4,839 | 35.666667 | 76 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/rnn_encoder.py |
"""Define RNN-based encoders."""
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from onmt.encoders.encoder import EncoderBase
from onmt.utils.rnn_factory import rnn_factory
class RNNEncoder(EncoderBase):
""" A generic recurrent neural network encoder.
Args:
rnn_type (str):
style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
bidirectional (bool) : use a bidirectional RNN
num_layers (int) : number of stacked layers
hidden_size (int) : hidden size of each layer
dropout (float) : dropout value for :class:`torch.nn.Dropout`
embeddings (onmt.modules.Embeddings): embedding module to use
"""
def __init__(self, rnn_type, bidirectional, num_layers,
hidden_size, dropout=0.0, embeddings=None,
use_bridge=False):
super(RNNEncoder, self).__init__()
assert embeddings is not None
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.embeddings = embeddings
self.rnn, self.no_pack_padded_seq = \
rnn_factory(rnn_type,
input_size=embeddings.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
# Initialize the bridge layer
self.use_bridge = use_bridge
if self.use_bridge:
self._initialize_bridge(rnn_type,
hidden_size,
num_layers)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.rnn_type,
opt.brnn,
opt.enc_layers,
opt.enc_rnn_size,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
embeddings,
opt.bridge)
def forward(self, src, lengths=None):
"""See :func:`EncoderBase.forward()`"""
self._check_args(src, lengths)
emb = self.embeddings(src)
# s_len, batch, emb_dim = emb.size()
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Tensor.
lengths_list = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths_list)
memory_bank, encoder_final = self.rnn(packed_emb)
if lengths is not None and not self.no_pack_padded_seq:
memory_bank = unpack(memory_bank)[0]
if self.use_bridge:
encoder_final = self._bridge(encoder_final)
return encoder_final, memory_bank, lengths
def _initialize_bridge(self, rnn_type,
hidden_size,
num_layers):
# LSTM has hidden and cell state, other only one
number_of_states = 2 if rnn_type == "LSTM" else 1
# Total number of states
self.total_hidden_dim = hidden_size * num_layers
# Build a linear layer for each
self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,
self.total_hidden_dim,
bias=True)
for _ in range(number_of_states)])
def _bridge(self, hidden):
"""Forward hidden state through bridge."""
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
return F.relu(result).view(size)
if isinstance(hidden, tuple): # LSTM
outs = tuple([bottle_hidden(layer, hidden[ix])
for ix, layer in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], hidden)
return outs
def update_dropout(self, dropout):
self.rnn.dropout = dropout
| 4,274 | 34.92437 | 73 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/audio_encoder.py |
"""Audio encoder"""
import math
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from onmt.utils.rnn_factory import rnn_factory
from onmt.encoders.encoder import EncoderBase
class AudioEncoder(EncoderBase):
"""A simple encoder CNN -> RNN for audio input.
Args:
rnn_type (str): Type of RNN (e.g. GRU, LSTM, etc).
enc_layers (int): Number of encoder layers.
dec_layers (int): Number of decoder layers.
brnn (bool): Bidirectional encoder.
enc_rnn_size (int): Size of hidden states of the rnn.
dec_rnn_size (int): Size of the decoder hidden states.
enc_pooling (str): A comma separated list either of length 1
or of length ``enc_layers`` specifying the pooling amount.
dropout (float): dropout probability.
sample_rate (int): audio sample rate (e.g. 16000 Hz).
window_size (float): window size in seconds, used to compute the number of input features.
"""
def __init__(self, rnn_type, enc_layers, dec_layers, brnn,
enc_rnn_size, dec_rnn_size, enc_pooling, dropout,
sample_rate, window_size):
super(AudioEncoder, self).__init__()
self.enc_layers = enc_layers
self.rnn_type = rnn_type
self.dec_layers = dec_layers
num_directions = 2 if brnn else 1
self.num_directions = num_directions
assert enc_rnn_size % num_directions == 0
enc_rnn_size_real = enc_rnn_size // num_directions
assert dec_rnn_size % num_directions == 0
self.dec_rnn_size = dec_rnn_size
dec_rnn_size_real = dec_rnn_size // num_directions
self.dec_rnn_size_real = dec_rnn_size_real
self.dec_rnn_size = dec_rnn_size
input_size = int(math.floor((sample_rate * window_size) / 2) + 1)
enc_pooling = enc_pooling.split(',')
assert len(enc_pooling) == enc_layers or len(enc_pooling) == 1
if len(enc_pooling) == 1:
enc_pooling = enc_pooling * enc_layers
enc_pooling = [int(p) for p in enc_pooling]
self.enc_pooling = enc_pooling
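# Illustrative examples of the parsing above: enc_pooling="2" with
# enc_layers=3 gives [2, 2, 2]; enc_pooling="2,1,2" gives [2, 1, 2].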
if type(dropout) is not list:
dropout = [dropout]
if max(dropout) > 0:
self.dropout = nn.Dropout(dropout[0])
else:
self.dropout = None
self.W = nn.Linear(enc_rnn_size, dec_rnn_size, bias=False)
self.batchnorm_0 = nn.BatchNorm1d(enc_rnn_size, affine=True)
self.rnn_0, self.no_pack_padded_seq = \
rnn_factory(rnn_type,
input_size=input_size,
hidden_size=enc_rnn_size_real,
num_layers=1,
dropout=dropout[0],
bidirectional=brnn)
self.pool_0 = nn.MaxPool1d(enc_pooling[0])
for l in range(enc_layers - 1):
batchnorm = nn.BatchNorm1d(enc_rnn_size, affine=True)
rnn, _ = \
rnn_factory(rnn_type,
input_size=enc_rnn_size,
hidden_size=enc_rnn_size_real,
num_layers=1,
dropout=dropout[0],
bidirectional=brnn)
setattr(self, 'rnn_%d' % (l + 1), rnn)
setattr(self, 'pool_%d' % (l + 1),
nn.MaxPool1d(enc_pooling[l + 1]))
setattr(self, 'batchnorm_%d' % (l + 1), batchnorm)
@classmethod
def from_opt(cls, opt, embeddings=None):
"""Alternate constructor."""
if embeddings is not None:
raise ValueError("Cannot use embeddings with AudioEncoder.")
return cls(
opt.rnn_type,
opt.enc_layers,
opt.dec_layers,
opt.brnn,
opt.enc_rnn_size,
opt.dec_rnn_size,
opt.audio_enc_pooling,
opt.dropout,
opt.sample_rate,
opt.window_size)
def forward(self, src, lengths=None):
"""See :func:`onmt.encoders.encoder.EncoderBase.forward()`"""
batch_size, _, nfft, t = src.size()
src = src.transpose(0, 1).transpose(0, 3).contiguous() \
.view(t, batch_size, nfft)
orig_lengths = lengths
lengths = lengths.view(-1).tolist()
for l in range(self.enc_layers):
rnn = getattr(self, 'rnn_%d' % l)
pool = getattr(self, 'pool_%d' % l)
batchnorm = getattr(self, 'batchnorm_%d' % l)
stride = self.enc_pooling[l]
packed_emb = pack(src, lengths)
memory_bank, tmp = rnn(packed_emb)
memory_bank = unpack(memory_bank)[0]
t, _, _ = memory_bank.size()
memory_bank = memory_bank.transpose(0, 2)
memory_bank = pool(memory_bank)
lengths = [int(math.floor((length - stride) / stride + 1))
for length in lengths]
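# e.g. (illustrative) length=100 with stride=2 -> floor((100 - 2) / 2 + 1) = 50 frames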
memory_bank = memory_bank.transpose(0, 2)
src = memory_bank
t, _, num_feat = src.size()
src = batchnorm(src.contiguous().view(-1, num_feat))
src = src.view(t, -1, num_feat)
if self.dropout and l + 1 != self.enc_layers:
src = self.dropout(src)
memory_bank = memory_bank.contiguous().view(-1, memory_bank.size(2))
memory_bank = self.W(memory_bank).view(-1, batch_size,
self.dec_rnn_size)
state = memory_bank.new_full((self.dec_layers * self.num_directions,
batch_size, self.dec_rnn_size_real), 0)
if self.rnn_type == 'LSTM':
# The encoder hidden is (layers*directions) x batch x dim.
encoder_final = (state, state)
else:
encoder_final = state
return encoder_final, memory_bank, orig_lengths.new_tensor(lengths)
def update_dropout(self, dropout):
self.dropout.p = dropout
for i in range(self.enc_layers - 1):
getattr(self, 'rnn_%d' % i).dropout = dropout
| 6,055 | 40.197279 | 77 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/encoder.py |
"""Base class for encoders and generic multi encoders."""
import torch.nn as nn
from onmt.utils.misc import aeq
class EncoderBase(nn.Module):
"""
Base encoder class. Specifies the interface used by different encoder types
and required by :class:`onmt.Models.NMTModel`.
.. mermaid::
graph BT
A[Input]
subgraph RNN
C[Pos 1]
D[Pos 2]
E[Pos N]
end
F[Memory_Bank]
G[Final]
A-->C
A-->D
A-->E
C-->F
D-->F
E-->F
E-->G
"""
@classmethod
def from_opt(cls, opt, embeddings=None):
raise NotImplementedError
def _check_args(self, src, lengths=None, hidden=None):
n_batch = src.size(1)
if lengths is not None:
n_batch_, = lengths.size()
aeq(n_batch, n_batch_)
def forward(self, src, lengths=None):
"""
Args:
src (LongTensor):
padded sequences of sparse indices ``(src_len, batch, nfeat)``
lengths (LongTensor): length of each sequence ``(batch,)``
Returns:
(FloatTensor, FloatTensor):
* final encoder state, used to initialize decoder
* memory bank for attention, ``(src_len, batch, hidden)``
"""
raise NotImplementedError
| 1,383 | 22.457627 | 79 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/transformer.py |
"""
Implementation of "Attention is All You Need"
"""
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.modules import MultiHeadedAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
from onmt.utils.misc import sequence_mask
class TransformerEncoderLayer(nn.Module):
"""
A single layer of the transformer encoder.
Args:
d_model (int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
heads (int): the number of head for MultiHeadedAttention.
d_ff (int): the second-layer of the PositionwiseFeedForward.
dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
max_relative_positions=0):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=attention_dropout,
max_relative_positions=max_relative_positions)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, inputs, mask):
"""
Args:
inputs (FloatTensor): ``(batch_size, src_len, model_dim)``
mask (LongTensor): ``(batch_size, 1, src_len)``
Returns:
(FloatTensor):
* outputs ``(batch_size, src_len, model_dim)``
"""
input_norm = self.layer_norm(inputs)
context, _ = self.self_attn(input_norm, input_norm, input_norm,
mask=mask, attn_type="self")
out = self.dropout(context) + inputs
return self.feed_forward(out)
def update_dropout(self, dropout, attention_dropout):
self.self_attn.update_dropout(attention_dropout)
self.feed_forward.update_dropout(dropout)
self.dropout.p = dropout
class TransformerEncoder(EncoderBase):
"""The Transformer encoder from "Attention is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`
.. mermaid::
graph BT
A[input]
B[multi-head self-attn]
C[feed forward]
O[output]
A --> B
B --> C
C --> O
Args:
num_layers (int): number of encoder layers
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
dropout (float): dropout parameters
embeddings (onmt.modules.Embeddings):
embeddings to use, should have positional encodings
Returns:
(torch.FloatTensor, torch.FloatTensor):
* embeddings ``(src_len, batch_size, model_dim)``
* memory_bank ``(src_len, batch_size, model_dim)``
"""
def __init__(self, num_layers, d_model, heads, d_ff, dropout,
attention_dropout, embeddings, max_relative_positions):
super(TransformerEncoder, self).__init__()
self.embeddings = embeddings
self.transformer = nn.ModuleList(
[TransformerEncoderLayer(
d_model, heads, d_ff, dropout, attention_dropout,
max_relative_positions=max_relative_positions)
for i in range(num_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.enc_layers,
opt.enc_rnn_size,
opt.heads,
opt.transformer_ff,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
opt.attention_dropout[0] if type(opt.attention_dropout)
is list else opt.attention_dropout,
embeddings,
opt.max_relative_positions)
def forward(self, src, lengths=None):
"""See :func:`EncoderBase.forward()`"""
self._check_args(src, lengths)
emb = self.embeddings(src)
out = emb.transpose(0, 1).contiguous()
mask = ~sequence_mask(lengths).unsqueeze(1)
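# mask: (batch, 1, src_len), True at padded positions
# (sequence_mask marks valid positions as True; the ~ inverts it)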
# Run the forward pass of every layer of the transformer.
for layer in self.transformer:
out = layer(out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout, attention_dropout):
self.embeddings.update_dropout(dropout)
for layer in self.transformer:
layer.update_dropout(dropout, attention_dropout)
| 4,661 | 33.279412 | 75 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/__init__.py |
"""Module defining encoders."""
from onmt.encoders.encoder import EncoderBase
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.encoders.hierarchical_transformer import HierarchicalTransformerEncoder
str2enc = {"rnn": RNNEncoder, "brnn": RNNEncoder, "cnn": CNNEncoder,
"transformer": TransformerEncoder, "img": ImageEncoder,
"audio": AudioEncoder, "mean": MeanEncoder, "htransformer": HierarchicalTransformerEncoder}
__all__ = ["EncoderBase", "TransformerEncoder", "RNNEncoder", "CNNEncoder",
"MeanEncoder", "str2enc", "HierarchicalTransformerEncoder"]
| 861 | 46.888889 | 102 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/encoders/cnn_encoder.py |
"""
Implementation of "Convolutional Sequence to Sequence Learning"
"""
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.utils.cnn_factory import shape_transform, StackedCNN
SCALE_WEIGHT = 0.5 ** 0.5
class CNNEncoder(EncoderBase):
"""Encoder based on "Convolutional Sequence to Sequence Learning"
:cite:`DBLP:journals/corr/GehringAGYD17`.
"""
def __init__(self, num_layers, hidden_size,
cnn_kernel_width, dropout, embeddings):
super(CNNEncoder, self).__init__()
self.embeddings = embeddings
input_size = embeddings.embedding_size
self.linear = nn.Linear(input_size, hidden_size)
self.cnn = StackedCNN(num_layers, hidden_size,
cnn_kernel_width, dropout)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
opt.enc_layers,
opt.enc_rnn_size,
opt.cnn_kernel_width,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
embeddings)
def forward(self, input, lengths=None, hidden=None):
"""See :class:`onmt.modules.EncoderBase.forward()`"""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
# s_len, batch, emb_dim = emb.size()
emb = emb.transpose(0, 1).contiguous()
emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)
emb_remap = self.linear(emb_reshape)
emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)
emb_remap = shape_transform(emb_remap)
out = self.cnn(emb_remap)
return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \
out.squeeze(3).transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout):
self.cnn.dropout.p = dropout
| 1,860 | 32.232143 | 73 | py |
data-to-text-hierarchical | data-to-text-hierarchical-master/onmt/translate/translation_server.py |
#!/usr/bin/env python
"""REST Translation server."""
from __future__ import print_function
import codecs
import sys
import os
import time
import json
import threading
import re
import traceback
import importlib
import torch
import onmt.opts
from onmt.utils.logging import init_logger
from onmt.utils.misc import set_random_seed
from onmt.utils.misc import check_model_config
from onmt.utils.alignment import to_word_align
from onmt.utils.parse import ArgumentParser
from onmt.translate.translator import build_translator
def critical(func):
"""Decorator for critical section (mutually exclusive code)"""
def wrapper(server_model, *args, **kwargs):
if sys.version_info[0] == 3:
if not server_model.running_lock.acquire(True, 120):
raise ServerModelError("Model %d running lock timeout"
% server_model.model_id)
else:
# semaphore doesn't have a timeout arg in Python 2.7
server_model.running_lock.acquire(True)
try:
o = func(server_model, *args, **kwargs)
except (Exception, RuntimeError):
server_model.running_lock.release()
raise
server_model.running_lock.release()
return o
return wrapper
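# Usage sketch (illustrative): decorate ServerModel methods that must not run
# concurrently, e.g.
#   @critical
#   def unload(self): ...
# The wrapper acquires server_model.running_lock around the call and releases
# it afterwards, also when the wrapped function raises an Exception.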
class Timer:
def __init__(self, start=False):
self.stime = -1
self.prev = -1
self.times = {}
if start:
self.start()
def start(self):
self.stime = time.time()
self.prev = self.stime
self.times = {}
def tick(self, name=None, tot=False):
t = time.time()
if not tot:
elapsed = t - self.prev
else:
elapsed = t - self.stime
self.prev = t
if name is not None:
self.times[name] = elapsed
return elapsed
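# Usage sketch (illustrative):
#   timer = Timer(start=True)
#   ...                             # e.g. load the model
#   timer.tick(name="load")         # seconds since the previous tick
#   ...                             # e.g. run translation
#   timer.tick(name="translation")
#   total = timer.tick(tot=True)    # seconds since start()
#   timer.times                     # {"load": ..., "translation": ...}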
class ServerModelError(Exception):
pass
class TranslationServer(object):
def __init__(self):
self.models = {}
self.next_id = 0
def start(self, config_file):
"""Read the config file and pre-/load the models."""
self.config_file = config_file
with open(self.config_file) as f:
self.confs = json.load(f)
self.models_root = self.confs.get('models_root', './available_models')
for i, conf in enumerate(self.confs["models"]):
if "models" not in conf:
if "model" in conf:
# backwards compatibility for confs
conf["models"] = [conf["model"]]
else:
raise ValueError("""Incorrect config file: missing 'models'
parameter for model #%d""" % i)
check_model_config(conf, self.models_root)
kwargs = {'timeout': conf.get('timeout', None),
'load': conf.get('load', None),
'preprocess_opt': conf.get('preprocess', None),
'tokenizer_opt': conf.get('tokenizer', None),
'postprocess_opt': conf.get('postprocess', None),
'on_timeout': conf.get('on_timeout', None),
'model_root': conf.get('model_root', self.models_root)
}
kwargs = {k: v for (k, v) in kwargs.items() if v is not None}
model_id = conf.get("id", None)
opt = conf["opt"]
opt["models"] = conf["models"]
self.preload_model(opt, model_id=model_id, **kwargs)
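# Example configuration sketch (keys inferred from the parsing above; values
# are illustrative, not taken from the original project):
# {
#   "models_root": "./available_models",
#   "models": [
#     {"id": 100, "models": ["model_0.pt"], "timeout": 600, "on_timeout": "to_cpu",
#      "load": true, "opt": {"gpu": 0, "beam_size": 5}}
#   ]
# }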
def clone_model(self, model_id, opt, timeout=-1):
"""Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options
"""
if model_id in self.models:
if opt is None:
opt = self.models[model_id].user_opt
opt["models"] = self.models[model_id].opt.models
return self.load_model(opt, timeout)
else:
raise ServerModelError("No such model '%s'" % str(model_id))
def load_model(self, opt, model_id=None, **model_kwargs):
"""Load a model given a set of options
"""
model_id = self.preload_model(opt, model_id=model_id, **model_kwargs)
load_time = self.models[model_id].load_time
return model_id, load_time
def preload_model(self, opt, model_id=None, **model_kwargs):
"""Preloading the model: updating internal datastructure
It will effectively load the model if `load` is set
"""
if model_id is not None:
if model_id in self.models.keys():
raise ValueError("Model ID %d already exists" % model_id)
else:
model_id = self.next_id
while model_id in self.models.keys():
model_id += 1
self.next_id = model_id + 1
print("Pre-loading model %d" % model_id)
model = ServerModel(opt, model_id, **model_kwargs)
self.models[model_id] = model
return model_id
def run(self, inputs):
"""Translate `inputs`
We keep the same format as the Lua version i.e.
``[{"id": model_id, "src": "sequence to translate"},{ ...}]``
We use inputs[0]["id"] as the model id
"""
model_id = inputs[0].get("id", 0)
if model_id in self.models and self.models[model_id] is not None:
return self.models[model_id].run(inputs)
else:
print("Error No such model '%s'" % str(model_id))
raise ServerModelError("No such model '%s'" % str(model_id))
def unload_model(self, model_id):
"""Manually unload a model.
It will free the memory and cancel the timer
"""
if model_id in self.models and self.models[model_id] is not None:
self.models[model_id].unload()
else:
raise ServerModelError("No such model '%s'" % str(model_id))
def list_models(self):
"""Return the list of available models
"""
models = []
for _, model in self.models.items():
models += [model.to_dict()]
return models
class ServerModel(object):
"""Wrap a model with server functionality.
Args:
opt (dict): Options for the Translator
model_id (int): Model ID
preprocess_opt (list): Options for the preprocessing step, or None
(extend for CJK)
tokenizer_opt (dict): Options for the tokenizer or None
postprocess_opt (list): Options for the postprocessing step, or None
(extend for CJK)
load (bool): whether to load the model during :func:`__init__()`
timeout (int): Seconds before running :func:`do_timeout()`
Negative values mean no timeout
on_timeout (str): Options are ["to_cpu", "unload"]. Set what to do on
timeout (see :func:`do_timeout()`.)
model_root (str): Path to the model directory
it must contain the model and tokenizer file
"""
def __init__(self, opt, model_id, preprocess_opt=None, tokenizer_opt=None,
postprocess_opt=None, load=False, timeout=-1,
on_timeout="to_cpu", model_root="./"):
self.model_root = model_root
self.opt = self.parse_opt(opt)
self.model_id = model_id
self.preprocess_opt = preprocess_opt
self.tokenizer_opt = tokenizer_opt
self.postprocess_opt = postprocess_opt
self.timeout = timeout
self.on_timeout = on_timeout
self.unload_timer = None
self.user_opt = opt
self.tokenizer = None
if len(self.opt.log_file) > 0:
log_file = os.path.join(model_root, self.opt.log_file)
else:
log_file = None
self.logger = init_logger(log_file=log_file,
log_file_level=self.opt.log_file_level)
self.loading_lock = threading.Event()
self.loading_lock.set()
self.running_lock = threading.Semaphore(value=1)
set_random_seed(self.opt.seed, self.opt.cuda)
if load:
self.load()
def parse_opt(self, opt):
"""Parse the option set passed by the user using `onmt.opts`
Args:
opt (dict): Options passed by the user
Returns:
opt (argparse.Namespace): full set of options for the Translator
"""
prec_argv = sys.argv
sys.argv = sys.argv[:1]
parser = ArgumentParser()
onmt.opts.translate_opts(parser)
models = opt['models']
if not isinstance(models, (list, tuple)):
models = [models]
opt['models'] = [os.path.join(self.model_root, model)
for model in models]
opt['src'] = "dummy_src"
for (k, v) in opt.items():
if k == 'models':
sys.argv += ['-model']
sys.argv += [str(model) for model in v]
elif type(v) == bool:
sys.argv += ['-%s' % k]
else:
sys.argv += ['-%s' % k, str(v)]
opt = parser.parse_args()
ArgumentParser.validate_translate_opts(opt)
opt.cuda = opt.gpu > -1
sys.argv = prec_argv
return opt
@property
def loaded(self):
return hasattr(self, 'translator')
def load(self):
self.loading_lock.clear()
timer = Timer()
self.logger.info("Loading model %d" % self.model_id)
timer.start()
try:
self.translator = build_translator(self.opt,
report_score=False,
out_file=codecs.open(
os.devnull, "w", "utf-8"))
except RuntimeError as e:
raise ServerModelError("Runtime Error: %s" % str(e))
timer.tick("model_loading")
if self.preprocess_opt is not None:
self.logger.info("Loading preprocessor")
self.preprocessor = []
for function_path in self.preprocess_opt:
function = get_function_by_path(function_path)
self.preprocessor.append(function)
if self.tokenizer_opt is not None:
self.logger.info("Loading tokenizer")
if "type" not in self.tokenizer_opt:
raise ValueError(
"Missing mandatory tokenizer option 'type'")
if self.tokenizer_opt['type'] == 'sentencepiece':
if "model" not in self.tokenizer_opt:
raise ValueError(
"Missing mandatory tokenizer option 'model'")
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
model_path = os.path.join(self.model_root,
self.tokenizer_opt['model'])
sp.Load(model_path)
self.tokenizer = sp
elif self.tokenizer_opt['type'] == 'pyonmttok':
if "params" not in self.tokenizer_opt:
raise ValueError(
"Missing mandatory tokenizer option 'params'")
import pyonmttok
if self.tokenizer_opt["mode"] is not None:
mode = self.tokenizer_opt["mode"]
else:
mode = None
# load can be called multiple times: modify copy
tokenizer_params = dict(self.tokenizer_opt["params"])
for key, value in self.tokenizer_opt["params"].items():
if key.endswith("path"):
tokenizer_params[key] = os.path.join(
self.model_root, value)
tokenizer = pyonmttok.Tokenizer(mode,
**tokenizer_params)
self.tokenizer = tokenizer
else:
raise ValueError("Invalid value for tokenizer type")
if self.postprocess_opt is not None:
self.logger.info("Loading postprocessor")
self.postprocessor = []
for function_path in self.postprocess_opt:
function = get_function_by_path(function_path)
self.postprocessor.append(function)
self.load_time = timer.tick()
self.reset_unload_timer()
self.loading_lock.set()
@critical
def run(self, inputs):
"""Translate `inputs` using this model
Args:
inputs (List[dict[str, str]]): [{"src": "..."},{"src": ...}]
Returns:
result (list): translations
times (dict): containing times
"""
self.stop_unload_timer()
timer = Timer()
timer.start()
self.logger.info("Running translation using %d" % self.model_id)
if not self.loading_lock.is_set():
self.logger.info(
"Model #%d is being loaded by another thread, waiting"
% self.model_id)
if not self.loading_lock.wait(timeout=30):
raise ServerModelError("Model %d loading timeout"
% self.model_id)
else:
if not self.loaded:
self.load()
timer.tick(name="load")
elif self.opt.cuda:
self.to_gpu()
timer.tick(name="to_gpu")
texts = []
head_spaces = []
tail_spaces = []
sslength = []
for i, inp in enumerate(inputs):
src = inp['src']
if src.strip() == "":
head_spaces.append(src)
texts.append("")
tail_spaces.append("")
else:
whitespaces_before, whitespaces_after = "", ""
match_before = re.search(r'^\s+', src)
match_after = re.search(r'\s+$', src)
if match_before is not None:
whitespaces_before = match_before.group(0)
if match_after is not None:
whitespaces_after = match_after.group(0)
head_spaces.append(whitespaces_before)
preprocessed_src = self.maybe_preprocess(src.strip())
tok = self.maybe_tokenize(preprocessed_src)
texts.append(tok)
sslength.append(len(tok.split()))
tail_spaces.append(whitespaces_after)
empty_indices = [i for i, x in enumerate(texts) if x == ""]
texts_to_translate = [x for x in texts if x != ""]
scores = []
predictions = []
if len(texts_to_translate) > 0:
try:
scores, predictions = self.translator.translate(
texts_to_translate,
batch_size=len(texts_to_translate)
if self.opt.batch_size == 0
else self.opt.batch_size)
except (RuntimeError, Exception) as e:
err = "Error: %s" % str(e)
self.logger.error(err)
self.logger.error("repr(text_to_translate): "
+ repr(texts_to_translate))
self.logger.error("model: #%s" % self.model_id)
self.logger.error("model opt: " + str(self.opt.__dict__))
self.logger.error(traceback.format_exc())
raise ServerModelError(err)
timer.tick(name="translation")
self.logger.info("""Using model #%d\t%d inputs
\ttranslation time: %f""" % (self.model_id, len(texts),
timer.times['translation']))
self.reset_unload_timer()
# NOTE: translator returns lists of `n_best` list
def flatten_list(_list): return sum(_list, [])
tiled_texts = [t for t in texts_to_translate
for _ in range(self.opt.n_best)]
results = flatten_list(predictions)
scores = [score_tensor.item()
for score_tensor in flatten_list(scores)]
results = [self.maybe_detokenize_with_align(result, src)
for result, src in zip(results, tiled_texts)]
aligns = [align for _, align in results]
results = [self.maybe_postprocess(seq) for seq, _ in results]
# build back results with empty texts
for i in empty_indices:
j = i * self.opt.n_best
results = results[:j] + [""] * self.opt.n_best + results[j:]
aligns = aligns[:j] + [None] * self.opt.n_best + aligns[j:]
scores = scores[:j] + [0] * self.opt.n_best + scores[j:]
head_spaces = [h for h in head_spaces for i in range(self.opt.n_best)]
tail_spaces = [h for h in tail_spaces for i in range(self.opt.n_best)]
results = ["".join(items)
for items in zip(head_spaces, results, tail_spaces)]
self.logger.info("Translation Results: %d", len(results))
return results, scores, self.opt.n_best, timer.times, aligns
def do_timeout(self):
"""Timeout function that frees GPU memory.
Moves the model to CPU or unloads it, depending on
the :attr:`self.on_timeout` value.
"""
if self.on_timeout == "unload":
self.logger.info("Timeout: unloading model %d" % self.model_id)
self.unload()
if self.on_timeout == "to_cpu":
self.logger.info("Timeout: sending model %d to CPU"
% self.model_id)
self.to_cpu()
@critical
def unload(self):
self.logger.info("Unloading model %d" % self.model_id)
del self.translator
if self.opt.cuda:
torch.cuda.empty_cache()
self.unload_timer = None
def stop_unload_timer(self):
if self.unload_timer is not None:
self.unload_timer.cancel()
def reset_unload_timer(self):
if self.timeout < 0:
return
self.stop_unload_timer()
self.unload_timer = threading.Timer(self.timeout, self.do_timeout)
self.unload_timer.start()
def to_dict(self):
hide_opt = ["models", "src"]
d = {"model_id": self.model_id,
"opt": {k: self.user_opt[k] for k in self.user_opt.keys()
if k not in hide_opt},
"models": self.user_opt["models"],
"loaded": self.loaded,
"timeout": self.timeout,
}
if self.tokenizer_opt is not None:
d["tokenizer"] = self.tokenizer_opt
return d
@critical
def to_cpu(self):
"""Move the model to CPU and clear CUDA cache."""
self.translator.model.cpu()
if self.opt.cuda:
torch.cuda.empty_cache()
def to_gpu(self):
"""Move the model to GPU."""
torch.cuda.set_device(self.opt.gpu)
self.translator.model.cuda()
def maybe_preprocess(self, sequence):
"""Preprocess the sequence (or not)
"""
if self.preprocess_opt is not None:
return self.preprocess(sequence)
return sequence
def preprocess(self, sequence):
"""Preprocess a single sequence.
Args:
sequence (str): The sequence to preprocess.
Returns:
sequence (str): The preprocessed sequence.
"""
if self.preprocessor is None:
raise ValueError("No preprocessor loaded")
for function in self.preprocessor:
sequence = function(sequence)
return sequence
def maybe_tokenize(self, sequence):
"""Tokenize the sequence (or not).
Same args/returns as `tokenize`
"""
if self.tokenizer_opt is not None:
return self.tokenize(sequence)
return sequence
def tokenize(self, sequence):
"""Tokenize a single sequence.
Args:
sequence (str): The sequence to tokenize.
Returns:
tok (str): The tokenized sequence.
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
tok = self.tokenizer.EncodeAsPieces(sequence)
tok = " ".join(tok)
elif self.tokenizer_opt["type"] == "pyonmttok":
tok, _ = self.tokenizer.tokenize(sequence)
tok = " ".join(tok)
return tok
@property
def tokenizer_marker(self):
marker = None
tokenizer_type = self.tokenizer_opt.get('type', None)
if tokenizer_type == "pyonmttok":
params = self.tokenizer_opt.get('params', None)
if params is not None:
if params.get("joiner_annotate", None) is not None:
marker = 'joiner'
elif params.get("spacer_annotate", None) is not None:
marker = 'spacer'
elif tokenizer_type == "sentencepiece":
marker = 'spacer'
return marker
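    # Illustrative sketch (assumed option layout, not from the original code):
    # a tokenizer_opt such as
    #   {"type": "pyonmttok", "params": {"joiner_annotate": True}}
    # yields marker == 'joiner', while any {"type": "sentencepiece", ...}
    # configuration yields 'spacer'.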
def maybe_detokenize_with_align(self, sequence, src):
"""De-tokenize (or not) the sequence (with alignment).
Args:
            sequence (str): The sequence to detokenize, possibly with
                an alignment separated by ` ||| `.
        Returns:
            sequence (str): The detokenized sequence.
            align (str): The alignment corresponding to the detokenized
                src/tgt, or None if there is no alignment in the output.
"""
align = None
if self.opt.report_align:
# output contain alignment
sequence, align = sequence.split(' ||| ')
align = self.maybe_convert_align(src, sequence, align)
sequence = self.maybe_detokenize(sequence)
return (sequence, align)
def maybe_detokenize(self, sequence):
"""De-tokenize the sequence (or not)
Same args/returns as :func:`tokenize()`
"""
if self.tokenizer_opt is not None and ''.join(sequence.split()) != '':
return self.detokenize(sequence)
return sequence
def detokenize(self, sequence):
"""Detokenize a single sequence
Same args/returns as :func:`tokenize()`
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
detok = self.tokenizer.DecodePieces(sequence.split())
elif self.tokenizer_opt["type"] == "pyonmttok":
detok = self.tokenizer.detokenize(sequence.split())
return detok
def maybe_convert_align(self, src, tgt, align):
"""Convert alignment to match detokenized src/tgt (or not).
Args:
src (str): The tokenized source sequence.
tgt (str): The tokenized target sequence.
            align (str): The alignment corresponding to the src/tgt pair.
        Returns:
            align (str): The alignment corresponding to the detokenized
                src/tgt.
"""
if self.tokenizer_marker is not None and ''.join(tgt.split()) != '':
return to_word_align(src, tgt, align, mode=self.tokenizer_marker)
return align
def maybe_postprocess(self, sequence):
"""Postprocess the sequence (or not)
"""
if self.postprocess_opt is not None:
return self.postprocess(sequence)
return sequence
    def postprocess(self, sequence):
        """Postprocess a single sequence.
Args:
sequence (str): The sequence to process.
Returns:
sequence (str): The postprocessed sequence.
"""
if self.postprocessor is None:
raise ValueError("No postprocessor loaded")
for function in self.postprocessor:
sequence = function(sequence)
return sequence
def get_function_by_path(path, args=[], kwargs={}):
module_name = ".".join(path.split(".")[:-1])
function_name = path.split(".")[-1]
try:
module = importlib.import_module(module_name)
except ValueError as e:
print("Cannot import module '%s'" % module_name)
raise e
function = getattr(module, function_name)
return function
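# Minimal usage sketch (hypothetical module/function names, not from the
# original code): given a file my_processors.py that defines lowercase(text),
#     fn = get_function_by_path("my_processors.lowercase")
#     fn("HELLO")  # -> "hello"
# This is presumably how `preprocess`/`postprocess` entries of the server
# configuration are resolved into the callables applied above.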
| 24,269 | 33.72103 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/decode_strategy.py
|
import torch
class DecodeStrategy(object):
"""Base class for generation strategies.
Args:
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
batch_size (int): Current batch size.
parallel_paths (int): Decoding strategies like beam search
use parallel paths. Each batch is repeated ``parallel_paths``
times in relevant state tensors.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
max_length (int): Longest acceptable sequence, not counting
begin-of-sentence (presumably there has been no EOS
yet if max_length is used as a cutoff).
block_ngram_repeat (int): Block beams where
``block_ngram_repeat``-grams repeat.
exclusion_tokens (set[int]): If a gram contains any of these
tokens, it may repeat.
return_attention (bool): Whether to work with attention too. If this
is true, it is assumed that the decoder is attentional.
Attributes:
pad (int): See above.
bos (int): See above.
eos (int): See above.
predictions (list[list[LongTensor]]): For each batch, holds a
list of beam prediction sequences.
scores (list[list[FloatTensor]]): For each batch, holds a
list of scores.
attention (list[list[FloatTensor or list[]]]): For each
batch, holds a list of attention sequence tensors
(or empty lists) having shape ``(step, inp_seq_len)`` where
``inp_seq_len`` is the length of the sample (not the max
length of all inp seqs).
alive_seq (LongTensor): Shape ``(B x parallel_paths, step)``.
This sequence grows in the ``step`` axis on each call to
:func:`advance()`.
is_finished (ByteTensor or NoneType): Shape
``(B, parallel_paths)``. Initialized to ``None``.
alive_attn (FloatTensor or NoneType): If tensor, shape is
``(step, B x parallel_paths, inp_seq_len)``, where ``inp_seq_len``
is the (max) length of the input sequence.
min_length (int): See above.
max_length (int): See above.
block_ngram_repeat (int): See above.
exclusion_tokens (set[int]): See above.
return_attention (bool): See above.
done (bool): See above.
"""
def __init__(self, pad, bos, eos, dot, batch_size, parallel_paths,
min_length, block_ngram_repeat, exclusion_tokens,
return_attention, max_length):
# magic indices
self.pad = pad
self.bos = bos
self.eos = eos
self.dot = dot
self.batch_size = batch_size
self.parallel_paths = parallel_paths
# result caching
self.predictions = [[] for _ in range(batch_size)]
self.scores = [[] for _ in range(batch_size)]
self.attention = [[] for _ in range(batch_size)]
self.alive_attn = None
self.min_length = min_length
self.max_length = max_length
self.block_ngram_repeat = block_ngram_repeat
n_paths = batch_size * parallel_paths
self.forbidden_tokens = [dict() for _ in range(n_paths)]
self.exclusion_tokens = exclusion_tokens
self.return_attention = return_attention
self.done = False
def initialize(self, memory_bank, src_lengths, src_map=None, device=None):
"""DecodeStrategy subclasses should override :func:`initialize()`.
        `initialize` should be called before all other actions.
        It is used to prepare the necessary ingredients for decoding.
"""
if device is None:
device = torch.device('cpu')
self.alive_seq = torch.full(
[self.batch_size * self.parallel_paths, 1], self.bos,
dtype=torch.long, device=device)
self.is_finished = torch.zeros(
[self.batch_size, self.parallel_paths],
dtype=torch.uint8, device=device)
return None, memory_bank, src_lengths, src_map
def __len__(self):
return self.alive_seq.shape[1]
def ensure_min_length(self, log_probs):
"""
        We set the probability of generating <eos> to (effectively) zero
        if the sequence is not long enough.
        We also make sure that the sequence cannot end until the last
        generated token is self.dot:
        we take the indices of all rows whose last token is not self.dot
        and set the <eos> log-probability to -1e20 for those rows.
"""
if len(self) <= self.min_length:
log_probs[:, self.eos] = -1e20
if self.dot is not None:
idx = self.alive_seq[:, -1].ne(self.dot).nonzero().squeeze(1)
log_probs[idx, self.eos] = -1e20
def ensure_max_length(self):
# add one to account for BOS. Don't account for EOS because hitting
# this implies it hasn't been found.
if len(self) == self.max_length + 1:
self.is_finished.fill_(1)
def block_ngram_repeats(self, log_probs):
"""
        We prevent the beam from going in any direction that would repeat
        any ngram of size <block_ngram_repeat> more than once.
        The way we do it: we maintain a list of all ngrams of size
        <block_ngram_repeat> that is updated each time the beam advances,
        and we manually push the score of any token that would lead to a
        repeated ngram down to -10e20 (i.e. a probability of roughly 0).
        This improves on the previous version's complexity:
        - previous version's complexity: batch_size * beam_size * len(self)
        - current version's complexity: batch_size * beam_size
        This improves on the previous version's accuracy:
        - The previous version blocked the whole beam, whereas here we only
          block specific tokens.
        - Before, translation would fail when all beams contained repeated
          ngrams. This can never happen here.
"""
        # we don't block anything if the user doesn't want it
if self.block_ngram_repeat <= 0:
return
        # we can't block anything if the beam is too short
if len(self) < self.block_ngram_repeat:
return
n = self.block_ngram_repeat - 1
for path_idx in range(self.alive_seq.shape[0]):
# we check paths one by one
current_ngram = tuple(self.alive_seq[path_idx, -n:].tolist())
forbidden_tokens = self.forbidden_tokens[path_idx].get(
current_ngram, None)
if forbidden_tokens is not None:
log_probs[path_idx, list(forbidden_tokens)] = -10e20
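    # Worked example (illustrative, not from the original code): with
    # block_ngram_repeat=3, a path whose last two tokens are (5, 9) and a
    # stored entry forbidden_tokens[path_idx][(5, 9)] == {7} mean that
    # generating token 7 would recreate the trigram (5, 9, 7), so
    # log_probs[path_idx, 7] is pushed down to -10e20.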
def maybe_update_forbidden_tokens(self):
"""We complete and reorder the list of forbidden_tokens"""
        # we don't forbid anything if the user doesn't want it
if self.block_ngram_repeat <= 0:
return
        # we can't forbid anything if the beam is too short
if len(self) < self.block_ngram_repeat:
return
n = self.block_ngram_repeat
forbidden_tokens = list()
for path_idx, seq in zip(self.select_indices, self.alive_seq):
# Reordering forbidden_tokens following beam selection
# We rebuild a dict to ensure we get the value and not the pointer
            # Please note that there is a mistake here: we should use
            # copy.deepcopy instead of dict. However, at the time of writing
            # the paper I did not catch it, so I leave it as is here.
forbidden_tokens.append(
dict(self.forbidden_tokens[path_idx]))
            # Grabbing the newly selected tokens and the associated ngram
current_ngram = tuple(seq[-n:].tolist())
# skip the blocking if any token in current_ngram is excluded
if set(current_ngram) & self.exclusion_tokens:
continue
forbidden_tokens[-1].setdefault(current_ngram[:-1], set())
forbidden_tokens[-1][current_ngram[:-1]].add(current_ngram[-1])
self.forbidden_tokens = forbidden_tokens
def advance(self, log_probs, attn):
"""DecodeStrategy subclasses should override :func:`advance()`.
Advance is used to update ``self.alive_seq``, ``self.is_finished``,
and, when appropriate, ``self.alive_attn``.
"""
raise NotImplementedError()
def update_finished(self):
"""DecodeStrategy subclasses should override :func:`update_finished()`.
``update_finished`` is used to update ``self.predictions``,
``self.scores``, and other "output" attributes.
"""
raise NotImplementedError()
| 8,761 | 38.647059 | 80 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/translation.py
|
""" Translation main class """
from __future__ import unicode_literals, print_function
import torch
from onmt.inputters.text_dataset import TextMultiField
from onmt.utils.alignment import build_align_pharaoh
class TranslationBuilder(object):
"""
Build a word-based translation from the batch output
of translator and the underlying dictionaries.
Replacement based on "Addressing the Rare Word
Problem in Neural Machine Translation" :cite:`Luong2015b`
Args:
data (onmt.inputters.Dataset): Data.
fields (List[Tuple[str, torchtext.data.Field]]): data fields
n_best (int): number of translations produced
replace_unk (bool): replace unknown words using attention
has_tgt (bool): will the batch have gold targets
"""
def __init__(self, data, fields, n_best=1, replace_unk=False,
has_tgt=False, phrase_table=""):
self.data = data
self.fields = fields
self._has_text_src = isinstance(
dict(self.fields)["src"], TextMultiField)
self.n_best = n_best
self.replace_unk = replace_unk
self.phrase_table = phrase_table
self.has_tgt = has_tgt
    def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):
        """Be careful: this function has been hardcoded to translate RotoWire
        and therefore splits tokens at '_'. Remove the `split('_')` line below
        to get a generic function."""
tgt_field = dict(self.fields)["tgt"].base_field
vocab = tgt_field.vocab
tokens = []
for tok in pred:
if tok < len(vocab):
to_extend = vocab.itos[tok]
else:
to_extend = src_vocab.itos[tok - len(vocab)]
to_extend = to_extend.split('_')
#to_extend = [to_extend]
tokens.extend(to_extend)
if tokens[-1] == tgt_field.eos_token:
tokens = tokens[:-1]
break
if self.replace_unk and attn is not None and src is not None:
for i in range(len(tokens)):
if tokens[i] == tgt_field.unk_token:
_, max_index = attn[i][:len(src_raw)].max(0)
tokens[i] = src_raw[max_index.item()]
if self.phrase_table != "":
with open(self.phrase_table, "r") as f:
for line in f:
if line.startswith(src_raw[max_index.item()]):
tokens[i] = line.split('|||')[1].strip()
return tokens
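    # Illustrative sketch (made-up sizes/names, not from the original code):
    # with a target vocab of 50000 entries, a predicted id of 50003 falls
    # outside the vocab and is copied from the per-example source vocab as
    # src_vocab.itos[50003 - 50000] == src_vocab.itos[3]; the RotoWire-specific
    # split('_') then turns a token like "LeBron_James" into ["LeBron", "James"].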
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert(len(translation_batch["gold_score"]) ==
len(translation_batch["predictions"]))
batch_size = batch.batch_size
preds, pred_score, attn, align, gold_score, indices = list(zip(
*sorted(zip(translation_batch["predictions"],
translation_batch["scores"],
translation_batch["attention"],
translation_batch["alignment"],
translation_batch["gold_score"],
batch.indices.data),
key=lambda x: x[-1])))
        if not any(align):  # when align is an empty nested list
align = [None] * batch_size
# Sorting
inds, perm = torch.sort(batch.indices)
if self._has_text_src:
src = batch.src[0][:, :, 0].index_select(1, perm)
else:
src = None
tgt = batch.tgt[:, :, 0].index_select(1, perm) \
if self.has_tgt else None
translations = []
for b in range(batch_size):
if self._has_text_src:
src_vocab = self.data.src_vocabs[inds[b]] \
if self.data.src_vocabs else None
src_raw = self.data.examples[inds[b]].src[0]
else:
src_vocab = None
src_raw = None
pred_sents = [self._build_target_tokens(
src[:, b] if src is not None else None,
src_vocab, src_raw,
preds[b][n], attn[b][n])
for n in range(self.n_best)]
gold_sent = None
if tgt is not None:
gold_sent = self._build_target_tokens(
src[:, b] if src is not None else None,
src_vocab, src_raw,
tgt[1:, b] if tgt is not None else None, None)
translation = Translation(
src[:, b] if src is not None else None,
src_raw, pred_sents, attn[b], pred_score[b],
gold_sent, gold_score[b], align[b]
)
translations.append(translation)
return translations
class Translation(object):
"""Container for a translated sentence.
Attributes:
src (LongTensor): Source word IDs.
src_raw (List[str]): Raw source words.
pred_sents (List[List[str]]): Words from the n-best translations.
pred_scores (List[List[float]]): Log-probs of n-best translations.
attns (List[FloatTensor]) : Attention distribution for each
translation.
gold_sent (List[str]): Words from gold translation.
gold_score (List[float]): Log-prob of gold translation.
word_aligns (List[FloatTensor]): Words Alignment distribution for
each translation.
"""
__slots__ = ["src", "src_raw", "pred_sents", "attns", "pred_scores",
"gold_sent", "gold_score", "word_aligns"]
def __init__(self, src, src_raw, pred_sents,
attn, pred_scores, tgt_sent, gold_score, word_aligns):
self.src = src
self.src_raw = src_raw
self.pred_sents = pred_sents
self.attns = attn
self.pred_scores = pred_scores
self.gold_sent = tgt_sent
self.gold_score = gold_score
self.word_aligns = word_aligns
def log(self, sent_number):
"""
Log translation.
"""
msg = ['\nSENT {}: {}\n'.format(sent_number, self.src_raw)]
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
msg.append('PRED {}: {}\n'.format(sent_number, pred_sent))
msg.append("PRED SCORE: {:.4f}\n".format(best_score))
if self.word_aligns is not None:
pred_align = self.word_aligns[0]
pred_align_pharaoh = build_align_pharaoh(pred_align)
pred_align_sent = ' '.join(pred_align_pharaoh)
msg.append("ALIGN: {}\n".format(pred_align_sent))
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
msg.append('GOLD {}: {}\n'.format(sent_number, tgt_sent))
msg.append(("GOLD SCORE: {:.4f}\n".format(self.gold_score)))
if len(self.pred_sents) > 1:
msg.append('\nBEST HYP:\n')
for score, sent in zip(self.pred_scores, self.pred_sents):
msg.append("[{:.4f}] {}\n".format(score, sent))
return "".join(msg)
| 7,226 | 37.854839 | 86 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/greedy_search.py
|
import torch
from onmt.translate.decode_strategy import DecodeStrategy
def sample_with_temperature(logits, sampling_temp, keep_topk):
"""Select next tokens randomly from the top k possible next tokens.
Samples from a categorical distribution over the ``keep_topk`` words using
the category probabilities ``logits / sampling_temp``.
Args:
logits (FloatTensor): Shaped ``(batch_size, vocab_size)``.
These can be logits (``(-inf, inf)``) or log-probs (``(-inf, 0]``).
(The distribution actually uses the log-probabilities
``logits - logits.logsumexp(-1)``, which equals the logits if
they are log-probabilities summing to 1.)
sampling_temp (float): Used to scale down logits. The higher the
value, the more likely it is that a non-max word will be
sampled.
keep_topk (int): This many words could potentially be chosen. The
other logits are set to have probability 0.
Returns:
(LongTensor, FloatTensor):
* topk_ids: Shaped ``(batch_size, 1)``. These are
the sampled word indices in the output vocab.
* topk_scores: Shaped ``(batch_size, 1)``. These
are essentially ``(logits / sampling_temp)[topk_ids]``.
"""
if sampling_temp == 0.0 or keep_topk == 1:
# For temp=0.0, take the argmax to avoid divide-by-zero errors.
# keep_topk=1 is also equivalent to argmax.
topk_scores, topk_ids = logits.topk(1, dim=-1)
if sampling_temp > 0:
topk_scores /= sampling_temp
else:
logits = torch.div(logits, sampling_temp)
if keep_topk > 0:
top_values, top_indices = torch.topk(logits, keep_topk, dim=1)
kth_best = top_values[:, -1].view([-1, 1])
kth_best = kth_best.repeat([1, logits.shape[1]]).float()
# Set all logits that are not in the top-k to -10000.
# This puts the probabilities close to 0.
ignore = torch.lt(logits, kth_best)
logits = logits.masked_fill(ignore, -10000)
dist = torch.distributions.Multinomial(
logits=logits, total_count=1)
topk_ids = torch.argmax(dist.sample(), dim=1, keepdim=True)
topk_scores = logits.gather(dim=1, index=topk_ids)
return topk_ids, topk_scores
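# Minimal usage sketch (assumed shapes, not from the original code):
#     logits = torch.randn(4, 100)   # (batch_size, vocab_size)
#     ids, scores = sample_with_temperature(logits, sampling_temp=0.7,
#                                           keep_topk=10)
#     # ids and scores both have shape (4, 1)
# With sampling_temp=0.0 or keep_topk=1 the call reduces to a plain argmax.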
class GreedySearch(DecodeStrategy):
"""Select next tokens randomly from the top k possible next tokens.
The ``scores`` attribute's lists are the score, after applying temperature,
of the final prediction (either EOS or the final token in the event
that ``max_length`` is reached)
Args:
pad (int): See base.
bos (int): See base.
eos (int): See base.
batch_size (int): See base.
min_length (int): See base.
max_length (int): See base.
block_ngram_repeat (int): See base.
exclusion_tokens (set[int]): See base.
return_attention (bool): See base.
max_length (int): See base.
sampling_temp (float): See
:func:`~onmt.translate.greedy_search.sample_with_temperature()`.
keep_topk (int): See
:func:`~onmt.translate.greedy_search.sample_with_temperature()`.
"""
def __init__(self, pad, bos, eos, batch_size, min_length,
block_ngram_repeat, exclusion_tokens, return_attention,
max_length, sampling_temp, keep_topk):
assert block_ngram_repeat == 0
super(GreedySearch, self).__init__(
pad, bos, eos, batch_size, 1, min_length, block_ngram_repeat,
exclusion_tokens, return_attention, max_length)
self.sampling_temp = sampling_temp
self.keep_topk = keep_topk
self.topk_scores = None
def initialize(self, memory_bank, src_lengths, src_map=None, device=None):
"""Initialize for decoding."""
fn_map_state = None
if isinstance(memory_bank, tuple):
mb_device = memory_bank[0].device
else:
mb_device = memory_bank.device
if device is None:
device = mb_device
self.memory_lengths = src_lengths
super(GreedySearch, self).initialize(
memory_bank, src_lengths, src_map, device)
self.select_indices = torch.arange(
self.batch_size, dtype=torch.long, device=device)
self.original_batch_idx = torch.arange(
self.batch_size, dtype=torch.long, device=device)
return fn_map_state, memory_bank, self.memory_lengths, src_map
@property
def current_predictions(self):
return self.alive_seq[:, -1]
@property
def batch_offset(self):
return self.select_indices
def advance(self, log_probs, attn):
"""Select next tokens randomly from the top k possible next tokens.
Args:
log_probs (FloatTensor): Shaped ``(batch_size, vocab_size)``.
These can be logits (``(-inf, inf)``) or log-probs
(``(-inf, 0]``). (The distribution actually uses the
log-probabilities ``logits - logits.logsumexp(-1)``,
which equals the logits if they are log-probabilities summing
to 1.)
attn (FloatTensor): Shaped ``(1, B, inp_seq_len)``.
"""
self.ensure_min_length(log_probs)
self.block_ngram_repeats(log_probs)
topk_ids, self.topk_scores = sample_with_temperature(
log_probs, self.sampling_temp, self.keep_topk)
self.is_finished = topk_ids.eq(self.eos)
self.alive_seq = torch.cat([self.alive_seq, topk_ids], -1)
if self.return_attention:
if self.alive_attn is None:
self.alive_attn = attn
else:
self.alive_attn = torch.cat([self.alive_attn, attn], 0)
self.ensure_max_length()
def update_finished(self):
"""Finalize scores and predictions."""
# shape: (sum(~ self.is_finished), 1)
finished_batches = self.is_finished.view(-1).nonzero()
for b in finished_batches.view(-1):
b_orig = self.original_batch_idx[b]
self.scores[b_orig].append(self.topk_scores[b, 0])
self.predictions[b_orig].append(self.alive_seq[b, 1:])
self.attention[b_orig].append(
self.alive_attn[:, b, :self.memory_lengths[b]]
if self.alive_attn is not None else [])
self.done = self.is_finished.all()
if self.done:
return
is_alive = ~self.is_finished.view(-1)
self.alive_seq = self.alive_seq[is_alive]
if self.alive_attn is not None:
self.alive_attn = self.alive_attn[:, is_alive]
self.select_indices = is_alive.nonzero().view(-1)
self.original_batch_idx = self.original_batch_idx[is_alive]
| 6,843 | 39.258824 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/beam_search.py
|
import torch
from onmt.translate import penalties
from onmt.translate.decode_strategy import DecodeStrategy
from onmt.utils.misc import tile
import warnings
class BeamSearch(DecodeStrategy):
"""Generation beam search.
Note that the attributes list is not exhaustive. Rather, it highlights
tensors to document their shape. (Since the state variables' "batch"
size decreases as beams finish, we denote this axis with a B rather than
``batch_size``).
Args:
beam_size (int): Number of beams to use (see base ``parallel_paths``).
batch_size (int): See base.
pad (int): See base.
bos (int): See base.
eos (int): See base.
n_best (int): Don't stop until at least this many beams have
reached EOS.
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): See base.
max_length (int): See base.
return_attention (bool): See base.
block_ngram_repeat (int): See base.
exclusion_tokens (set[int]): See base.
Attributes:
top_beam_finished (ByteTensor): Shape ``(B,)``.
_batch_offset (LongTensor): Shape ``(B,)``.
_beam_offset (LongTensor): Shape ``(batch_size x beam_size,)``.
alive_seq (LongTensor): See base.
topk_log_probs (FloatTensor): Shape ``(B x beam_size,)``. These
are the scores used for the topk operation.
memory_lengths (LongTensor): Lengths of encodings. Used for
masking attentions.
select_indices (LongTensor or NoneType): Shape
``(B x beam_size,)``. This is just a flat view of the
``_batch_index``.
topk_scores (FloatTensor): Shape
``(B, beam_size)``. These are the
scores a sequence will receive if it finishes.
topk_ids (LongTensor): Shape ``(B, beam_size)``. These are the
word indices of the topk predictions.
_batch_index (LongTensor): Shape ``(B, beam_size)``.
_prev_penalty (FloatTensor or NoneType): Shape
``(B, beam_size)``. Initialized to ``None``.
_coverage (FloatTensor or NoneType): Shape
``(1, B x beam_size, inp_seq_len)``.
hypotheses (list[list[Tuple[Tensor]]]): Contains a tuple
of score (float), sequence (long), and attention (float or None).
"""
def __init__(self, beam_size, batch_size, pad, bos, eos, dot, n_best,
global_scorer, min_length, max_length, return_attention,
block_ngram_repeat, exclusion_tokens,
stepwise_penalty, ratio):
super(BeamSearch, self).__init__(
pad, bos, eos, dot, batch_size, beam_size, min_length,
block_ngram_repeat, exclusion_tokens, return_attention,
max_length)
# beam parameters
self.global_scorer = global_scorer
self.beam_size = beam_size
self.n_best = n_best
self.ratio = ratio
# result caching
self.hypotheses = [[] for _ in range(batch_size)]
# beam state
self.top_beam_finished = torch.zeros([batch_size], dtype=torch.uint8)
# BoolTensor was introduced in pytorch 1.2
try:
self.top_beam_finished = self.top_beam_finished.bool()
except AttributeError:
pass
self._batch_offset = torch.arange(batch_size, dtype=torch.long)
self.select_indices = None
self.done = False
# "global state" of the old beam
self._prev_penalty = None
self._coverage = None
self._stepwise_cov_pen = (
stepwise_penalty and self.global_scorer.has_cov_pen)
self._vanilla_cov_pen = (
not stepwise_penalty and self.global_scorer.has_cov_pen)
self._cov_pen = self.global_scorer.has_cov_pen
def initialize(self, memory_bank, src_lengths, src_map=None, device=None):
"""Initialize for decoding.
Repeat src objects `beam_size` times.
"""
def fn_map_state(state, dim):
return tile(state, self.beam_size, dim=dim)
if isinstance(memory_bank, tuple):
memory_bank = tuple(tile(x, self.beam_size, dim=1)
for x in memory_bank)
mb_device = memory_bank[0].device
else:
memory_bank = tile(memory_bank, self.beam_size, dim=1)
mb_device = memory_bank.device
if src_map is not None:
src_map = tile(src_map, self.beam_size, dim=1)
if device is None:
device = mb_device
self.memory_lengths = tile(src_lengths, self.beam_size)
super(BeamSearch, self).initialize(
memory_bank, self.memory_lengths, src_map, device)
self.best_scores = torch.full(
[self.batch_size], -1e10, dtype=torch.float, device=device)
self._beam_offset = torch.arange(
0, self.batch_size * self.beam_size, step=self.beam_size,
dtype=torch.long, device=device)
self.topk_log_probs = torch.tensor(
[0.0] + [float("-inf")] * (self.beam_size - 1), device=device
).repeat(self.batch_size)
# buffers for the topk scores and 'backpointer'
self.topk_scores = torch.empty((self.batch_size, self.beam_size),
dtype=torch.float, device=device)
self.topk_ids = torch.empty((self.batch_size, self.beam_size),
dtype=torch.long, device=device)
self._batch_index = torch.empty([self.batch_size, self.beam_size],
dtype=torch.long, device=device)
return fn_map_state, memory_bank, self.memory_lengths, src_map
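    # Shape sketch (illustrative, not from the original code): with
    # batch_size=2 and beam_size=5, a memory_bank of shape (src_len, 2, h)
    # is tiled to (src_len, 10, h), and topk_log_probs starts as
    # [0, -inf, -inf, -inf, -inf] repeated per example, so only the first
    # beam of each example is live at step 0.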
@property
def current_predictions(self):
return self.alive_seq[:, -1]
@property
def current_backptr(self):
# for testing
return self.select_indices.view(self.batch_size, self.beam_size)\
.fmod(self.beam_size)
@property
def batch_offset(self):
return self._batch_offset
def advance(self, log_probs, attn, attn_key):
vocab_size = log_probs.size(-1)
# using integer division to get an integer _B without casting
_B = log_probs.shape[0] // self.beam_size
if self._stepwise_cov_pen and self._prev_penalty is not None:
self.topk_log_probs += self._prev_penalty
self.topk_log_probs -= self.global_scorer.cov_penalty(
self._coverage + attn, self.global_scorer.beta).view(
_B, self.beam_size)
# force the output to be longer than self.min_length
step = len(self)
self.ensure_min_length(log_probs)
# Multiply probs by the beam probability.
log_probs += self.topk_log_probs.view(_B * self.beam_size, 1)
# if the sequence ends now, then the penalty is the current
# length + 1, to include the EOS token
length_penalty = self.global_scorer.length_penalty(
step + 1, alpha=self.global_scorer.alpha)
curr_scores = log_probs / length_penalty
# Avoid any direction that would repeat unwanted ngrams
self.block_ngram_repeats(curr_scores)
# Flatten probs into a list of possibilities.
curr_scores = curr_scores.reshape(_B, self.beam_size * vocab_size)
torch.topk(curr_scores, self.beam_size, dim=-1,
out=(self.topk_scores, self.topk_ids))
# Recover log probs.
# Length penalty is just a scalar. It doesn't matter if it's applied
# before or after the topk.
torch.mul(self.topk_scores, length_penalty, out=self.topk_log_probs)
# Resolve beam origin and map to batch index flat representation.
torch.div(self.topk_ids, vocab_size, out=self._batch_index)
self._batch_index += self._beam_offset[:_B].unsqueeze(1)
self.select_indices = self._batch_index.view(_B * self.beam_size)
self.topk_ids.fmod_(vocab_size) # resolve true word ids
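        # Worked example (illustrative, not from the original code): with
        # vocab_size=4 and beam_size=2, a flat top-k index of 6 decodes to
        # beam 6 // 4 == 1 within its example and word id 6 % 4 == 2; adding
        # the per-example offset from self._beam_offset then maps it back to
        # a row of the flattened (batch x beam) state tensors.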
# Append last prediction.
self.alive_seq = torch.cat(
[self.alive_seq.index_select(0, self.select_indices),
self.topk_ids.view(_B * self.beam_size, 1)], -1)
self.maybe_update_forbidden_tokens()
if self.return_attention or self._cov_pen:
assert attn_key is not None
# we reorder the current attention dict
current_attn = {key: value.index_select(1, self.select_indices)
for key, value in attn.items()}
if step == 1:
self.alive_attn = current_attn
# update global state (step == 1)
if self._cov_pen: # coverage penalty
assert attn_key is not None
self._prev_penalty = torch.zeros_like(self.topk_log_probs)
self._coverage = current_attn[attn_key]
else:
# we reorder all attentions from the start
self.alive_attn = {key: value.index_select(1, self.select_indices)
for key, value in self.alive_attn.items()}
# We cat previous attentions with current
self.alive_attn = {
key: torch.cat([self.alive_attn[key], current_attn[key]], 0)
for key in list(current_attn)
}
# update global state (step > 1)
if self._cov_pen:
self._coverage = self._coverage.index_select(
1, self.select_indices)
                    self._coverage += current_attn[attn_key]
self._prev_penalty = self.global_scorer.cov_penalty(
self._coverage, beta=self.global_scorer.beta).view(
_B, self.beam_size)
if self._vanilla_cov_pen:
# shape: (batch_size x beam_size, 1)
cov_penalty = self.global_scorer.cov_penalty(
self._coverage,
beta=self.global_scorer.beta)
self.topk_scores -= cov_penalty.view(_B, self.beam_size).float()
self.is_finished = self.topk_ids.eq(self.eos)
self.ensure_max_length()
def update_finished(self):
# Penalize beams that finished.
_B_old = self.topk_log_probs.shape[0]
step = self.alive_seq.shape[-1] # 1 greater than the step in advance
self.topk_log_probs.masked_fill_(self.is_finished, -1e10)
# on real data (newstest2017) with the pretrained transformer,
# it's faster to not move this back to the original device
self.is_finished = self.is_finished.to('cpu')
self.top_beam_finished |= self.is_finished[:, 0].eq(1)
predictions = self.alive_seq.view(_B_old, self.beam_size, step)
if self.alive_attn is None:
attention = None
else:
attention = {
key: tensor.view(step - 1, _B_old, self.beam_size, tensor.size(-1))
for key, tensor in self.alive_attn.items()
}
non_finished_batch = []
for i in range(self.is_finished.size(0)): # Batch level
b = self._batch_offset[i]
finished_hyp = self.is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp: # Beam level: finished beam j in batch i
if self.ratio > 0:
s = self.topk_scores[i, j] / (step + 1)
if self.best_scores[b] < s:
self.best_scores[b] = s
self.hypotheses[b].append((
self.topk_scores[i, j],
predictions[i, j, 1:], # Ignore start_token.
{
                        key: tensor[:, i, j, :self.memory_lengths[i]]
for key, tensor in attention.items()
}
if attention is not None else None))
# End condition is the top beam finished and we can return
# n_best hypotheses.
if self.ratio > 0:
pred_len = self.memory_lengths[i] * self.ratio
finish_flag = ((self.topk_scores[i, 0] / pred_len)
<= self.best_scores[b]) or \
self.is_finished[i].all()
else:
finish_flag = self.top_beam_finished[i] != 0
if finish_flag and len(self.hypotheses[b]) >= self.n_best:
best_hyp = sorted(
self.hypotheses[b], key=lambda x: x[0], reverse=True)
for n, (score, pred, attn) in enumerate(best_hyp):
if n >= self.n_best:
break
self.scores[b].append(score)
self.predictions[b].append(pred) # ``(batch, n_best,)``
self.attention[b].append(
attn if attn is not None else [])
else:
non_finished_batch.append(i)
non_finished = torch.tensor(non_finished_batch)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
self.done = True
return
_B_new = non_finished.shape[0]
# Remove finished batches for the next step.
self.top_beam_finished = self.top_beam_finished.index_select(
0, non_finished)
self._batch_offset = self._batch_offset.index_select(0, non_finished)
non_finished = non_finished.to(self.topk_ids.device)
self.topk_log_probs = self.topk_log_probs.index_select(0,
non_finished)
self._batch_index = self._batch_index.index_select(0, non_finished)
self.select_indices = self._batch_index.view(_B_new * self.beam_size)
self.alive_seq = predictions.index_select(0, non_finished) \
.view(-1, self.alive_seq.size(-1))
self.topk_scores = self.topk_scores.index_select(0, non_finished)
self.topk_ids = self.topk_ids.index_select(0, non_finished)
if self.alive_attn is not None:
self.alive_attn = {
key: tensor.index_select(1, non_finished) \
.view(step - 1, _B_new * self.beam_size, tensor.size(-1))
for key, tensor in attention.items()}
if self._cov_pen:
                # derive the source length from the coverage tensor itself
                inp_seq_len = self._coverage.size(-1)
                self._coverage = self._coverage \
                    .view(1, _B_old, self.beam_size, inp_seq_len) \
                    .index_select(1, non_finished) \
                    .view(1, _B_new * self.beam_size, inp_seq_len)
if self._stepwise_cov_pen:
self._prev_penalty = self._prev_penalty.index_select(
0, non_finished)
class GNMTGlobalScorer(object):
"""NMT re-ranking.
Args:
alpha (float): Length parameter.
beta (float): Coverage parameter.
length_penalty (str): Length penalty strategy.
coverage_penalty (str): Coverage penalty strategy.
Attributes:
alpha (float): See above.
beta (float): See above.
length_penalty (callable): See :class:`penalties.PenaltyBuilder`.
coverage_penalty (callable): See :class:`penalties.PenaltyBuilder`.
has_cov_pen (bool): See :class:`penalties.PenaltyBuilder`.
has_len_pen (bool): See :class:`penalties.PenaltyBuilder`.
"""
@classmethod
def from_opt(cls, opt):
return cls(
opt.alpha,
opt.beta,
opt.length_penalty,
opt.coverage_penalty)
def __init__(self, alpha, beta, length_penalty, coverage_penalty):
self._validate(alpha, beta, length_penalty, coverage_penalty)
self.alpha = alpha
self.beta = beta
penalty_builder = penalties.PenaltyBuilder(coverage_penalty,
length_penalty)
self.has_cov_pen = penalty_builder.has_cov_pen
# Term will be subtracted from probability
self.cov_penalty = penalty_builder.coverage_penalty
self.has_len_pen = penalty_builder.has_len_pen
# Probability will be divided by this
self.length_penalty = penalty_builder.length_penalty
@classmethod
def _validate(cls, alpha, beta, length_penalty, coverage_penalty):
# these warnings indicate that either the alpha/beta
# forces a penalty to be a no-op, or a penalty is a no-op but
# the alpha/beta would suggest otherwise.
if length_penalty is None or length_penalty == "none":
if alpha != 0:
warnings.warn("Non-default `alpha` with no length penalty. "
"`alpha` has no effect.")
else:
# using some length penalty
if length_penalty == "wu" and alpha == 0.:
warnings.warn("Using length penalty Wu with alpha==0 "
"is equivalent to using length penalty none.")
if coverage_penalty is None or coverage_penalty == "none":
if beta != 0:
warnings.warn("Non-default `beta` with no coverage penalty. "
"`beta` has no effect.")
else:
# using some coverage penalty
if beta == 0.:
warnings.warn("Non-default coverage penalty with beta==0 "
"is equivalent to using coverage penalty none.")
| 17,605 | 43.015 | 83 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/process_zh.py
|
from pyhanlp import HanLP
from snownlp import SnowNLP
import pkuseg
# Chinese segmentation
def zh_segmentator(line):
return " ".join(pkuseg.pkuseg().cut(line))
# Chinese simplify -> Chinese traditional standard
def zh_traditional_standard(line):
return HanLP.convertToTraditionalChinese(line)
# Chinese simplify -> Chinese traditional (HongKong)
def zh_traditional_hk(line):
return HanLP.s2hk(line)
# Chinese simplify -> Chinese traditional (Taiwan)
def zh_traditional_tw(line):
return HanLP.s2tw(line)
# Chinese traditional -> Chinese simplify (v1)
def zh_simplify(line):
return HanLP.convertToSimplifiedChinese(line)
# Chinese traditional -> Chinese simplify (v2)
def zh_simplify_v2(line):
return SnowNLP(line).han
| 753 | 21.176471 | 52 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/__init__.py
|
""" Modules for translation """
from onmt.translate.translator import Translator
from onmt.translate.translation import Translation, TranslationBuilder
from onmt.translate.beam_search import BeamSearch, GNMTGlobalScorer
from onmt.translate.decode_strategy import DecodeStrategy
from onmt.translate.greedy_search import GreedySearch
from onmt.translate.penalties import PenaltyBuilder
from onmt.translate.translation_server import TranslationServer, \
ServerModelError
__all__ = ['Translator', 'Translation', 'BeamSearch',
'GNMTGlobalScorer', 'TranslationBuilder',
'PenaltyBuilder', 'TranslationServer', 'ServerModelError',
"DecodeStrategy", "GreedySearch"]
| 695 | 45.4 | 70 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/penalties.py
|
from __future__ import division
import torch
class PenaltyBuilder(object):
"""Returns the Length and Coverage Penalty function for Beam Search.
Args:
length_pen (str): option name of length pen
cov_pen (str): option name of cov pen
Attributes:
has_cov_pen (bool): Whether coverage penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting beta
            to 0 should force the coverage penalty to be a no-op.
has_len_pen (bool): Whether length penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting alpha
to 1 should force length penalty to be a no-op.
coverage_penalty (callable[[FloatTensor, float], FloatTensor]):
Calculates the coverage penalty.
length_penalty (callable[[int, float], float]): Calculates
the length penalty.
"""
def __init__(self, cov_pen, length_pen):
self.has_cov_pen = not self._pen_is_none(cov_pen)
self.coverage_penalty = self._coverage_penalty(cov_pen)
self.has_len_pen = not self._pen_is_none(length_pen)
self.length_penalty = self._length_penalty(length_pen)
@staticmethod
def _pen_is_none(pen):
return pen == "none" or pen is None
def _coverage_penalty(self, cov_pen):
if cov_pen == "wu":
return self.coverage_wu
elif cov_pen == "summary":
return self.coverage_summary
elif self._pen_is_none(cov_pen):
return self.coverage_none
else:
raise NotImplementedError("No '{:s}' coverage penalty.".format(
cov_pen))
def _length_penalty(self, length_pen):
if length_pen == "wu":
return self.length_wu
elif length_pen == "avg":
return self.length_average
elif self._pen_is_none(length_pen):
return self.length_none
else:
raise NotImplementedError("No '{:s}' length penalty.".format(
length_pen))
# Below are all the different penalty terms implemented so far.
# Subtract coverage penalty from topk log probs.
# Divide topk log probs by length penalty.
def coverage_wu(self, cov, beta=0.):
"""GNMT coverage re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
``cov`` is expected to be sized ``(*, seq_len)``, where ``*`` is
probably ``batch_size x beam_size`` but could be several
dimensions like ``(batch_size, beam_size)``. If ``cov`` is attention,
then the ``seq_len`` axis probably sums to (almost) 1.
"""
penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(-1)
return beta * penalty
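    # Intuition sketch (not from the original code): positions whose attention
    # mass already sums to >= 1 contribute min(cov, 1) == 1, hence log(1) == 0,
    # so only source positions covered less than once add to the penalty
    # (scaled by beta).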
def coverage_summary(self, cov, beta=0.):
"""Our summary penalty."""
penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(-1)
penalty -= cov.size(-1)
return beta * penalty
def coverage_none(self, cov, beta=0.):
"""Returns zero as penalty"""
none = torch.zeros((1,), device=cov.device,
dtype=torch.float)
if cov.dim() == 3:
none = none.unsqueeze(0)
return none
def length_wu(self, cur_len, alpha=0.):
"""GNMT length re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
return ((5 + cur_len) / 6.0) ** alpha
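    # Worked example (not from the original code): for cur_len=7 and alpha=1.0
    # the factor is ((5 + 7) / 6.0) ** 1.0 == 2.0, so the summed
    # log-probability is divided by 2; alpha=0.0 gives 1.0, i.e. no length
    # normalization.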
def length_average(self, cur_len, alpha=0.):
"""Returns the current sequence length."""
return cur_len
def length_none(self, cur_len, alpha=0.):
"""Returns unmodified scores."""
return 1.0
| 3,710 | 35.029126 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/translate/translator.py
|
#!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import time
import numpy as np
from itertools import count, zip_longest
import torch
import onmt.model_builder
import onmt.inputters as inputters
import onmt.decoders.ensemble
from onmt.translate.beam_search import BeamSearch
from onmt.translate.greedy_search import GreedySearch
from onmt.utils.misc import tile, set_random_seed, report_matrix
from onmt.utils.alignment import extract_alignment, build_align_pharaoh
from onmt.modules.copy_generator import collapse_copy_scores
def build_translator(opt, report_score=True, logger=None, out_file=None):
if out_file is None:
out_file = codecs.open(opt.output, 'w+', 'utf-8')
load_test_model = onmt.decoders.ensemble.load_test_model \
if len(opt.models) > 1 else onmt.model_builder.load_test_model
fields, model, model_opt = load_test_model(opt)
scorer = onmt.translate.GNMTGlobalScorer.from_opt(opt)
translator = Translator.from_opt(
model,
fields,
opt,
model_opt,
global_scorer=scorer,
out_file=out_file,
report_align=opt.report_align,
report_score=report_score,
logger=logger
)
return translator
def _tally_parameters(model):
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return enc + dec, enc, dec
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
# max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
src_elements = count * max_src_in_batch
return src_elements
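# Worked example (illustrative, not from the original code): if the longest
# source sequence seen so far in the batch has 10 tokens, max_src_in_batch
# becomes 10 + 2 == 12 (to account for <bos>/<eos>), and with count=3
# sequences the function reports 3 * 12 == 36 source elements, which the
# iterator compares against a token-based batch_size budget.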
class Translator(object):
"""Translate a batch of sentences with a saved model.
Args:
model (onmt.modules.NMTModel): NMT model to use for translation
fields (dict[str, torchtext.data.Field]): A dict
mapping each side to its list of name-Field pairs.
src_reader (onmt.inputters.DataReaderBase): Source reader.
tgt_reader (onmt.inputters.TextDataReader): Target reader.
gpu (int): GPU device. Set to negative for no GPU.
n_best (int): How many beams to wait for.
min_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
max_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
beam_size (int): Number of beams.
random_sampling_topk (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
random_sampling_temp (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
stepwise_penalty (bool): Whether coverage penalty is applied every step
or not.
dump_beam (bool): Debugging option.
block_ngram_repeat (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
ignore_when_blocking (set or frozenset): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
replace_unk (bool): Replace unknown token.
data_type (str): Source data type.
verbose (bool): Print/log every translation.
report_time (bool): Print/log total time/frequency.
copy_attn (bool): Use copy attention.
global_scorer (onmt.translate.GNMTGlobalScorer): Translation
scoring/reranking object.
out_file (TextIO or codecs.StreamReaderWriter): Output file.
report_score (bool) : Whether to report scores
logger (logging.Logger or NoneType): Logger.
"""
def __init__(
self,
model,
fields,
src_reader,
tgt_reader,
gpu=-1,
n_best=1,
min_length=0,
max_length=100,
ratio=0.,
beam_size=30,
random_sampling_topk=1,
random_sampling_temp=1,
stepwise_penalty=None,
dump_beam=False,
block_ngram_repeat=0,
ignore_when_blocking=frozenset(),
replace_unk=False,
phrase_table="",
data_type="text",
verbose=False,
report_time=False,
copy_attn=False,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
dump_attn=False,
logger=None,
seed=-1):
self.model = model
self.fields = fields
tgt_field = dict(self.fields)["tgt"].base_field
self._tgt_vocab = tgt_field.vocab
self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]
self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]
self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]
self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]
self._tgt_dot_idx = self._tgt_vocab.stoi['.'] # custom rule
self._tgt_vocab_len = len(self._tgt_vocab)
self._gpu = gpu
self._use_cuda = gpu > -1
self._dev = torch.device("cuda", self._gpu) \
if self._use_cuda else torch.device("cpu")
self.n_best = n_best
self.max_length = max_length
self.beam_size = beam_size
self.random_sampling_temp = random_sampling_temp
self.sample_from_topk = random_sampling_topk
self.min_length = min_length
self.ratio = ratio
self.stepwise_penalty = stepwise_penalty
self.dump_beam = dump_beam
self.dump_attn = dump_attn
self.block_ngram_repeat = block_ngram_repeat
self.ignore_when_blocking = ignore_when_blocking
self._exclusion_idxs = {
self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}
self.src_reader = src_reader
self.tgt_reader = tgt_reader
self.replace_unk = replace_unk
if self.replace_unk and not self.model.decoder.attentional:
raise ValueError(
"replace_unk requires an attentional decoder.")
self.phrase_table = phrase_table
self.data_type = data_type
self.verbose = verbose
self.report_time = report_time
self.copy_attn = copy_attn
self.global_scorer = global_scorer
if self.global_scorer.has_cov_pen and \
not self.model.decoder.attentional:
raise ValueError(
"Coverage penalty requires an attentional decoder.")
self.out_file = out_file
self.report_align = report_align
self.report_score = report_score
self.logger = logger
self.use_filter_pred = False
self._filter_pred = None
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
set_random_seed(seed, self._use_cuda)
# Logging construction of translator
n_params, enc, dec = _tally_parameters(model)
self._log(f'Built a translator with {n_params} parameters')
self._log(f'Important params are:')
self._log(f' -beam size:{self.beam_size}')
self._log(f' -blk:{self.block_ngram_repeat}')
self._log(f' -dumping attention at: {self.dump_attn}')
@classmethod
def from_opt(
cls,
model,
fields,
opt,
model_opt,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
logger=None):
"""Alternate constructor.
Args:
model (onmt.modules.NMTModel): See :func:`__init__()`.
fields (dict[str, torchtext.data.Field]): See
:func:`__init__()`.
opt (argparse.Namespace): Command line options
model_opt (argparse.Namespace): Command line options saved with
the model checkpoint.
global_scorer (onmt.translate.GNMTGlobalScorer): See
:func:`__init__()`..
out_file (TextIO or codecs.StreamReaderWriter): See
:func:`__init__()`.
report_align (bool) : See :func:`__init__()`.
report_score (bool) : See :func:`__init__()`.
logger (logging.Logger or NoneType): See :func:`__init__()`.
"""
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
return cls(
model,
fields,
src_reader,
tgt_reader,
gpu=opt.gpu,
n_best=opt.n_best,
min_length=opt.min_length,
max_length=opt.max_length,
ratio=opt.ratio,
beam_size=opt.beam_size,
random_sampling_topk=opt.random_sampling_topk,
random_sampling_temp=opt.random_sampling_temp,
stepwise_penalty=opt.stepwise_penalty,
dump_beam=opt.dump_beam,
block_ngram_repeat=opt.block_ngram_repeat,
ignore_when_blocking=set(opt.ignore_when_blocking),
replace_unk=opt.replace_unk,
phrase_table=opt.phrase_table,
data_type=opt.data_type,
verbose=opt.verbose,
report_time=opt.report_time,
copy_attn=model_opt.copy_attn,
global_scorer=global_scorer,
out_file=out_file,
report_align=report_align,
report_score=report_score,
dump_attn=opt.dump_attn,
logger=logger,
seed=opt.seed)
def _log(self, msg):
if self.logger:
self.logger.info(msg)
else:
print(msg)
def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,
use_src_map, enc_states, batch_size, src):
if "tgt" in batch.__dict__:
gs = self._score_target(
batch, memory_bank, src_lengths, src_vocabs,
batch.src_map if use_src_map else None)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
gs = [0] * batch_size
return gs
def translate(
self,
src,
tgt=None,
src_dir=None,
batch_size=None,
batch_type="sents",
attn_debug=False,
align_debug=False,
phrase_table=""):
"""Translate content of ``src`` and get gold scores from ``tgt``.
Args:
src: See :func:`self.src_reader.read()`.
tgt: See :func:`self.tgt_reader.read()`.
src_dir: See :func:`self.src_reader.read()` (only relevant
for certain types of data).
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
align_debug (bool): enables the word alignment logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
if batch_size is None:
raise ValueError("batch_size must be set")
src_data = {"reader": self.src_reader, "data": src, "dir": src_dir}
tgt_data = {"reader": self.tgt_reader, "data": tgt, "dir": None}
_readers, _data, _dir = inputters.Dataset.config(
[('src', src_data), ('tgt', tgt_data)])
data = inputters.Dataset(
self.fields, readers=_readers, data=_data, dirs=_dir,
sort_key=inputters.str2sortkey[self.data_type],
filter_pred=self._filter_pred
)
data_iter = inputters.OrderedIterator(
dataset=data,
device=self._dev,
batch_size=batch_size,
batch_size_fn=max_tok_len if batch_type == "tokens" else None,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
xlation_builder = onmt.translate.TranslationBuilder(
data, self.fields, self.n_best, self.replace_unk, tgt,
self.phrase_table
)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
start_time = time.time()
for _iter, batch in enumerate(data_iter, 1):
if _iter == 1:
n_params, enc, dec = _tally_parameters(self.model)
self._log(f'Built a translator with {n_params} parameters')
self._log(f'Important params are:')
self._log(f' -beam size:{self.beam_size}')
self._log(f' -blk:{self.block_ngram_repeat}')
self._log(f' -dumping attention at: {self.dump_attn}')
self._log(f'Translating batch {_iter} (batch_size is {batch_size})')
return_attention = True if attn_debug or self.dump_attn else False
batch_data = self.translate_batch(
batch, data.src_vocabs, return_attention
)
translations = xlation_builder.from_batch(batch_data)
if self.dump_attn:
# save the attention for this batch in this folder
torch.save(batch_data["attention"], os.path.join(self.dump_attn, f'batch_{_iter}'))
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
if self.report_align:
align_pharaohs = [build_align_pharaoh(align) for align
in trans.word_aligns[:self.n_best]]
n_best_preds_align = [" ".join(align) for align
in align_pharaohs]
n_best_preds = [pred + " ||| " + align
for pred, align in zip(
n_best_preds, n_best_preds_align)]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if attn_debug:
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(attns[0]))]
output = report_matrix(srcs, preds, attns)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if align_debug:
if trans.gold_sent is not None:
tgts = trans.gold_sent
else:
tgts = trans.pred_sents[0]
align = trans.word_aligns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(align[0]))]
output = report_matrix(srcs, tgts, align)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
end_time = time.time()
if self.report_score:
msg = self._report_score('PRED', pred_score_total,
pred_words_total)
self._log(msg)
if tgt is not None:
msg = self._report_score('GOLD', gold_score_total,
gold_words_total)
self._log(msg)
if self.report_time:
total_time = end_time - start_time
self._log("Total translation time (s): %f" % total_time)
self._log("Average translation time (s): %f" % (
total_time / len(all_predictions)))
self._log("Tokens per second: %f" % (
pred_words_total / total_time))
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions
def _align_pad_prediction(self, predictions, bos, pad):
"""
Padding predictions in batch and add BOS.
Args:
predictions (List[List[Tensor]]): `(batch, n_best,)`, for each src
sequence contain n_best tgt predictions all of which ended with
eos id.
bos (int): bos index to be used.
pad (int): pad index to be used.
Return:
batched_nbest_predict (torch.LongTensor): `(batch, n_best, tgt_l)`
"""
dtype, device = predictions[0][0].dtype, predictions[0][0].device
flatten_tgt = [best.tolist() for bests in predictions
for best in bests]
paded_tgt = torch.tensor(
list(zip_longest(*flatten_tgt, fillvalue=pad)),
dtype=dtype, device=device).T
bos_tensor = torch.full([paded_tgt.size(0), 1], bos,
dtype=dtype, device=device)
full_tgt = torch.cat((bos_tensor, paded_tgt), dim=-1)
batched_nbest_predict = full_tgt.view(
len(predictions), -1, full_tgt.size(-1)) # (batch, n_best, tgt_l)
return batched_nbest_predict
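    # Shape sketch (illustrative, not from the original code): for a batch of
    # 2 examples with n_best=2 and prediction lengths [3, 5, 4, 2], every
    # prediction is right-padded with `pad` to length 5, a `bos` column is
    # prepended, and the result is viewed as (2, 2, 6), i.e.
    # (batch, n_best, tgt_l).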
def _align_forward(self, batch, predictions):
"""
        For a batch of inputs and its predictions, return a list of predicted
        alignment src-index Tensors of size ``(batch, n_best,)``.
"""
# (0) add BOS and padding to tgt prediction
if hasattr(batch, 'tgt'):
batch_tgt_idxs = batch.tgt.transpose(1, 2).transpose(0, 2)
else:
batch_tgt_idxs = self._align_pad_prediction(
predictions, bos=self._tgt_bos_idx, pad=self._tgt_pad_idx)
tgt_mask = (batch_tgt_idxs.eq(self._tgt_pad_idx) |
batch_tgt_idxs.eq(self._tgt_eos_idx) |
batch_tgt_idxs.eq(self._tgt_bos_idx))
n_best = batch_tgt_idxs.size(1)
# (1) Encoder forward.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
# (2) Repeat src objects `n_best` times.
# We use batch_size x n_best, get ``(src_len, batch * n_best, nfeat)``
src = tile(src, n_best, dim=1)
enc_states = tile(enc_states, n_best, dim=1)
if isinstance(memory_bank, tuple):
memory_bank = tuple(tile(x, n_best, dim=1) for x in memory_bank)
else:
memory_bank = tile(memory_bank, n_best, dim=1)
src_lengths = tile(src_lengths, n_best) # ``(batch * n_best,)``
# (3) Init decoder with n_best src,
self.model.decoder.init_state(src, memory_bank, enc_states)
# reshape tgt to ``(len, batch * n_best, nfeat)``
tgt = batch_tgt_idxs.view(-1, batch_tgt_idxs.size(-1)).T.unsqueeze(-1)
dec_in = tgt[:-1] # exclude last target from inputs
_, attns = self.model.decoder(
dec_in, memory_bank, memory_lengths=src_lengths, with_align=True)
alignment_attn = attns["align"] # ``(B, tgt_len-1, src_len)``
# masked_select
align_tgt_mask = tgt_mask.view(-1, tgt_mask.size(-1))
prediction_mask = align_tgt_mask[:, 1:] # exclude bos to match pred
# get aligned src id for each prediction's valid tgt tokens
        alignment = extract_alignment(
            alignment_attn, prediction_mask, src_lengths, n_best)
        return alignment
def translate_batch(self, batch, src_vocabs, attn_debug):
"""Translate a batch of sentences."""
with torch.no_grad():
if self.beam_size == 1:
decode_strategy = GreedySearch(
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
batch_size=batch.batch_size,
min_length=self.min_length, max_length=self.max_length,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
return_attention=attn_debug or self.replace_unk,
sampling_temp=self.random_sampling_temp,
keep_topk=self.sample_from_topk)
else:
# TODO: support these blacklisted features
assert not self.dump_beam
decode_strategy = BeamSearch(
self.beam_size,
batch_size=batch.batch_size,
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
dot=self._tgt_dot_idx,
n_best=self.n_best,
global_scorer=self.global_scorer,
min_length=self.min_length, max_length=self.max_length,
return_attention=attn_debug or self.replace_unk,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
stepwise_penalty=self.stepwise_penalty,
ratio=self.ratio)
return self._translate_batch_with_strategy(batch, src_vocabs,
decode_strategy)
def _run_encoder(self, batch):
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
enc_states, memory_bank, src_lengths = self.model.encoder(
src, src_lengths)
if src_lengths is None:
assert not isinstance(memory_bank, tuple), \
'Ensemble decoding only supported for text data'
src_lengths = torch.Tensor(batch.batch_size) \
.type_as(memory_bank) \
.long() \
.fill_(memory_bank.size(0))
return src, enc_states, memory_bank, src_lengths
def _decode_and_generate(
self,
decoder_in,
memory_bank,
batch,
src_vocabs,
memory_lengths,
src_map=None,
step=None,
batch_offset=None):
if self.copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(
decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx
)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
dec_out, dec_attn = self.model.decoder(
decoder_in, memory_bank, memory_lengths=memory_lengths, step=step
)
# Generator forward.
if not self.copy_attn:
if "std" in dec_attn:
attn = dec_attn["std"]
attn_key = 'std'
else:
attn = None
attn_key = None
log_probs = self.model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn["copy"]
attn_key = 'copy'
scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),
attn.view(-1, attn.size(2)),
src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, self.beam_size, scores.size(-1))
scores = collapse_copy_scores(
scores,
batch,
self._tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn, attn_key
def _translate_batch_with_strategy(
self,
batch,
src_vocabs,
decode_strategy):
"""Translate a batch of sentences step by step using cache.
Args:
batch: a batch of sentences, yield by data iterator.
src_vocabs (list): list of torchtext.data.Vocab if can_copy.
            decode_strategy (DecodeStrategy): A decode strategy to use for
                generating the translation step by step.
Returns:
results (dict): The translation results.
"""
# (0) Prep the components of the search.
use_src_map = self.copy_attn
parallel_paths = decode_strategy.parallel_paths # beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
self.model.decoder.init_state(src, memory_bank, enc_states)
results = {
"predictions": None,
"scores": None,
"attention": None,
"batch": batch,
"gold_score": self._gold_score(
batch, memory_bank, src_lengths, src_vocabs, use_src_map,
enc_states, batch_size, src)}
# (2) prep decode_strategy. Possibly repeat src objects.
src_map = batch.src_map if use_src_map else None
fn_map_state, memory_bank, memory_lengths, src_map = \
decode_strategy.initialize(memory_bank, src_lengths, src_map)
if fn_map_state is not None:
self.model.decoder.map_state(fn_map_state)
# (3) Begin decoding step by step:
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn, attn_key = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
src_vocabs,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset)
decode_strategy.advance(log_probs, attn, attn_key)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices)
for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
results["scores"] = decode_strategy.scores
results["predictions"] = decode_strategy.predictions
results["attention"] = decode_strategy.attention
if self.report_align:
results["alignment"] = self._align_forward(
batch, decode_strategy.predictions)
else:
results["alignment"] = [[] for _ in range(batch_size)]
return results
def _score_target(self, batch, memory_bank, src_lengths,
src_vocabs, src_map):
tgt = batch.tgt
tgt_in = tgt[:-1]
        log_probs, attn, _ = self._decode_and_generate(
            tgt_in, memory_bank, batch, src_vocabs,
            memory_lengths=src_lengths, src_map=src_map)
log_probs[:, :, self._tgt_pad_idx] = 0
gold = tgt[1:]
gold_scores = log_probs.gather(2, gold)
gold_scores = gold_scores.sum(dim=0).view(-1)
return gold_scores
def _report_score(self, name, score_total, words_total):
if words_total == 0:
msg = "%s No words predicted" % (name,)
else:
avg_score = score_total / words_total
ppl = np.exp(-score_total.item() / words_total)
msg = ("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, avg_score,
name, ppl))
return msg
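    # Note (added, worked example with assumed numbers): corpus-level PPL is
    # exp(-score_total / words_total); a summed log-score of -13.86 over 10
    # predicted words gives AVG SCORE -1.386 and PPL exp(1.386) ~= 4.0.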
| 30,859 | 38.262087 | 99 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/rnn_factory.py
|
"""
RNN tools
"""
import torch.nn as nn
import onmt.models
def rnn_factory(rnn_type, **kwargs):
""" rnn factory, Use pytorch version when available. """
no_pack_padded_seq = False
if rnn_type == "SRU":
# SRU doesn't support PackedSequence.
no_pack_padded_seq = True
rnn = onmt.models.sru.SRU(**kwargs)
else:
rnn = getattr(nn, rnn_type)(**kwargs)
return rnn, no_pack_padded_seq
| 432 | 23.055556 | 60 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/optimizers.py
|
""" Optimizers class """
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
import operator
import functools
from copy import copy
from math import sqrt
import types
import importlib
from onmt.utils.misc import fn_args
def build_torch_optimizer(model, opt):
"""Builds the PyTorch optimizer.
We use the default parameters for Adam that are suggested by
the original paper https://arxiv.org/pdf/1412.6980.pdf
These values are also used by other established implementations,
e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/
    More recently, slightly different values were used in the paper
    "Attention is all you need"
    https://arxiv.org/pdf/1706.03762.pdf, in particular beta2=0.98.
    However, beta2=0.999 is still arguably the more established value,
    so we use that here as well.
Args:
model: The model to optimize.
        opt: The dictionary of options.
Returns:
A ``torch.optim.Optimizer`` instance.
"""
params = [p for p in model.parameters() if p.requires_grad]
betas = [opt.adam_beta1, opt.adam_beta2]
if opt.optim == 'sgd':
optimizer = optim.SGD(params, lr=opt.learning_rate)
elif opt.optim == 'adagrad':
optimizer = optim.Adagrad(
params,
lr=opt.learning_rate,
initial_accumulator_value=opt.adagrad_accumulator_init)
elif opt.optim == 'adadelta':
optimizer = optim.Adadelta(params, lr=opt.learning_rate)
elif opt.optim == 'adafactor':
optimizer = AdaFactor(
params,
non_constant_decay=True,
enable_factorization=True,
weight_decay=0)
elif opt.optim == 'adam':
optimizer = optim.Adam(
params,
lr=opt.learning_rate,
betas=betas,
eps=1e-9)
elif opt.optim == 'sparseadam':
dense = []
sparse = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# TODO: Find a better way to check for sparse gradients.
if 'embed' in name:
sparse.append(param)
else:
dense.append(param)
optimizer = MultipleOptimizer(
[optim.Adam(
dense,
lr=opt.learning_rate,
betas=betas,
eps=1e-8),
optim.SparseAdam(
sparse,
lr=opt.learning_rate,
betas=betas,
eps=1e-8)])
elif opt.optim == 'fusedadam':
# we use here a FusedAdam() copy of an old Apex repo
optimizer = FusedAdam(
params,
lr=opt.learning_rate,
betas=betas)
else:
raise ValueError('Invalid optimizer type: ' + opt.optim)
if opt.model_dtype == 'fp16':
import apex
if opt.optim != 'fusedadam':
# In this case use the new AMP API from apex
loss_scale = "dynamic" if opt.loss_scale == 0 else opt.loss_scale
model, optimizer = apex.amp.initialize(
[model, model.generator],
optimizer,
opt_level=opt.apex_opt_level,
loss_scale=loss_scale,
keep_batchnorm_fp32=None)
else:
# In this case use the old FusedAdam with FP16_optimizer wrapper
static_loss_scale = opt.loss_scale
dynamic_loss_scale = opt.loss_scale == 0
optimizer = apex.optimizers.FP16_Optimizer(
optimizer,
static_loss_scale=static_loss_scale,
dynamic_loss_scale=dynamic_loss_scale)
return optimizer
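# Illustrative usage sketch (added; not part of the upstream module). It shows
# how build_torch_optimizer is driven by an options object; the attribute
# values below are assumptions chosen only for the example.
def _example_build_adam_optimizer():
    from argparse import Namespace
    import torch.nn as nn
    toy_opt = Namespace(optim='adam', learning_rate=1e-3,
                        adam_beta1=0.9, adam_beta2=0.999, model_dtype='fp32')
    toy_model = nn.Linear(8, 8)
    # Builds a plain torch.optim.Adam over the model's trainable parameters.
    return build_torch_optimizer(toy_model, toy_opt)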
def make_learning_rate_decay_fn(opt):
"""Returns the learning decay function from options."""
if opt.decay_method == 'noam':
return functools.partial(
noam_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size)
elif opt.decay_method == 'noamwd':
return functools.partial(
noamwd_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
elif opt.decay_method == 'rsqrt':
return functools.partial(
rsqrt_decay, warmup_steps=opt.warmup_steps)
elif opt.start_decay_steps is not None:
return functools.partial(
exponential_decay,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
def noam_decay(step, warmup_steps, model_size):
"""Learning rate schedule described in
https://arxiv.org/pdf/1706.03762.pdf.
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)))
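# Illustrative sketch (added; warmup_steps=4000 and model_size=512 are assumed
# values): the noam factor rises roughly linearly during warmup and then
# decays proportionally to step ** -0.5.
def _example_noam_schedule():
    return [noam_decay(step, warmup_steps=4000, model_size=512)
            for step in (1, 1000, 4000, 16000)]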
def noamwd_decay(step, warmup_steps,
model_size, rate, decay_steps, start_step=0):
"""Learning rate schedule optimized for huge batches
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)) *
rate ** (max(step - start_step + decay_steps, 0) // decay_steps))
def exponential_decay(step, rate, decay_steps, start_step=0):
"""A standard exponential decay, scaling the learning rate by :obj:`rate`
every :obj:`decay_steps` steps.
"""
return rate ** (max(step - start_step + decay_steps, 0) // decay_steps)
def rsqrt_decay(step, warmup_steps):
"""Decay based on the reciprocal of the step square root."""
return 1.0 / sqrt(max(step, warmup_steps))
class MultipleOptimizer(object):
""" Implement multiple optimizers needed for sparse adam """
def __init__(self, op):
""" ? """
self.optimizers = op
@property
def param_groups(self):
param_groups = []
for optimizer in self.optimizers:
param_groups.extend(optimizer.param_groups)
return param_groups
def zero_grad(self):
""" ? """
for op in self.optimizers:
op.zero_grad()
def step(self):
""" ? """
for op in self.optimizers:
op.step()
@property
def state(self):
""" ? """
return {k: v for op in self.optimizers for k, v in op.state.items()}
def state_dict(self):
""" ? """
return [op.state_dict() for op in self.optimizers]
def load_state_dict(self, state_dicts):
""" ? """
assert len(state_dicts) == len(self.optimizers)
for i in range(len(state_dicts)):
self.optimizers[i].load_state_dict(state_dicts[i])
class Optimizer(object):
"""
Controller class for optimization. Mostly a thin
wrapper for `optim`, but also useful for implementing
rate scheduling beyond what is currently available.
Also implements necessary methods for training RNNs such
as grad manipulations.
"""
def __init__(self,
optimizer,
learning_rate,
learning_rate_decay_fn=None,
max_grad_norm=None):
"""Initializes the controller.
Args:
optimizer: A ``torch.optim.Optimizer`` instance.
learning_rate: The initial learning rate.
learning_rate_decay_fn: An optional callable taking the current step
as argument and return a learning rate scaling factor.
max_grad_norm: Clip gradients to this global norm.
"""
self._optimizer = optimizer
self._learning_rate = learning_rate
self._learning_rate_decay_fn = learning_rate_decay_fn
self._max_grad_norm = max_grad_norm or 0
self._training_step = 1
self._decay_step = 1
self._fp16 = None
@classmethod
def from_opt(cls, model, opt, checkpoint=None):
"""Builds the optimizer from options.
Args:
cls: The ``Optimizer`` class to instantiate.
model: The model to optimize.
opt: The dict of user options.
checkpoint: An optional checkpoint to load states from.
Returns:
An ``Optimizer`` instance.
"""
optim_opt = opt
optim_state_dict = None
if opt.train_from and checkpoint is not None:
optim = checkpoint['optim']
ckpt_opt = checkpoint['opt']
ckpt_state_dict = {}
if isinstance(optim, Optimizer): # Backward compatibility.
ckpt_state_dict['training_step'] = optim._step + 1
ckpt_state_dict['decay_step'] = optim._step + 1
ckpt_state_dict['optimizer'] = optim.optimizer.state_dict()
else:
ckpt_state_dict = optim
if opt.reset_optim == 'none':
# Load everything from the checkpoint.
optim_opt = ckpt_opt
optim_state_dict = ckpt_state_dict
elif opt.reset_optim == 'all':
# Build everything from scratch.
pass
elif opt.reset_optim == 'states':
# Reset optimizer, keep options.
optim_opt = ckpt_opt
optim_state_dict = ckpt_state_dict
del optim_state_dict['optimizer']
elif opt.reset_optim == 'keep_states':
# Reset options, keep optimizer.
optim_state_dict = ckpt_state_dict
optimizer = cls(
build_torch_optimizer(model, optim_opt),
optim_opt.learning_rate,
learning_rate_decay_fn=make_learning_rate_decay_fn(optim_opt),
max_grad_norm=optim_opt.max_grad_norm)
if opt.model_dtype == "fp16":
if opt.optim == "fusedadam":
optimizer._fp16 = "legacy"
else:
optimizer._fp16 = "amp"
if optim_state_dict:
optimizer.load_state_dict(optim_state_dict)
return optimizer
@property
def training_step(self):
"""The current training step."""
return self._training_step
def learning_rate(self):
"""Returns the current learning rate."""
if self._learning_rate_decay_fn is None:
return self._learning_rate
scale = self._learning_rate_decay_fn(self._decay_step)
return scale * self._learning_rate
def state_dict(self):
return {
'training_step': self._training_step,
'decay_step': self._decay_step,
'optimizer': self._optimizer.state_dict()
}
def load_state_dict(self, state_dict):
self._training_step = state_dict['training_step']
# State can be partially restored.
if 'decay_step' in state_dict:
self._decay_step = state_dict['decay_step']
if 'optimizer' in state_dict:
self._optimizer.load_state_dict(state_dict['optimizer'])
def zero_grad(self):
"""Zero the gradients of optimized parameters."""
self._optimizer.zero_grad()
def backward(self, loss):
"""Wrapper for backward pass. Some optimizer requires ownership of the
backward pass."""
if self._fp16 == "amp":
import apex
with apex.amp.scale_loss(loss, self._optimizer) as scaled_loss:
scaled_loss.backward()
elif self._fp16 == "legacy":
kwargs = {}
if "update_master_grads" in fn_args(self._optimizer.backward):
kwargs["update_master_grads"] = True
self._optimizer.backward(loss, **kwargs)
else:
loss.backward()
def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
learning_rate = self.learning_rate()
if self._fp16 == "legacy":
if hasattr(self._optimizer, "update_master_grads"):
self._optimizer.update_master_grads()
if hasattr(self._optimizer, "clip_master_grads") and \
self._max_grad_norm > 0:
self._optimizer.clip_master_grads(self._max_grad_norm)
for group in self._optimizer.param_groups:
group['lr'] = learning_rate
if self._fp16 is None and self._max_grad_norm > 0:
clip_grad_norm_(group['params'], self._max_grad_norm)
self._optimizer.step()
self._decay_step += 1
self._training_step += 1
# Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
# inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch
class AdaFactor(torch.optim.Optimizer):
def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
enable_factorization=True, ams_grad=True, weight_decay=0):
enable_momentum = beta1 != 0
if non_constant_decay:
ams_grad = False
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
eps2=eps2, cliping_threshold=cliping_threshold,
weight_decay=weight_decay, ams_grad=ams_grad,
enable_factorization=enable_factorization,
enable_momentum=enable_momentum,
non_constant_decay=non_constant_decay)
super(AdaFactor, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaFactor, self).__setstate__(state)
def _experimental_reshape(self, shape):
temp_shape = shape[2:]
if len(temp_shape) == 1:
new_shape = (shape[0], shape[1]*shape[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (shape[0]*functools.reduce(operator.mul,
temp_shape[tmp_div:], 1),
shape[1]*functools.reduce(operator.mul,
temp_shape[:tmp_div], 1))
return new_shape, copy(shape)
def _check_shape(self, shape):
'''
output1 - True - algorithm for matrix, False - vector;
output2 - need reshape
'''
if len(shape) > 2:
return True, True
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            return False, False
        elif len(shape) == 2:
            return True, False
else:
return False, False
def _rms(self, x):
return sqrt(torch.mean(x.pow(2)))
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse \
gradients, use SparseAdam instead')
is_matrix, is_need_reshape = self._check_shape(grad.size())
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
new_shape, old_shape = \
self._experimental_reshape(p.data.size())
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = \
torch.zeros((1, new_shape[1]),
dtype=torch.float32,
device=p.grad.device)
state['exp_avg_sq_C'] = \
torch.zeros((new_shape[0], 1),
dtype=torch.float32,
device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = \
torch.zeros(new_shape, dtype=torch.float32,
device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
lr_t = group['lr']
lr_t *= max(group['eps2'], self._rms(p.data))
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * \
(1 - group['beta1'] ** (state['step'] - 1)) \
/ (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * \
(1 - group['beta2'] ** (state['step'] - 1)) / \
(1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c,
exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t). \
addcmul_(1 - beta2_t, grad, grad). \
add_((1 - beta2_t)*group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, (torch.div(v, 1 - beta2_t **
state['step'])).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only.
Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square.
(default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params,
lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False,
weight_decay=0., max_grad_norm=0., amsgrad=False):
global fused_adam_cuda
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError('AMSGrad variant not supported.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(FusedAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
def step(self, closure=None, grads=None, output_params=None,
scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
            output_params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients.
(default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None]*len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None]*len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None]*len(self.param_groups)
for group, grads_this_group, output_params_this_group, \
grad_norm in zip(self.param_groups, grads_group,
output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None]*len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None]*len(group['params'])
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param in zip(group['params'],
grads_this_group,
output_params_this_group):
# note: p.grad should not ever be set for correct operation of
# mixed precision optimizer that sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedAdam does not support sparse \
gradients, please consider \
SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param \
is None else output_param
fused_adam_cuda.adam(p.data,
out_p,
exp_avg,
exp_avg_sq,
grad,
group['lr'],
beta1,
beta2,
group['eps'],
combined_scale,
state['step'],
self.eps_mode,
bias_correction,
group['weight_decay'])
return loss
| 27,188 | 38.461538 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/logging.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from logging.handlers import RotatingFileHandler
logger = logging.getLogger()
def init_logger(log_file=None, log_file_level=logging.NOTSET):
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
if log_file and log_file != '':
file_handler = RotatingFileHandler(
log_file, maxBytes=1000, backupCount=10)
file_handler.setLevel(log_file_level)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
return logger
| 784 | 29.192308 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/loss.py
|
"""
This includes: LossComputeBase and the standard NMTLossCompute, as well as
the sharded loss computation machinery.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.sparse_losses import SparsemaxLoss
from onmt.modules.sparse_activations import LogSparsemax
def build_loss_compute(model, tgt_field, opt, train=True):
"""
Returns a LossCompute subclass which wraps around an nn.Module subclass
(such as nn.NLLLoss) which defines the loss criterion. The LossCompute
object allows this loss to be computed in shards and passes the relevant
data to a Statistics object which handles training/validation logging.
Currently, the NMTLossCompute class handles all loss computation except
for when using a copy mechanism.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
padding_idx = tgt_field.vocab.stoi[tgt_field.pad_token]
unk_idx = tgt_field.vocab.stoi[tgt_field.unk_token]
if opt.lambda_coverage != 0:
assert opt.coverage_attn, "--coverage_attn needs to be set in " \
"order to use --lambda_coverage != 0"
if opt.copy_attn:
criterion = onmt.modules.CopyGeneratorLoss(
len(tgt_field.vocab), opt.copy_attn_force,
unk_index=unk_idx, ignore_index=padding_idx
)
elif opt.label_smoothing > 0 and train:
criterion = LabelSmoothingLoss(
opt.label_smoothing, len(tgt_field.vocab), ignore_index=padding_idx
)
elif isinstance(model.generator[-1], LogSparsemax):
criterion = SparsemaxLoss(ignore_index=padding_idx, reduction='sum')
else:
criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')
# if the loss function operates on vectors of raw logits instead of
# probabilities, only the first part of the generator needs to be
# passed to the NMTLossCompute. At the moment, the only supported
# loss function of this kind is the sparsemax loss.
use_raw_logits = isinstance(criterion, SparsemaxLoss)
loss_gen = model.generator[0] if use_raw_logits else model.generator
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
criterion, loss_gen, tgt_field.vocab, opt.copy_loss_by_seqlength,
lambda_coverage=opt.lambda_coverage
)
else:
compute = NMTLossCompute(
criterion, loss_gen, lambda_coverage=opt.lambda_coverage,
lambda_align=opt.lambda_align)
compute.to(device)
return compute
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
sharding next step predictions and accumulating multiple
loss computations
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
tgt_vocab (:obj:`Vocab`) :
torchtext vocab object representing the target output
        normalization (str): normalize by "sents" or "tokens"
"""
def __init__(self, criterion, generator):
super(LossComputeBase, self).__init__()
self.criterion = criterion
self.generator = generator
@property
def padding_idx(self):
return self.criterion.ignore_index
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
        raise NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
        raise NotImplementedError
def __call__(self,
batch,
output,
attns,
normalization=1.0,
shard_size=0,
trunc_start=0,
trunc_size=None):
"""Compute the forward loss, possibly in shards in which case this
method also runs the backward pass and returns ``None`` as the loss
value.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(trunc_start, trunc_start + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
normalization: Optional normalization factor.
shard_size (int) : maximum number of examples in a shard
trunc_start (int) : starting position of truncation window
trunc_size (int) : length of truncation window
Returns:
A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.
"""
if trunc_size is None:
trunc_size = batch.tgt.size(0) - trunc_start
trunc_range = (trunc_start, trunc_start + trunc_size)
shard_state = self._make_shard_state(batch, output, trunc_range, attns)
if shard_size == 0:
loss, stats = self._compute_loss(batch, **shard_state)
return loss / float(normalization), stats
batch_stats = onmt.utils.Statistics()
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return None, batch_stats
def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum')
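# Illustrative sketch (added; the toy sizes are assumptions): with smoothing
# 0.1 and a vocabulary of 5, the target token keeps probability 0.9, the
# padding index gets 0, and the remaining mass is spread over other tokens.
def _example_label_smoothing():
    criterion = LabelSmoothingLoss(0.1, tgt_vocab_size=5, ignore_index=0)
    log_probs = torch.log_softmax(torch.randn(2, 5), dim=-1)
    target = torch.tensor([3, 4])
    return criterion(log_probs, target)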
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, criterion, generator, normalization="sents",
lambda_coverage=0.0, lambda_align=0.0):
super(NMTLossCompute, self).__init__(criterion, generator)
self.lambda_coverage = lambda_coverage
self.lambda_align = lambda_align
def _make_shard_state(self, batch, output, range_, attns=None):
shard_state = {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
}
if self.lambda_coverage != 0.0:
coverage = attns.get("coverage", None)
std = attns.get("std", None)
assert attns is not None
assert std is not None, "lambda_coverage != 0.0 requires " \
"attention mechanism"
assert coverage is not None, "lambda_coverage != 0.0 requires " \
"coverage attention"
shard_state.update({
"std_attn": attns.get("std"),
"coverage_attn": coverage
})
if self.lambda_align != 0.0:
# attn_align should be in (batch_size, pad_tgt_size, pad_src_size)
attn_align = attns.get("align", None)
# align_idx should be a Tensor in size([N, 3]), N is total number
# of align src-tgt pair in current batch, each as
# ['sent_N°_in_batch', 'tgt_id+1', 'src_id'] (check AlignField)
align_idx = batch.align
assert attns is not None
            assert attn_align is not None, "lambda_align != 0.0 requires " \
                "an alignment attention head"
            assert align_idx is not None, "lambda_align != 0.0 requires " \
                "guided alignment to be provided"
pad_tgt_size, batch_size, _ = batch.tgt.size()
pad_src_size = batch.src[0].size(0)
align_matrix_size = [batch_size, pad_tgt_size, pad_src_size]
ref_align = onmt.utils.make_batch_align_matrix(
align_idx, align_matrix_size, normalize=True)
            # NOTE: tgt-src ref alignment restricted to the range_ of the
            # shard (coherent with batch.tgt)
shard_state.update({
"align_head": attn_align,
"ref_align": ref_align[:, range_[0] + 1: range_[1], :]
})
return shard_state
def _compute_loss(self, batch, output, target, std_attn=None,
coverage_attn=None, align_head=None, ref_align=None):
bottled_output = self._bottle(output)
scores = self.generator(bottled_output)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
if self.lambda_coverage != 0.0:
coverage_loss = self._compute_coverage_loss(
std_attn=std_attn, coverage_attn=coverage_attn)
loss += coverage_loss
if self.lambda_align != 0.0:
if align_head.dtype != loss.dtype: # Fix FP16
align_head = align_head.to(loss.dtype)
if ref_align.dtype != loss.dtype:
ref_align = ref_align.to(loss.dtype)
align_loss = self._compute_alignement_loss(
align_head=align_head, ref_align=ref_align)
loss += align_loss
stats = self._stats(loss.clone(), scores, gtruth)
return loss, stats
def _compute_coverage_loss(self, std_attn, coverage_attn):
covloss = torch.min(std_attn, coverage_attn).sum()
covloss *= self.lambda_coverage
return covloss
def _compute_alignement_loss(self, align_head, ref_align):
"""Compute loss between 2 partial alignment matrix."""
        # align_head contains values in [0, 1) representing attention probs;
        # zeros come from the context attention src_pad_mask, so the
        # corresponding positions in ref_align are also 0. Therefore,
        # clamping align_head to > 1e-18 should be bias free.
align_loss = -align_head.clamp(min=1e-18).log().mul(ref_align).sum()
align_loss *= self.lambda_align
return align_loss
def filter_shard_state(state, shard_size=None):
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
| 15,317 | 39.099476 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/misc.py
|
# -*- coding: utf-8 -*-
import torch
import random
import inspect
from itertools import islice, repeat, tee
import os
def split_corpus(path, shard_size, default=None):
"""yield a `list` containing `shard_size` line of `path`,
    or repeatedly generate `default` if `path` is None.
"""
if path is not None:
return _split_corpus(path, shard_size)
else:
return repeat(default)
def _split_corpus(path, shard_size):
"""Yield a `list` containing `shard_size` line of `path`.
"""
with open(path, "rb") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
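# Illustrative sketch (added): sharding a 5-line file into chunks of two lines.
# The temporary file is created only for the example.
def _example_split_corpus():
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".txt")
    with os.fdopen(fd, "w") as f:
        f.write("a\nb\nc\nd\ne\n")
    sizes = [len(shard) for shard in split_corpus(path, shard_size=2)]
    os.remove(path)
    return sizes  # -> [2, 2, 1]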
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len, device=lengths.device)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
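# Illustrative sketch (added): lengths [1, 3, 2] yield a (3, 3) boolean mask
# that is True on valid positions:
# [[True, False, False], [True, True, True], [True, True, False]].
def _example_sequence_mask():
    return sequence_mask(torch.tensor([1, 3, 2]))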
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
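# Illustrative sketch (added): tile repeats each slice along `dim` count times,
# keeping the copies of the same example adjacent, e.g.
# [[1, 2], [3, 4]] -> [[1, 2], [1, 2], [3, 4], [3, 4]] for count=2, dim=0.
def _example_tile():
    return tile(torch.tensor([[1, 2], [3, 4]]), 2, dim=0)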
def use_gpu(opt):
"""
    Returns True if a GPU should be used, according to the options.
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
def set_random_seed(seed, is_cuda):
"""Sets the random seed."""
if seed > 0:
torch.manual_seed(seed)
# this one is needed for torchtext random call (shuffled iterator)
# in multi gpu it ensures datasets are read in the same order
random.seed(seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
if is_cuda and seed > 0:
# These ensure same initialization in multi gpu mode
torch.cuda.manual_seed(seed)
def generate_relative_positions_matrix(length, max_relative_positions,
cache=False):
"""Generate the clipped relative positions matrix
for a given length and maximum relative positions"""
if cache:
distance_mat = torch.arange(-length+1, 1, 1).unsqueeze(0)
else:
range_vec = torch.arange(length)
range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1)
distance_mat = range_mat - range_mat.transpose(0, 1)
distance_mat_clipped = torch.clamp(distance_mat,
min=-max_relative_positions,
max=max_relative_positions)
# Shift values to be >= 0
final_mat = distance_mat_clipped + max_relative_positions
return final_mat
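# Illustrative sketch (added): for length=3 and max_relative_positions=2 the
# clipped, shifted matrix is [[2, 3, 4], [1, 2, 3], [0, 1, 2]].
def _example_relative_positions():
    return generate_relative_positions_matrix(3, 2)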
def relative_matmul(x, z, transpose):
"""Helper function for relative positions attention."""
batch_size = x.shape[0]
heads = x.shape[1]
length = x.shape[2]
x_t = x.permute(2, 0, 1, 3)
x_t_r = x_t.reshape(length, heads * batch_size, -1)
if transpose:
z_t = z.transpose(1, 2)
x_tz_matmul = torch.matmul(x_t_r, z_t)
else:
x_tz_matmul = torch.matmul(x_t_r, z)
x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1)
x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)
return x_tz_matmul_r_t
def fn_args(fun):
"""Returns the list of function arguments name."""
return inspect.getfullargspec(fun).args
def nwise(iterable, n=2):
iterables = tee(iterable, n)
[next(iterables[i]) for i in range(n) for j in range(i)]
return zip(*iterables)
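# Illustrative note (added): nwise yields overlapping n-grams, e.g.
# list(nwise("abcd", 2)) == [('a', 'b'), ('b', 'c'), ('c', 'd')].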
def report_matrix(row_label, column_label, matrix):
header_format = "{:>10.10} " + "{:>10.7} " * len(row_label)
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
output = header_format.format("", *row_label) + '\n'
for word, row in zip(column_label, matrix):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
return output
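# Illustrative sketch (added; toy labels and attention values are assumptions):
# report_matrix renders an attention matrix as a text table, starring the
# largest value in each row.
def _example_report_matrix():
    srcs = ["the", "cat"]
    tgts = ["le", "chat"]
    attn = [[0.9, 0.1], [0.2, 0.8]]
    return report_matrix(srcs, tgts, attn)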
def check_model_config(model_config, root):
# we need to check the model path + any tokenizer path
for model in model_config["models"]:
model_path = os.path.join(root, model)
if not os.path.exists(model_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
model_path, model_config["id"]))
if "tokenizer" in model_config.keys():
if "params" in model_config["tokenizer"].keys():
for k, v in model_config["tokenizer"]["params"].items():
if k.endswith("path"):
tok_path = os.path.join(root, v)
if not os.path.exists(tok_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
tok_path, model_config["id"]))
| 5,885 | 31.7 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/report_manager.py
|
""" Report manager utility """
from __future__ import print_function
import time
from datetime import datetime
import onmt
from onmt.utils.logging import logger
def build_report_manager(opt, gpu_rank):
if opt.tensorboard and gpu_rank == 0:
from torch.utils.tensorboard import SummaryWriter
tensorboard_log_dir = opt.tensorboard_log_dir
if not opt.train_from:
tensorboard_log_dir += datetime.now().strftime("/%b-%d_%H-%M-%S")
writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
else:
writer = None
report_mgr = ReportMgr(opt.report_every, start_time=-1,
tensorboard_writer=writer)
return report_mgr
class ReportMgrBase(object):
"""
Report Manager Base class
Inherited classes should override:
* `_report_training`
* `_report_step`
"""
def __init__(self, report_every, start_time=-1.):
"""
Args:
report_every(int): Report status every this many sentences
            start_time(float): manually set report start time. Negative values
                mean that you will need to set it later or use `start()`
"""
self.report_every = report_every
self.start_time = start_time
def start(self):
self.start_time = time.time()
def log(self, *args, **kwargs):
logger.info(*args, **kwargs)
def report_training(self, step, num_steps, learning_rate,
report_stats, multigpu=False):
"""
        This is the user-defined batch-level training progress
        report function.
Args:
step(int): current step count.
num_steps(int): total number of batches.
learning_rate(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if self.start_time < 0:
raise ValueError("""ReportMgr needs to be started
(set 'start_time' or use 'start()'""")
if step % self.report_every == 0:
if multigpu:
report_stats = \
onmt.utils.Statistics.all_gather_stats(report_stats)
self._report_training(
step, num_steps, learning_rate, report_stats)
return onmt.utils.Statistics()
else:
return report_stats
def _report_training(self, *args, **kwargs):
""" To be overridden """
raise NotImplementedError()
def report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
Report stats of a step
Args:
train_stats(Statistics): training stats
valid_stats(Statistics): validation stats
lr(float): current learning rate
"""
self._report_step(
lr, step, train_stats=train_stats, valid_stats=valid_stats)
def _report_step(self, *args, **kwargs):
raise NotImplementedError()
class ReportMgr(ReportMgrBase):
def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
self.tensorboard_writer = tensorboard_writer
def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
if self.tensorboard_writer is not None:
stats.log_tensorboard(
prefix, self.tensorboard_writer, learning_rate, step)
def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
step)
report_stats = onmt.utils.Statistics()
return report_stats
def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train perplexity: %g' % train_stats.ppl())
self.log('Train accuracy: %g' % train_stats.accuracy())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation perplexity: %g' % valid_stats.ppl())
self.log('Validation accuracy: %g' % valid_stats.accuracy())
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
step)
| 5,323 | 33.128205 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/cnn_factory.py
|
"""
Implementation of "Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import onmt.modules
SCALE_WEIGHT = 0.5 ** 0.5
def shape_transform(x):
""" Tranform the size of the tensors to fit for conv input. """
return torch.unsqueeze(torch.transpose(x, 1, 2), 3)
class GatedConv(nn.Module):
""" Gated convolution for CNN class """
def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
super(GatedConv, self).__init__()
self.conv = onmt.modules.WeightNormConv2d(
input_size, 2 * input_size, kernel_size=(width, 1), stride=(1, 1),
padding=(width // 2 * (1 - nopad), 0))
init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout))**0.5)
self.dropout = nn.Dropout(dropout)
def forward(self, x_var):
x_var = self.dropout(x_var)
x_var = self.conv(x_var)
out, gate = x_var.split(int(x_var.size(1) / 2), 1)
out = out * torch.sigmoid(gate)
return out
class StackedCNN(nn.Module):
""" Stacked CNN class """
def __init__(self, num_layers, input_size, cnn_kernel_width=3,
dropout=0.2):
super(StackedCNN, self).__init__()
self.dropout = dropout
self.num_layers = num_layers
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(
GatedConv(input_size, cnn_kernel_width, dropout))
def forward(self, x):
for conv in self.layers:
x = x + conv(x)
x *= SCALE_WEIGHT
return x
| 1,620 | 28.472727 | 78 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/statistics.py
|
""" Statistics calculation utility """
from __future__ import division
import time
import math
import sys
from onmt.utils.logging import logger
class Statistics(object):
"""
Accumulator for loss statistics.
Currently calculates:
* accuracy
* perplexity
* elapsed time
"""
def __init__(self, loss=0, n_words=0, n_correct=0):
self.loss = loss
self.n_words = n_words
self.n_correct = n_correct
self.n_src_words = 0
self.start_time = time.time()
@staticmethod
def all_gather_stats(stat, max_size=4096):
"""
        Gather a `Statistics` object across multiple processes/nodes
        Args:
            stat(:obj:Statistics): the statistics object to gather
                across all processes/nodes
            max_size(int): max buffer size to use
        Returns:
            `Statistics`, the updated stats object
"""
stats = Statistics.all_gather_stats_list([stat], max_size=max_size)
return stats[0]
@staticmethod
def all_gather_stats_list(stat_list, max_size=4096):
"""
        Gather a `Statistics` list across all processes/nodes
        Args:
            stat_list(list([`Statistics`])): list of statistics objects to
                gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats
"""
from torch.distributed import get_rank
from onmt.utils.distributed import all_gather_list
# Get a list of world_size lists with len(stat_list) Statistics objects
all_stats = all_gather_list(stat_list, max_size=max_size)
our_rank = get_rank()
our_stats = all_stats[our_rank]
for other_rank, stats in enumerate(all_stats):
if other_rank == our_rank:
continue
for i, stat in enumerate(stats):
our_stats[i].update(stat, update_n_src_words=True)
return our_stats
def update(self, stat, update_n_src_words=False):
"""
        Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not
"""
self.loss += stat.loss
self.n_words += stat.n_words
self.n_correct += stat.n_correct
if update_n_src_words:
self.n_src_words += stat.n_src_words
def accuracy(self):
""" compute accuracy """
return 100 * (self.n_correct / self.n_words)
def xent(self):
""" compute cross entropy """
return self.loss / self.n_words
def ppl(self):
""" compute perplexity """
return math.exp(min(self.loss / self.n_words, 100))
def elapsed_time(self):
""" compute elapsed time """
return time.time() - self.start_time
def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
            step (int): current step
            num_steps (int): total number of steps
            learning_rate (float): current learning rate
            start (int): start time of step.
"""
t = self.elapsed_time()
step_fmt = "%2d" % step
if num_steps > 0:
step_fmt = "%s/%5d" % (step_fmt, num_steps)
logger.info(
("Step %s; acc: %6.2f; ppl: %5.2f; xent: %4.2f; " +
"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec")
% (step_fmt,
self.accuracy(),
self.ppl(),
self.xent(),
learning_rate,
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush()
def log_tensorboard(self, prefix, writer, learning_rate, step):
""" display statistics to tensorboard """
t = self.elapsed_time()
writer.add_scalar(prefix + "/xent", self.xent(), step)
writer.add_scalar(prefix + "/ppl", self.ppl(), step)
writer.add_scalar(prefix + "/accuracy", self.accuracy(), step)
writer.add_scalar(prefix + "/tgtper", self.n_words / t, step)
writer.add_scalar(prefix + "/lr", learning_rate, step)
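# Illustrative sketch (added; the numbers are made up): accumulating two shards
# of loss statistics, then reading back accuracy and perplexity.
def _example_statistics():
    stats = Statistics(loss=6.0, n_words=10, n_correct=4)
    stats.update(Statistics(loss=4.0, n_words=10, n_correct=6))
    return stats.accuracy(), stats.ppl()  # -> (50.0, exp(0.5))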
| 4,291 | 30.328467 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/distributed.py
|
""" Pytorch Distributed utils
This piece of code was heavily inspired by the equivalent of Fairseq-py
https://github.com/pytorch/fairseq
"""
from __future__ import print_function
import math
import pickle
import torch.distributed
from onmt.utils.logging import logger
def is_master(opt, device_id):
return opt.gpu_ranks[device_id] == 0
def multi_init(opt, device_id):
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip=opt.master_ip,
master_port=opt.master_port)
dist_world_size = opt.world_size
torch.distributed.init_process_group(
backend=opt.gpu_backend, init_method=dist_init_method,
world_size=dist_world_size, rank=opt.gpu_ranks[device_id])
gpu_rank = torch.distributed.get_rank()
if not is_master(opt, device_id):
logger.disabled = True
return gpu_rank
def all_reduce_and_rescale_tensors(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
torch.distributed.all_reduce(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
torch.distributed.all_reduce(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
def all_gather_list(data, max_size=4096):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != all_gather_list._in_buffer.size():
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size)
for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError(
'encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
in_buffer[1] = enc_size % 255
in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
results = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * out_buffer[0].item()) + out_buffer[1].item()
bytes_list = bytes(out_buffer[2:size+2].tolist())
result = pickle.loads(bytes_list)
results.append(result)
return results
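if __name__ == "__main__":
    # Minimal single-process sketch of all_reduce_and_rescale_tensors.
    # The backend, address and port below are arbitrary choices for this
    # example; a real multi-GPU run launches one process per device and
    # goes through multi_init instead.
    torch.distributed.init_process_group(
        backend="gloo", init_method="tcp://127.0.0.1:29500",
        world_size=1, rank=0)
    grads = [torch.ones(4), torch.full((3,), 6.0)]
    all_reduce_and_rescale_tensors(grads, rescale_denom=2.0)
    # with a single worker the all-reduce sum is the tensor itself,
    # so every entry is simply divided by the denominator
    print(grads)
    torch.distributed.destroy_process_group()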
| 3,926 | 30.926829 | 77 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/alignment.py
|
# -*- coding: utf-8 -*-
import torch
from itertools import accumulate
def make_batch_align_matrix(index_tensor, size=None, normalize=False):
"""
    Convert a sparse index_tensor into a batch of alignment matrices,
    with each row normalized to sum to 1 if normalize is set.
    Args:
        index_tensor (LongTensor): ``(N, 3)`` of [batch_id, tgt_id, src_id]
        size (List[int]): size of the resulting (sparse) tensor.
        normalize (bool): whether to normalize each row (over the src dim)
            of the resulting tensor.
"""
n_fill, device = index_tensor.size(0), index_tensor.device
value_tensor = torch.ones([n_fill], dtype=torch.float)
dense_tensor = torch.sparse_coo_tensor(
index_tensor.t(), value_tensor, size=size, device=device).to_dense()
if normalize:
row_sum = dense_tensor.sum(-1, keepdim=True) # sum by row(tgt)
# threshold on 1 to avoid div by 0
torch.nn.functional.threshold(row_sum, 1, 1, inplace=True)
dense_tensor.div_(row_sum)
return dense_tensor
def extract_alignment(align_matrix, tgt_mask, src_lens, n_best):
"""
    Extract a batched align_matrix into per-sentence alignment matrices,
    using tgt_mask to filter out invalid tgt positions such as EOS/PAD.
    BOS is already excluded from tgt_mask in order to match the prediction.
    Args:
        align_matrix (Tensor): ``(B, tgt_len, src_len)``,
            attention head normalized by Softmax(dim=-1)
        tgt_mask (BoolTensor): ``(B, tgt_len)``, True for EOS, PAD.
        src_lens (LongTensor): ``(B,)``, containing valid src lengths.
        n_best (int): number of parallel translations per sentence.
        * B: flattened batch size, B = batch_size * n_best.
    Returns:
        alignments (List[List[FloatTensor]]): ``(batch_size, n_best,)``,
            containing the valid alignment matrix for each translation.
"""
batch_size_n_best = align_matrix.size(0)
assert batch_size_n_best % n_best == 0
alignments = [[] for _ in range(batch_size_n_best // n_best)]
    # treat alignment matrices one by one, as each has a different length
for i, (am_b, tgt_mask_b, src_len) in enumerate(
zip(align_matrix, tgt_mask, src_lens)):
valid_tgt = ~tgt_mask_b
valid_tgt_len = valid_tgt.sum()
        # get valid alignment (sub-matrix of the full padded alignment matrix)
am_valid_tgt = am_b.masked_select(valid_tgt.unsqueeze(-1)) \
.view(valid_tgt_len, -1)
valid_alignment = am_valid_tgt[:, :src_len] # only keep valid src
alignments[i // n_best].append(valid_alignment)
return alignments
def build_align_pharaoh(valid_alignment):
"""Convert valid alignment matrix to i-j Pharaoh format.(0 indexed)"""
align_pairs = []
tgt_align_src_id = valid_alignment.argmax(dim=-1)
for tgt_id, src_id in enumerate(tgt_align_src_id.tolist()):
align_pairs.append(str(src_id) + "-" + str(tgt_id))
align_pairs.sort(key=lambda x: int(x.split('-')[-1])) # sort by tgt_id
align_pairs.sort(key=lambda x: int(x.split('-')[0])) # sort by src_id
return align_pairs
def to_word_align(src, tgt, subword_align, mode):
"""Convert subword alignment to word alignment.
Args:
src (string): tokenized sentence in source language.
tgt (string): tokenized sentence in target language.
subword_align (string): align_pharaoh correspond to src-tgt.
mode (string): tokenization mode used by src and tgt,
choose from ["joiner", "spacer"].
Returns:
word_align (string): converted alignments correspand to
detokenized src-tgt.
"""
src, tgt = src.strip().split(), tgt.strip().split()
subword_align = {(int(a), int(b)) for a, b in (x.split("-")
for x in subword_align.split())}
if mode == 'joiner':
src_map = subword_map_by_joiner(src, marker='■')
tgt_map = subword_map_by_joiner(tgt, marker='■')
elif mode == 'spacer':
src_map = subword_map_by_spacer(src, marker='▁')
tgt_map = subword_map_by_spacer(tgt, marker='▁')
else:
raise ValueError("Invalid value for argument mode!")
word_align = list({"{}-{}".format(src_map[a], tgt_map[b])
for a, b in subword_align})
word_align.sort(key=lambda x: int(x.split('-')[-1])) # sort by tgt_id
word_align.sort(key=lambda x: int(x.split('-')[0])) # sort by src_id
return " ".join(word_align)
def subword_map_by_joiner(subwords, marker='■'):
"""Return word id for each subword token (annotate by joiner)."""
flags = [0] * len(subwords)
for i, tok in enumerate(subwords):
if tok.endswith(marker):
flags[i] = 1
if tok.startswith(marker):
assert i >= 1 and flags[i-1] != 1, \
"Sentence `{}` not correct!".format(" ".join(subwords))
flags[i-1] = 1
marker_acc = list(accumulate([0] + flags[:-1]))
    word_group = [(i - marker_sofar) for i, marker_sofar
                  in enumerate(marker_acc)]
return word_group
def subword_map_by_spacer(subwords, marker='▁'):
"""Return word id for each subword token (annotate by spacer)."""
word_group = list(accumulate([int(marker in x) for x in subwords]))
if word_group[0] == 1: # when dummy prefix is set
word_group = [item - 1 for item in word_group]
return word_group
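if __name__ == "__main__":
    # Minimal end-to-end sketch of the helpers above on toy data; the tokens,
    # alignment points and sizes are made up purely for illustration.
    index = torch.tensor([[0, 0, 0], [0, 1, 1], [1, 0, 1]])  # [batch, tgt, src]
    matrix = make_batch_align_matrix(index, size=[2, 2, 2], normalize=True)
    tgt_mask = torch.tensor([[False, False], [False, True]])  # True = EOS/PAD
    src_lens = torch.tensor([2, 2])
    valid = extract_alignment(matrix, tgt_mask, src_lens, n_best=1)
    pairs = build_align_pharaoh(valid[0][0])
    print(pairs)  # ['0-0', '1-1']
    # map the subword alignment back to word level ('■' marks a joiner)
    print(to_word_align("a■ b c", "x y■ z", " ".join(pairs), mode="joiner"))
    # -> 0-0 0-1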
| 5,370 | 39.689394 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/__init__.py
|
"""Module defining various utilities."""
from onmt.utils.misc import split_corpus, aeq, use_gpu, set_random_seed
from onmt.utils.alignment import make_batch_align_matrix
from onmt.utils.report_manager import ReportMgr, build_report_manager
from onmt.utils.statistics import Statistics
from onmt.utils.optimizers import MultipleOptimizer, \
Optimizer, AdaFactor
from onmt.utils.earlystopping import EarlyStopping, scorers_from_opts
__all__ = ["split_corpus", "aeq", "use_gpu", "set_random_seed", "ReportMgr",
"build_report_manager", "Statistics",
"MultipleOptimizer", "Optimizer", "AdaFactor", "EarlyStopping",
"scorers_from_opts", "make_batch_align_matrix"]
| 696 | 48.785714 | 76 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/earlystopping.py
|
from enum import Enum
from onmt.utils.logging import logger
class PatienceEnum(Enum):
IMPROVING = 0
DECREASING = 1
STOPPED = 2
class Scorer(object):
def __init__(self, best_score, name):
self.best_score = best_score
self.name = name
def is_improving(self, stats):
raise NotImplementedError()
def is_decreasing(self, stats):
raise NotImplementedError()
def update(self, stats):
self.best_score = self._caller(stats)
def __call__(self, stats, **kwargs):
return self._caller(stats)
def _caller(self, stats):
raise NotImplementedError()
class PPLScorer(Scorer):
def __init__(self):
super(PPLScorer, self).__init__(float("inf"), "ppl")
def is_improving(self, stats):
return stats.ppl() < self.best_score
def is_decreasing(self, stats):
return stats.ppl() > self.best_score
def _caller(self, stats):
return stats.ppl()
class AccuracyScorer(Scorer):
def __init__(self):
super(AccuracyScorer, self).__init__(float("-inf"), "acc")
def is_improving(self, stats):
return stats.accuracy() > self.best_score
def is_decreasing(self, stats):
return stats.accuracy() < self.best_score
def _caller(self, stats):
return stats.accuracy()
DEFAULT_SCORERS = [PPLScorer(), AccuracyScorer()]
SCORER_BUILDER = {
"ppl": PPLScorer,
"accuracy": AccuracyScorer
}
def scorers_from_opts(opt):
if opt.early_stopping_criteria is None:
return DEFAULT_SCORERS
else:
scorers = []
for criterion in set(opt.early_stopping_criteria):
assert criterion in SCORER_BUILDER.keys(), \
"Criterion {} not found".format(criterion)
scorers.append(SCORER_BUILDER[criterion]())
return scorers
class EarlyStopping(object):
def __init__(self, tolerance, scorers=DEFAULT_SCORERS):
"""
Callable class to keep track of early stopping.
Args:
            tolerance (int): number of validation steps without improvement
                before training is stopped.
            scorers (list): scorers used to validate performance on the dev set.
"""
self.tolerance = tolerance
self.stalled_tolerance = self.tolerance
self.current_tolerance = self.tolerance
self.early_stopping_scorers = scorers
self.status = PatienceEnum.IMPROVING
self.current_step_best = 0
def __call__(self, valid_stats, step):
"""
        Update the internal state of the early stopping mechanism and decide
        whether to continue training or stop the training procedure.
        Checks whether the scores from all pre-chosen scorers improved. If
        every metric improves, the status is switched to improving and the
        tolerance is reset. If every metric deteriorates, the status is
        switched to decreasing and the tolerance is decreased; once the
        tolerance reaches 0, the status is changed to stopped.
        Finally, if some metrics improved and others did not, the run is
        considered stalled; after `tolerance` stalled validations, the status
        is switched to stopped.
        :param valid_stats: Statistics of the dev set
        :param step: current training step
"""
if self.status == PatienceEnum.STOPPED:
# Don't do anything
return
if all([scorer.is_improving(valid_stats) for scorer
in self.early_stopping_scorers]):
self._update_increasing(valid_stats, step)
elif all([scorer.is_decreasing(valid_stats) for scorer
in self.early_stopping_scorers]):
self._update_decreasing()
else:
self._update_stalled()
def _update_stalled(self):
self.stalled_tolerance -= 1
logger.info(
"Stalled patience: {}/{}".format(self.stalled_tolerance,
self.tolerance))
if self.stalled_tolerance == 0:
logger.info(
"Training finished after stalled validations. Early Stop!"
)
self._log_best_step()
self._decreasing_or_stopped_status_update(self.stalled_tolerance)
def _update_increasing(self, valid_stats, step):
self.current_step_best = step
for scorer in self.early_stopping_scorers:
logger.info(
"Model is improving {}: {:g} --> {:g}.".format(
scorer.name, scorer.best_score, scorer(valid_stats))
)
# Update best score of each criteria
scorer.update(valid_stats)
# Reset tolerance
self.current_tolerance = self.tolerance
self.stalled_tolerance = self.tolerance
# Update current status
self.status = PatienceEnum.IMPROVING
def _update_decreasing(self):
# Decrease tolerance
self.current_tolerance -= 1
# Log
logger.info(
"Decreasing patience: {}/{}".format(self.current_tolerance,
self.tolerance)
)
        # Stop if patience is exhausted
if self.current_tolerance == 0:
logger.info("Training finished after not improving. Early Stop!")
self._log_best_step()
self._decreasing_or_stopped_status_update(self.current_tolerance)
def _log_best_step(self):
logger.info("Best model found at step {}".format(
self.current_step_best))
def _decreasing_or_stopped_status_update(self, tolerance):
self.status = PatienceEnum.DECREASING \
if tolerance > 0 \
else PatienceEnum.STOPPED
def is_improving(self):
return self.status == PatienceEnum.IMPROVING
def has_stopped(self):
return self.status == PatienceEnum.STOPPED
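if __name__ == "__main__":
    # Minimal usage sketch. The _FakeStats class below is a stand-in for
    # onmt.utils.Statistics (only ppl() and accuracy() are used here) and
    # exists purely for this example; the numbers are arbitrary.
    class _FakeStats(object):
        def __init__(self, ppl, acc):
            self._ppl, self._acc = ppl, acc
        def ppl(self):
            return self._ppl
        def accuracy(self):
            return self._acc
    early_stopper = EarlyStopping(tolerance=2)
    history = [(10.0, 50.0), (8.0, 55.0), (9.0, 54.0), (9.5, 53.0)]
    for step, (ppl, acc) in enumerate(history, start=1):
        early_stopper(_FakeStats(ppl, acc), step)
        if early_stopper.has_stopped():
            print("stopped at step", step)
            break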
| 5,794 | 28.717949 | 79 |
py
|
data-to-text-hierarchical
|
data-to-text-hierarchical-master/onmt/utils/parse.py
|
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
def __init__(
self,
config_file_parser_class=cfargparse.YAMLConfigFileParser,
formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
**kwargs):
super(ArgumentParser, self).__init__(
config_file_parser_class=config_file_parser_class,
formatter_class=formatter_class,
**kwargs)
@classmethod
def defaults(cls, *args):
"""Get default arguments added to a parser by all ``*args``."""
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
@classmethod
def update_model_opts(cls, model_opt):
if model_opt.word_vec_size > 0:
model_opt.src_word_vec_size = model_opt.word_vec_size
model_opt.tgt_word_vec_size = model_opt.word_vec_size
if model_opt.layers > 0:
model_opt.enc_layers = model_opt.layers
model_opt.dec_layers = model_opt.layers
if model_opt.rnn_size > 0:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
model_opt.brnn = model_opt.encoder_type == "brnn"
if model_opt.copy_attn_type is None:
model_opt.copy_attn_type = model_opt.global_attention
if model_opt.alignment_layer is None:
model_opt.alignment_layer = -2
model_opt.lambda_align = 0.0
model_opt.full_context_alignment = False
@classmethod
def validate_model_opts(cls, model_opt):
assert model_opt.model_type in ["text", "table", "img", "audio", "vec"], \
"Unsupported model type %s" % model_opt.model_type
# this check is here because audio allows the encoder and decoder to
# be different sizes, but other model types do not yet
same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
assert model_opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if model_opt.share_embeddings:
if model_opt.model_type not in ["text", "table"]:
raise AssertionError(
"--share_embeddings requires --model_type text.")
if model_opt.lambda_align > 0.0:
assert model_opt.decoder_type == 'transformer', \
"Only transformer is supported to joint learn alignment."
assert model_opt.alignment_layer < model_opt.dec_layers and \
model_opt.alignment_layer >= -model_opt.dec_layers, \
"N° alignment_layer should be smaller than number of layers."
logger.info("Joint learn alignment at layer [{}] "
"with {} heads in full_context '{}'.".format(
model_opt.alignment_layer,
model_opt.alignment_heads,
model_opt.full_context_alignment))
@classmethod
def ckpt_model_opts(cls, ckpt_opt):
# Load default opt values, then overwrite with the opts in
# the checkpoint. That way, if there are new options added,
# the defaults are used.
opt = cls.defaults(opts.model_opts)
opt.__dict__.update(ckpt_opt.__dict__)
return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
            logger.warning("You have a CUDA device, should run with -gpu_ranks")
if opt.world_size < len(opt.gpu_ranks):
raise AssertionError(
"parameter counts of -gpu_ranks must be less or equal "
"than -world_size.")
if opt.world_size == len(opt.gpu_ranks) and \
min(opt.gpu_ranks) > 0:
raise AssertionError(
"-gpu_ranks should have master(=0) rank "
"unless -world_size is greater than len(gpu_ranks).")
assert len(opt.data_ids) == len(opt.data_weights), \
"Please check -data_ids and -data_weights options!"
        assert len(opt.dropout) == len(opt.dropout_steps), \
            "Number of dropout values must match number of dropout_steps values"
        assert len(opt.attention_dropout) == len(opt.dropout_steps), \
            "Number of attention_dropout values must match number of " \
            "dropout_steps values"
@classmethod
def validate_translate_opts(cls, opt):
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
@classmethod
def validate_preprocess_args(cls, opt):
assert opt.max_shard_size == 0, \
"-max_shard_size is deprecated. Please use \
-shard_size (number of examples) instead."
assert opt.shuffle == 0, \
"-shuffle is not implemented. Please shuffle \
your data before pre-processing."
assert len(opt.train_src) == len(opt.train_tgt), \
"Please provide same number of src and tgt train files!"
assert len(opt.train_src) == len(opt.train_ids), \
"Please provide proper -train_ids for your data!"
for file in opt.train_src + opt.train_tgt:
assert os.path.isfile(file), "Please check path of %s" % file
if len(opt.train_align) == 1 and opt.train_align[0] is None:
opt.train_align = [None] * len(opt.train_src)
else:
assert len(opt.train_align) == len(opt.train_src), \
"Please provide same number of word alignment train \
files as src/tgt!"
for file in opt.train_align:
assert os.path.isfile(file), "Please check path of %s" % file
assert not opt.valid_align or os.path.isfile(opt.valid_align), \
"Please check path of your valid alignment file!"
assert not opt.valid_src or os.path.isfile(opt.valid_src), \
"Please check path of your valid src file!"
assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
"Please check path of your valid tgt file!"
assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
"Please check path of your src vocab!"
assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
"Please check path of your tgt vocab!"
| 7,089 | 41.45509 | 82 |
py
|
triton
|
triton-main/.github/workflows/torchinductor/scripts/check_acc.py
|
import csv
import sys
file_path = sys.argv[1]
with open(file_path) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i == 0:
continue
if row[3] != "pass":
print(f"{row[1]} failed on device {row[0]} with batch size {row[2]}")
| 289 | 23.166667 | 81 |
py
|
triton
|
triton-main/.github/workflows/torchinductor/scripts/check_perf.py
|
import argparse
import csv
from collections import namedtuple
# Create a named tuple for the output of the benchmark
BenchmarkOutput = namedtuple(
'BenchmarkOutput', ['dev', 'name', 'batch_size', 'speedup', 'latency'])
def parse_output(file_path: str) -> dict:
entries = {}
with open(file_path) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i == 0 or len(row) < 5:
continue
dev = row[0]
name = row[1]
batch_size = row[2]
speedup = float(row[3])
latency = float(row[4])
entries[name] = BenchmarkOutput(
dev, name, batch_size, speedup, latency)
return entries
def compare(baseline: dict, new: dict, threshold: float,
            geomean_threshold: float) -> None:
    baseline_geomean = 1.0
    new_geomean = 1.0
    for key in new:
        if key not in baseline:
            print(f"New benchmark {key} not found in baseline")
            continue
        baseline_latency = baseline[key].latency
new_latency = new[key].latency
if new_latency < baseline_latency * (1 - threshold):
print(
f"New benchmark {key} is faster than baseline: {new_latency} vs {baseline_latency}")
elif new_latency > baseline_latency * (1 + threshold):
print(
f"New benchmark {key} is slower than baseline: {new_latency} vs {baseline_latency}")
baseline_geomean *= baseline[key].speedup
new_geomean *= new[key].speedup
baseline_geomean = baseline_geomean ** (1 / len(baseline))
new_geomean = new_geomean ** (1 / len(new))
print(f"Baseline geomean: {baseline_geomean}")
print(f"New geomean: {new_geomean}")
assert new_geomean > baseline_geomean * (1 - geomean_threshold), \
f"New geomean is slower than baseline: {new_geomean} vs {baseline_geomean}"
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--baseline', required=True)
parser.add_argument('--new', required=True)
parser.add_argument('--threshold', type=float, default=0.1)
parser.add_argument('--geomean-threshold', type=float, default=0.02)
args = parser.parse_args()
baseline = parse_output(args.baseline)
new = parse_output(args.new)
compare(baseline, new, args.threshold, args.geomean_threshold)
main()
| 2,374 | 34.984848 | 100 |
py
|
triton
|
triton-main/python/setup.py
|
import os
import platform
import re
import shutil
import subprocess
import sys
import sysconfig
import tarfile
import tempfile
import urllib.request
from pathlib import Path
from typing import NamedTuple
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
from setuptools.command.build_py import build_py
# Taken from https://github.com/pytorch/pytorch/blob/master/tools/setup_helpers/env.py
def check_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]
def get_build_type():
if check_env_flag("DEBUG"):
return "Debug"
elif check_env_flag("REL_WITH_DEB_INFO"):
return "RelWithDebInfo"
elif check_env_flag("TRITON_REL_BUILD_WITH_ASSERTS"):
return "TritonRelBuildWithAsserts"
else:
# TODO: change to release when stable enough
return "TritonRelBuildWithAsserts"
def get_codegen_backends():
backends = []
env_prefix = "TRITON_CODEGEN_"
for name, _ in os.environ.items():
if name.startswith(env_prefix) and check_env_flag(name):
assert name.count(env_prefix) <= 1
backends.append(name.replace(env_prefix, '').lower())
return backends
# --- third party packages -----
class Package(NamedTuple):
package: str
name: str
url: str
include_flag: str
lib_flag: str
syspath_var_name: str
# pybind11
def get_pybind11_package_info():
name = "pybind11-2.10.0"
url = "https://github.com/pybind/pybind11/archive/refs/tags/v2.10.0.tar.gz"
return Package("pybind11", name, url, "PYBIND11_INCLUDE_DIR", "", "PYBIND11_SYSPATH")
# llvm
def get_llvm_package_info():
# added statement for Apple Silicon
system = platform.system()
arch = 'x86_64'
if system == "Darwin":
system_suffix = "apple-darwin"
cpu_type = os.popen('sysctl machdep.cpu.brand_string').read()
if "apple" in cpu_type.lower():
arch = 'arm64'
elif system == "Linux":
vglibc = tuple(map(int, platform.libc_ver()[1].split('.')))
vglibc = vglibc[0] * 100 + vglibc[1]
linux_suffix = 'ubuntu-18.04' if vglibc > 217 else 'centos-7'
system_suffix = f"linux-gnu-{linux_suffix}"
else:
return Package("llvm", "LLVM-C.lib", "", "LLVM_INCLUDE_DIRS", "LLVM_LIBRARY_DIR", "LLVM_SYSPATH")
use_assert_enabled_llvm = check_env_flag("TRITON_USE_ASSERT_ENABLED_LLVM", "False")
release_suffix = "assert" if use_assert_enabled_llvm else "release"
name = f'llvm+mlir-17.0.0-{arch}-{system_suffix}-{release_suffix}'
version = "llvm-17.0.0-c5dede880d17"
url = f"https://github.com/ptillet/triton-llvm-releases/releases/download/{version}/{name}.tar.xz"
return Package("llvm", name, url, "LLVM_INCLUDE_DIRS", "LLVM_LIBRARY_DIR", "LLVM_SYSPATH")
def get_thirdparty_packages(triton_cache_path):
packages = [get_pybind11_package_info(), get_llvm_package_info()]
thirdparty_cmake_args = []
for p in packages:
package_root_dir = os.path.join(triton_cache_path, p.package)
package_dir = os.path.join(package_root_dir, p.name)
if p.syspath_var_name in os.environ:
package_dir = os.environ[p.syspath_var_name]
version_file_path = os.path.join(package_dir, "version.txt")
if p.syspath_var_name not in os.environ and\
(not os.path.exists(version_file_path) or Path(version_file_path).read_text() != p.url):
try:
shutil.rmtree(package_root_dir)
except Exception:
pass
os.makedirs(package_root_dir, exist_ok=True)
print(f'downloading and extracting {p.url} ...')
ftpstream = urllib.request.urlopen(p.url)
file = tarfile.open(fileobj=ftpstream, mode="r|*")
file.extractall(path=package_root_dir)
# write version url to package_dir
with open(os.path.join(package_dir, "version.txt"), "w") as f:
f.write(p.url)
if p.include_flag:
thirdparty_cmake_args.append(f"-D{p.include_flag}={package_dir}/include")
if p.lib_flag:
thirdparty_cmake_args.append(f"-D{p.lib_flag}={package_dir}/lib")
return thirdparty_cmake_args
# ---- package data ---
def download_and_copy_ptxas():
base_dir = os.path.dirname(__file__)
src_path = "bin/ptxas"
version = "12.2.91"
url = f"https://conda.anaconda.org/nvidia/label/cuda-12.2.0/linux-64/cuda-nvcc-{version}-0.tar.bz2"
dst_prefix = os.path.join(base_dir, "triton")
dst_suffix = os.path.join("third_party", "cuda", src_path)
dst_path = os.path.join(dst_prefix, dst_suffix)
is_linux = platform.system() == "Linux"
download = False
if is_linux:
download = True
if os.path.exists(dst_path):
curr_version = subprocess.check_output([dst_path, "--version"]).decode("utf-8").strip()
curr_version = re.search(r"V([.|\d]+)", curr_version).group(1)
download = curr_version != version
if download:
print(f'downloading and extracting {url} ...')
ftpstream = urllib.request.urlopen(url)
file = tarfile.open(fileobj=ftpstream, mode="r|*")
with tempfile.TemporaryDirectory() as temp_dir:
file.extractall(path=temp_dir)
src_path = os.path.join(temp_dir, src_path)
os.makedirs(os.path.split(dst_path)[0], exist_ok=True)
shutil.copy(src_path, dst_path)
return dst_suffix
# ---- cmake extension ----
class CMakeBuildPy(build_py):
def run(self) -> None:
self.run_command('build_ext')
return super().run()
class CMakeExtension(Extension):
def __init__(self, name, path, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.path = path
class CMakeBuild(build_ext):
user_options = build_ext.user_options + [('base-dir=', None, 'base directory of Triton')]
def initialize_options(self):
build_ext.initialize_options(self)
self.base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def finalize_options(self):
build_ext.finalize_options(self)
def run(self):
try:
out = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)
)
match = re.search(r"version\s*(?P<major>\d+)\.(?P<minor>\d+)([\d.]+)?", out.decode())
cmake_major, cmake_minor = int(match.group("major")), int(match.group("minor"))
if (cmake_major, cmake_minor) < (3, 18):
raise RuntimeError("CMake >= 3.18.0 is required")
for ext in self.extensions:
self.build_extension(ext)
def get_cmake_dir(self):
plat_name = sysconfig.get_platform()
python_version = sysconfig.get_python_version()
dir_name = f"cmake.{plat_name}-{sys.implementation.name}-{python_version}"
cmake_dir = Path(self.base_dir) / "python" / "build" / dir_name
cmake_dir.mkdir(parents=True, exist_ok=True)
return cmake_dir
def build_extension(self, ext):
lit_dir = shutil.which('lit')
user_home = os.getenv("HOME") or os.getenv("USERPROFILE") or \
os.getenv("HOMEPATH") or None
if not user_home:
raise RuntimeError("Could not find user home directory")
triton_cache_path = os.path.join(user_home, ".triton")
# lit is used by the test suite
thirdparty_cmake_args = get_thirdparty_packages(triton_cache_path)
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.path)))
# create build directories
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# python directories
python_include_dir = sysconfig.get_path("platinclude")
cmake_args = [
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
"-DLLVM_ENABLE_WERROR=ON",
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
"-DTRITON_BUILD_TUTORIALS=OFF",
"-DTRITON_BUILD_PYTHON_MODULE=ON",
"-DPython3_EXECUTABLE:FILEPATH=" + sys.executable,
"-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON",
"-DPYTHON_INCLUDE_DIRS=" + python_include_dir,
]
if lit_dir is not None:
cmake_args.append("-DLLVM_EXTERNAL_LIT=" + lit_dir)
cmake_args.extend(thirdparty_cmake_args)
# configuration
cfg = get_build_type()
build_args = ["--config", cfg]
codegen_backends = get_codegen_backends()
if len(codegen_backends) > 0:
all_codegen_backends = ';'.join(codegen_backends)
cmake_args += ["-DTRITON_CODEGEN_BACKENDS=" + all_codegen_backends]
if platform.system() == "Windows":
cmake_args += [f"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"]
if sys.maxsize > 2**32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
max_jobs = os.getenv("MAX_JOBS", str(2 * os.cpu_count()))
build_args += ['-j' + max_jobs]
if check_env_flag("TRITON_BUILD_WITH_CLANG_LLD"):
cmake_args += ["-DCMAKE_C_COMPILER=clang",
"-DCMAKE_CXX_COMPILER=clang++",
"-DCMAKE_LINKER=lld",
"-DCMAKE_EXE_LINKER_FLAGS=-fuse-ld=lld",
"-DCMAKE_MODULE_LINKER_FLAGS=-fuse-ld=lld",
"-DCMAKE_SHARED_LINKER_FLAGS=-fuse-ld=lld"]
env = os.environ.copy()
cmake_dir = self.get_cmake_dir()
subprocess.check_call(["cmake", self.base_dir] + cmake_args, cwd=cmake_dir, env=env)
subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=cmake_dir)
download_and_copy_ptxas()
setup(
name="triton",
version="2.1.0",
author="Philippe Tillet",
author_email="[email protected]",
description="A language and compiler for custom Deep Learning operations",
long_description="",
packages=[
"triton",
"triton/_C",
"triton/common",
"triton/compiler",
"triton/interpreter",
"triton/language",
"triton/language/extra",
"triton/ops",
"triton/ops/blocksparse",
"triton/runtime",
"triton/runtime/backends",
"triton/third_party",
"triton/tools",
],
install_requires=[
"filelock",
],
include_package_data=True,
ext_modules=[CMakeExtension("triton", "triton/_C/")],
cmdclass={"build_ext": CMakeBuild, "build_py": CMakeBuildPy},
zip_safe=False,
# for PyPI
keywords=["Compiler", "Deep Learning"],
url="https://github.com/openai/triton/",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
test_suite="tests",
extras_require={
"build": [
"cmake>=3.18",
"lit",
],
"tests": [
"autopep8",
"flake8",
"isort",
"numpy",
"pytest",
"scipy>=1.7.1",
],
"tutorials": [
"matplotlib",
"pandas",
"tabulate",
],
},
)
| 11,910 | 34.876506 | 122 |
py
|
triton
|
triton-main/python/tutorials/05-layer-norm.py
|
"""
Layer Normalization
====================
In this tutorial, you will write a high-performance layer normalization
kernel that runs faster than the PyTorch implementation.
In doing so, you will learn about:
* Implementing backward pass in Triton.
* Implementing parallel reduction in Triton.
"""
# %%
# Motivations
# -----------
#
# The *LayerNorm* operator was first introduced in [BA2016]_ as a way to improve the performance
# of sequential models (e.g., Transformers) or neural networks with small batch size.
# It takes a vector :math:`x` as input and produces a vector :math:`y` of the same shape as output.
# The normalization is performed by subtracting the mean and dividing by the standard deviation of :math:`x`.
# After the normalization, a learnable linear transformation with weights :math:`w` and biases :math:`b` is applied.
# The forward pass can be expressed as follows:
#
# .. math::
# y = \frac{ x - \text{E}[x] }{ \sqrt{\text{Var}(x) + \epsilon} } * w + b
#
# where :math:`\epsilon` is a small constant added to the denominator for numerical stability.
# Let’s first take a look at the forward pass implementation.
import torch
import triton
import triton.language as tl
try:
# This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it
# should not be added to extras_require in setup.py.
import apex
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
@triton.jit
def _layer_norm_fwd_fused(
X, # pointer to the input
Y, # pointer to the output
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
Y += row * stride
X += row * stride
# Compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# Compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
x = tl.where(cols < N, x - mean, 0.)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# Write mean / rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
w = tl.load(W + cols, mask=mask)
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32)
x_hat = (x - mean) * rstd
y = x_hat * w + b
# Write output
tl.store(Y + cols, y, mask=mask)
# %%
# Backward pass
# -------------
#
# The backward pass for the layer normalization operator is a bit more involved than the forward pass.
# Let :math:`\hat{x}` be the normalized inputs :math:`\frac{ x - \text{E}[x] }{ \sqrt{\text{Var}(x) + \epsilon} }` before the linear transformation,
# the Vector-Jacobian Products (VJP) :math:`\nabla_{x}` of :math:`x` are given by:
#
# .. math::
# \nabla_{x} = \frac{1}{\sigma}\Big( \nabla_{y} \odot w - \underbrace{ \big( \frac{1}{N} \hat{x} \cdot (\nabla_{y} \odot w) \big) }_{c_1} \odot \hat{x} - \underbrace{ \frac{1}{N} \nabla_{y} \cdot w }_{c_2} \Big)
#
# where :math:`\odot` denotes the element-wise multiplication, :math:`\cdot` denotes the dot product, and :math:`\sigma` is the standard deviation.
# :math:`c_1` and :math:`c_2` are intermediate constants that improve the readability of the following implementation.
#
# For the weights :math:`w` and biases :math:`b`, the VJPs :math:`\nabla_{w}` and :math:`\nabla_{b}` are more straightforward:
#
# .. math::
# \nabla_{w} = \nabla_{y} \odot \hat{x} \quad \text{and} \quad \nabla_{b} = \nabla_{y}
#
# Since the same weights :math:`w` and biases :math:`b` are used for all rows in the same batch, their gradients need to sum up.
# To perform this step efficiently, we use a parallel reduction strategy: each kernel instance accumulates
# partial :math:`\nabla_{w}` and :math:`\nabla_{b}` across certain rows into one of :math:`\text{GROUP_SIZE_M}` independent buffers.
# These buffers stay in the L2 cache and then are further reduced by another function to compute the actual :math:`\nabla_{w}` and :math:`\nabla_{b}`.
#
# Let the number of input rows :math:`M = 4` and :math:`\text{GROUP_SIZE_M} = 2`,
# here's a diagram of the parallel reduction strategy for :math:`\nabla_{w}` (:math:`\nabla_{b}` is omitted for brevity):
#
# .. image:: parallel_reduction.png
#
# In Stage 1, the rows of X that have the same color share the same buffer and thus a lock is used to ensure that only one kernel instance writes to the buffer at a time.
# In Stage 2, the buffers are further reduced to compute the final :math:`\nabla_{w}` and :math:`\nabla_{b}`.
# In the following implementation, Stage 1 is implemented by the function :code:`_layer_norm_bwd_dx_fused` and Stage 2 is implemented by the function :code:`_layer_norm_bwd_dwdb`.
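# %%
# Before implementing the backward kernels, here is a quick CPU sanity check of
# the closed-form :math:`\nabla_{x}` above against plain PyTorch autograd.
# The size, seed and epsilon below are arbitrary and independent of the Triton
# kernels that follow.
def _check_dx_formula(N=8, eps=1e-5):
    torch.manual_seed(0)
    x = torch.randn(N, dtype=torch.float64, requires_grad=True)
    w = torch.randn(N, dtype=torch.float64)
    b = torch.randn(N, dtype=torch.float64)
    dy = torch.randn(N, dtype=torch.float64)
    torch.nn.functional.layer_norm(x, (N,), w, b, eps).backward(dy)
    with torch.no_grad():
        mean = x.mean()
        rstd = 1.0 / torch.sqrt(x.var(unbiased=False) + eps)
        x_hat = (x - mean) * rstd
        wdy = w * dy
        c1 = (x_hat * wdy).sum() / N
        c2 = wdy.sum() / N
        dx = (wdy - (x_hat * c1 + c2)) * rstd
    assert torch.allclose(dx, x.grad)
_check_dx_formula()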
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, # pointer to the input gradient
DY, # pointer to the output gradient
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
X, # pointer to the input
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
Lock, # pointer to the lock
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
GROUP_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr
):
# Map the program id to the elements of X, DX, and DY it should compute.
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
# Offset locks and weights/biases gradient pointer for parallel reduction
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
# Load data to SRAM
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# Compute dx
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
# Write dx
tl.store(DX + cols, dx, mask=mask)
# Accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = (dy).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# First store doesn't accumulate
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
# Release the lock
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_bwd_dwdb(
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
FINAL_DW, # pointer to the weights gradient
FINAL_DB, # pointer to the biases gradient
M, # GROUP_SIZE_M
N, # number of columns
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr
):
# Map the program id to the elements of DW and DB it should compute.
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# Iterate through the rows of DW and DB to sum the partial sums.
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
db += tl.load(DB + offs, mask=mask, other=0.)
# Write the final sum to the output.
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
# %%
# Benchmark
# ---------
#
# We can now compare the performance of our kernel against that of PyTorch.
# Here we focus on inputs that have less than 64KB per feature.
# Specifically, one can set :code:`'mode': 'backward'` to benchmark the backward pass.
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device='cuda')
rstd = torch.empty((M, ), dtype=torch.float32, device='cuda')
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
# enqueue kernel
_layer_norm_fwd_fused[(M,)](x_arg, y, weight, bias, mean, rstd,
x_arg.stride(0), N, eps,
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)
ctx.save_for_backward(x, weight, bias, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
return y
@staticmethod
def backward(ctx, dy):
x, w, b, m, v = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DW/DB
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192: GROUP_SIZE_M = 96
if N <= 4096: GROUP_SIZE_M = 128
if N <= 1024: GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda')
_dw = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device)
_db = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device)
dw = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device)
db = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M,)](dx, dy, _dw, _db, x, w, b, m, v, locks,
x_arg.stride(0), N, ctx.eps,
BLOCK_SIZE_N=ctx.BLOCK_SIZE,
GROUP_SIZE_M=GROUP_SIZE_M,
num_warps=ctx.num_warps)
grid = lambda meta: [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
# accumulate partial sums in separate kernel
_layer_norm_bwd_dwdb[grid](_dw, _db, dw, db, GROUP_SIZE_M, N,
BLOCK_SIZE_M=32,
BLOCK_SIZE_N=128)
return dx, None, dw, db, None
layer_norm = LayerNorm.apply
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri = layer_norm(x, w_shape, weight, bias, eps)
y_ref = torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]]
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]]
# compare
assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm-backward',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'}
)
)
def bench_layer_norm(M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
quantiles = [0.5, 0.2, 0.8]
# utility functions
if provider == 'triton':
y_fwd = lambda: layer_norm(x, w_shape, weight, bias, eps)
if provider == 'torch':
y_fwd = lambda: torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps)
if provider == 'apex':
apex_layer_norm = apex.normalization.FusedLayerNorm(w_shape).to(x.device).to(x.dtype)
y_fwd = lambda: apex_layer_norm(x)
# forward pass
if mode == 'forward':
gbps = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=500)
# backward pass
if mode == 'backward':
gbps = lambda ms: 3 * x.numel() * x.element_size() / ms * 1e-6
y = y_fwd()
ms, min_ms, max_ms = triton.testing.do_bench(lambda: y.backward(dy, retain_graph=True),
quantiles=quantiles, grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='.', print_data=True)
# %%
# References
# ----------
#
# .. [BA2016] Jimmy Lei Ba and Jamie Ryan Kiros and Geoffrey E. Hinton, "Layer Normalization", Arxiv 2016
| 15,483 | 40.290667 | 214 |
py
|
triton
|
triton-main/python/tutorials/02-fused-softmax.py
|
"""
Fused Softmax
=============
In this tutorial, you will write a fused softmax operation that is significantly faster
than PyTorch's native op for a particular class of matrices: those whose rows can fit in
the GPU's SRAM.
In doing so, you will learn about:
* The benefits of kernel fusion for bandwidth-bound operations.
* Reduction operators in Triton.
"""
# %%
# Motivations
# -----------
#
# Custom GPU kernels for elementwise additions are educationally valuable but won't get you very far in practice.
# Let us consider instead the case of a simple (numerically stabilized) softmax operation:
import torch
import triton
import triton.language as tl
@torch.jit.script
def naive_softmax(x):
"""Compute row-wise softmax of X using native pytorch
We subtract the maximum element in order to avoid overflows. Softmax is invariant to
this shift.
"""
# read MN elements ; write M elements
x_max = x.max(dim=1)[0]
# read MN + M elements ; write MN elements
z = x - x_max[:, None]
# read MN elements ; write MN elements
numerator = torch.exp(z)
# read MN elements ; write M elements
denominator = numerator.sum(dim=1)
# read MN + M elements ; write MN elements
ret = numerator / denominator[:, None]
# in total: read 5MN + 2M elements ; wrote 3MN + 2M elements
return ret
# %%
# When implemented naively in PyTorch, computing :code:`y = naive_softmax(x)` for :math:`x \in R^{M \times N}`
# requires reading :math:`5MN + 2M` elements from DRAM and writing back :math:`3MN + 2M` elements.
# This is obviously wasteful; we'd prefer to have a custom "fused" kernel that only reads
# X once and does all the necessary computations on-chip.
# Doing so would require reading and writing back only :math:`MN` elements each, so we could
# expect a theoretical speed-up of ~4x (i.e., :math:`(8MN + 4M) / 2MN`).
# The `torch.jit.script` decorator aims to perform this kind of "kernel fusion" automatically
# but, as we will see later, it is still far from ideal.
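# As a tiny worked example of the arithmetic above (the values of M and N are
# arbitrary):
M_ex, N_ex = 4096, 2048
naive_elems = (5 * M_ex * N_ex + 2 * M_ex) + (3 * M_ex * N_ex + 2 * M_ex)  # reads + writes
fused_elems = 2 * M_ex * N_ex  # read X once, write Y once
print(f"theoretical fused speed-up ~{naive_elems / fused_elems:.2f}x")  # ~4.00x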
# %%
# Compute Kernel
# --------------
#
# Our softmax kernel works as follows: each program loads a row of the input matrix X,
# normalizes it and writes back the result to the output Y.
#
# Note that one important limitation of Triton is that each block must have a
# power-of-two number of elements, so we need to internally "pad" each row and guard the
# memory operations properly if we want to handle any possible input shapes:
@triton.jit
def softmax_kernel(
output_ptr, input_ptr, input_row_stride, output_row_stride, n_cols,
BLOCK_SIZE: tl.constexpr
):
# The rows of the softmax are independent, so we parallelize across those
row_idx = tl.program_id(0)
# The stride represents how much we need to increase the pointer to advance 1 row
row_start_ptr = input_ptr + row_idx * input_row_stride
# The block size is the next power of two greater than n_cols, so we can fit each
# row in a single block
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
    # Load the row into SRAM, using a mask since BLOCK_SIZE may be greater than n_cols
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf'))
# Subtract maximum for numerical stability
row_minus_max = row - tl.max(row, axis=0)
# Note that exponentiation in Triton is fast but approximate (i.e., think __expf in CUDA)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
# Write back output to DRAM
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
# %%
# We can create a helper function that enqueues the kernel and its (meta-)arguments for any given input tensor.
def softmax(x):
n_rows, n_cols = x.shape
# The block size is the smallest power of two greater than the number of columns in `x`
BLOCK_SIZE = triton.next_power_of_2(n_cols)
# Another trick we can use is to ask the compiler to use more threads per row by
# increasing the number of warps (`num_warps`) over which each row is distributed.
# You will see in the next tutorial how to auto-tune this value in a more natural
# way so you don't have to come up with manual heuristics yourself.
num_warps = 4
if BLOCK_SIZE >= 2048:
num_warps = 8
if BLOCK_SIZE >= 4096:
num_warps = 16
# Allocate output
y = torch.empty_like(x)
    # Enqueue kernel. The 1D launch grid is simple: we have one kernel
    # instance per row of the input matrix.
softmax_kernel[(n_rows,)](
y,
x,
x.stride(0),
y.stride(0),
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
return y
# %%
# Unit Test
# ---------
# %%
# We make sure that we test our kernel on a matrix with an irregular number of rows and columns.
# This will allow us to verify that our padding mechanism works.
torch.manual_seed(0)
x = torch.randn(1823, 781, device='cuda')
y_triton = softmax(x)
y_torch = torch.softmax(x, axis=1)
assert torch.allclose(y_triton, y_torch), (y_triton, y_torch)
# %%
# As expected, the results are identical.
# %%
# Benchmark
# ---------
#
# Here we will benchmark our operation as a function of the number of columns in the input matrix -- assuming 4096 rows.
# We will then compare its performance against (1) :code:`torch.softmax` and (2) the :code:`naive_softmax` defined above.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'], # argument names to use as an x-axis for the plot
x_vals=[
128 * i for i in range(2, 100)
], # different possible values for `x_name`
line_arg='provider', # argument name whose value corresponds to a different line in the plot
line_vals=[
'triton',
'torch-native',
'torch-jit',
        ],  # possible values for `line_arg`
line_names=[
"Triton",
"Torch (native)",
"Torch (jit)",
], # label name for the lines
styles=[('blue', '-'), ('green', '-'), ('green', '--')], # line styles
ylabel="GB/s", # label name for the y-axis
plot_name="softmax-performance", # name for the plot. Used also as a file name for saving the plot.
args={'M': 4096}, # values for function arguments not in `x_names` and `y_name`
)
)
def benchmark(M, N, provider):
x = torch.randn(M, N, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch-native':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.softmax(x, axis=-1), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: softmax(x), quantiles=quantiles)
if provider == 'torch-jit':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: naive_softmax(x), quantiles=quantiles)
gbps = lambda ms: 2 * x.nelement() * x.element_size() * 1e-9 / (ms * 1e-3)
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(show_plots=True, print_data=True)
# %%
# In the above plot, we can see that:
# - Triton is 4x faster than the Torch JIT. This confirms our suspicions that the Torch JIT does not do any fusion here.
# - Triton is noticeably faster than :code:`torch.softmax` -- in addition to being **easier to read, understand and maintain**.
# Note however that the PyTorch `softmax` operation is more general and will work on tensors of any shape.
| 7,632 | 36.975124 | 128 |
py
|
triton
|
triton-main/python/tutorials/06-fused-attention.py
|
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao (https://tridao.me/publications/flash2/flash2.pdf)
Extra Credits:
- Original flash attention paper (https://arxiv.org/abs/2205.14135)
- Rabe and Staats (https://arxiv.org/pdf/2112.05682v2.pdf)
- Adam P. Goucher for simplified vector math
"""
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L, M,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
MODE: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1)
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0)
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# causal check on every loop iteration can be expensive
# and peeling the last iteration of the loop does not work well with ptxas
# so we have a mode to do the causal check in a separate kernel entirely
if MODE == 0: # entire non-causal attention
lo, hi = 0, N_CTX
if MODE == 1: # entire causal attention
lo, hi = 0, (start_m + 1) * BLOCK_M
if MODE == 2: # off band-diagonal
lo, hi = 0, start_m * BLOCK_M
if MODE == 3: # on band-diagonal
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
m_i = tl.load(m_ptrs)
l_i = tl.load(l_ptrs)
acc += tl.load(O_block_ptr).to(tl.float32)
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
# scale sm_scale by log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(tl.float16)
# advance block pointers to first iteration of the loop
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
if MODE == 1 or MODE == 3:
qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
# -- compute m_ij, p, l_ij
m_ij = tl.maximum(m_i, tl.max(qk, 1))
p = tl.math.exp2(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i *= alpha
l_i_new = l_i + l_ij
# scale acc
acc_scale = l_i * 0 + alpha
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_ij
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
acc = acc / l_i[:, None]
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
# write back O
tl.store(O_block_ptr, acc.to(tl.float16))
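# A small CPU sketch (sizes are arbitrary) of the streaming-softmax bookkeeping
# used in the loop above: processing K/V in two chunks while carrying the
# running (m, l, acc) statistics matches a direct softmax(QK^T)V. The kernel
# uses exp2 with a log2(e) scale, which is equivalent to the plain exp below.
def _online_softmax_check(M=4, N=8, D=16):
    torch.manual_seed(0)
    q, k, v = torch.randn(M, D), torch.randn(N, D), torch.randn(N, D)
    ref = torch.softmax(q @ k.t(), dim=-1) @ v
    m = torch.full((M,), float("-inf"))
    l = torch.zeros(M)
    acc = torch.zeros(M, D)
    for ks, vs in ((k[:N // 2], v[:N // 2]), (k[N // 2:], v[N // 2:])):
        qk = q @ ks.t()
        m_new = torch.maximum(m, qk.max(dim=1).values)
        p = torch.exp(qk - m_new[:, None])
        alpha = torch.exp(m - m_new)
        l = l * alpha + p.sum(dim=1)
        acc = acc * alpha[:, None] + p @ vs
        m = m_new
    assert torch.allclose(acc / l[:, None], ref, atol=1e-5)
_online_softmax_check()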
@triton.jit
def _bwd_preprocess(
Out, DO, L,
NewDO, Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
denom = tl.load(L + off_m).to(tl.float32)
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L, M,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
MODE: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
qk_scale = sm_scale * 1.44269504
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
if MODE == 0:
lo = 0
else:
lo = start_n * BLOCK_M
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
m_ptrs = M + off_hz * N_CTX
        # initialize dv and dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
# NOTE: `do` is pre-divided by `l`; no normalization here
if MODE == 1:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.), float("-inf"))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
m = tl.load(m_ptrs + offs_m_curr)
p = tl.math.exp2(qk - m[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
            # compute ds = p * (dp - delta[:, None]); delta (Di) was already folded into dp above
            ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
# compute dq
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq)
# increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
empty = torch.empty(128, device="cuda")
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
o = torch.empty_like(q)
BLOCK_M = 128
BLOCK_N = 64
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
if causal:
modes = [1] if q.shape[2] <= 2048 else [2, 3]
else:
modes = [0]
for mode in modes:
_fwd_kernel[grid](
q, k, v, sm_scale,
L, m,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
MODE=mode,
num_warps=num_warps,
num_stages=4)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, l, m = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
if ctx.causal:
mode = 1
else:
mode = 0
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do, l,
do_scaled, delta,
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
MODE=mode,
num_stages=1,
)
return dq, dk, dv, None, None
attention = _attention.apply
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)])
@pytest.mark.parametrize('causal', [False, True])
def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=0)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=0)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=0)
try:
from flash_attn.flash_attn_interface import flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [triton.testing.Benchmark(
x_names=['N_CTX'],
x_vals=[2**i for i in range(10, 15)],
line_arg='provider',
line_vals=['triton'] + (['flash'] if HAS_FLASH else []),
line_names=['Triton'] + (['Flash'] if HAS_FLASH else []),
styles=[('red', '-'), ('blue', '-')],
    ylabel='TFLOPS',
plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}',
args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode, 'causal': causal}
) for mode in ['fwd'] for causal in [False]]
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, causal, mode, provider, dtype=torch.float16, device="cuda"):
assert mode in ['fwd', 'bwd']
warmup = 25
rep = 100
if provider == "triton":
q = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash":
lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
cu_seqlens[1:] = lengths.cumsum(0)
qkv = torch.randn((BATCH * N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == 'bwd':
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops / ms * 1e-9
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path='.', print_data=True)
triton-main/python/tutorials/01-vector-add.py
"""
Vector Addition
===============
In this tutorial, you will write a simple vector addition using Triton.
In doing so, you will learn about:
* The basic programming model of Triton.
* The `triton.jit` decorator, which is used to define Triton kernels.
* The best practices for validating and benchmarking your custom ops against native reference implementations.
"""
# %%
# Compute Kernel
# --------------
import torch
import triton
import triton.language as tl
@triton.jit
def add_kernel(
x_ptr, # *Pointer* to first input vector.
y_ptr, # *Pointer* to second input vector.
output_ptr, # *Pointer* to output vector.
n_elements, # Size of the vector.
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process.
# NOTE: `constexpr` so it can be used as a shape value.
):
# There are multiple 'programs' processing different data. We identify which program
# we are here:
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.
# This program will process inputs that are offset from the initial data.
# For instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
    # Note that `offsets` is a block of indices that we add to the input pointers below:
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses.
mask = offsets < n_elements
# Load x and y from DRAM, masking out any extra elements in case the input is not a
# multiple of the block size.
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
# Write x + y back to DRAM.
tl.store(output_ptr + offsets, output, mask=mask)
# %%
# Let's also declare a helper function to (1) allocate the output tensor
# and (2) enqueue the above kernel with appropriate grid/block sizes:
def add(x: torch.Tensor, y: torch.Tensor):
# We need to preallocate the output.
output = torch.empty_like(x)
assert x.is_cuda and y.is_cuda and output.is_cuda
n_elements = output.numel()
# The SPMD launch grid denotes the number of kernel instances that run in parallel.
# It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int].
# In this case, we use a 1D grid where the size is the number of blocks:
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
# NOTE:
# - Each torch.tensor object is implicitly converted into a pointer to its first element.
# - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel.
# - Don't forget to pass meta-parameters as keywords arguments.
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
    # We return a handle to the output but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
    # running asynchronously at this point.
return output
# %%
# We can now use the above function to compute the element-wise sum of two `torch.tensor` objects and test its correctness:
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
output_torch = x + y
output_triton = add(x, y)
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
# %%
# Seems like we're good to go!
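# For a programmatic check, a minimal sketch (the tolerances below are an assumption;
# both sides perform the same float32 addition, so they should agree very closely):
torch.testing.assert_close(output_triton, output_torch, rtol=0, atol=1e-6)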
# %%
# Benchmark
# ---------
#
# We can now benchmark our custom op on vectors of increasing sizes to get a sense of how it does relative to PyTorch.
# To make things easier, Triton has a set of built-in utilities that allow us to concisely plot the performance of our custom ops
# for different problem sizes.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # Argument names to use as an x-axis for the plot.
x_vals=[
2 ** i for i in range(12, 28, 1)
], # Different possible values for `x_name`.
x_log=True, # x axis is logarithmic.
line_arg='provider', # Argument name whose value corresponds to a different line in the plot.
line_vals=['triton', 'torch'], # Possible values for `line_arg`.
line_names=['Triton', 'Torch'], # Label name for the lines.
styles=[('blue', '-'), ('green', '-')], # Line styles.
ylabel='GB/s', # Label name for the y-axis.
plot_name='vector-add-performance', # Name for the plot. Used also as a file name for saving the plot.
args={}, # Values for function arguments not in `x_names` and `y_name`.
)
)
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
y = torch.rand(size, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: x + y, quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: add(x, y), quantiles=quantiles)
    gbps = lambda ms: 12 * size / ms * 1e-6  # 3 float32 tensors (x, y, output) => 12 bytes per element; ms->s and B->GB fold into 1e-6
return gbps(ms), gbps(max_ms), gbps(min_ms)
# %%
# We can now run the decorated function above. Pass `print_data=True` to see the performance number, `show_plots=True` to plot them, and/or
# `save_path='/path/to/results/'` to save them to disk along with raw CSV data:
benchmark.run(print_data=True, show_plots=True)
triton-main/python/tutorials/08-experimental-block-pointer.py
"""
Block Pointer (Experimental)
============================
This tutorial will guide you through writing a matrix multiplication algorithm that utilizes block pointer semantics.
These semantics are more friendly for Triton to optimize and can result in better performance on specific hardware.
Note that this feature is still experimental and may change in the future.
"""
# %%
# Motivations
# -----------
# In the previous matrix multiplication tutorial, we constructed blocks of values by de-referencing blocks of pointers,
# i.e., :code:`load(block<pointer_type<element_type>>) -> block<element_type>`, which involved loading blocks of
# elements from memory. This approach allowed for flexibility in using hardware-managed cache and implementing complex
# data structures, such as tensors of trees or unstructured look-up tables.
#
# However, the drawback of this approach is that it relies heavily on complex optimization passes by the compiler to
# optimize memory access patterns. This can result in brittle code that may suffer from performance degradation when the
# optimizer fails to perform adequately. Additionally, as memory controllers specialize to accommodate dense spatial
# data structures commonly used in machine learning workloads, this problem is likely to worsen.
#
# To address this issue, we will use block pointers :code:`pointer_type<block<element_type>>` and load them into
# :code:`block<element_type>`, in which way gives better friendliness for the compiler to optimize memory access
# patterns.
#
# Let's start with the previous matrix multiplication example and demonstrate how to rewrite it to utilize block pointer
# semantics.
# %%
# Make a Block Pointer
# --------------------
# A block pointer points to a block in a parent tensor and is constructed by the :code:`make_block_ptr` function,
# which takes the following information as arguments:
#
# * :code:`base`: the base pointer to the parent tensor;
#
# * :code:`shape`: the shape of the parent tensor;
#
# * :code:`strides`: the strides of the parent tensor, which means how much to increase the pointer by when moving by 1 element in a specific axis;
#
# * :code:`offsets`: the offsets of the block;
#
# * :code:`block_shape`: the shape of the block;
#
# * :code:`order`: the order of the block, which means how the block is laid out in memory.
#
# For example, to construct a block pointer to a :code:`BLOCK_SIZE_M * BLOCK_SIZE_K` block in a row-major 2D matrix A with
# offsets :code:`(pid_m * BLOCK_SIZE_M, 0)` and strides :code:`(stride_am, stride_ak)`, we can use the following code
# (exactly the same as the previous matrix multiplication tutorial):
#
# .. code-block:: python
#
# a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(stride_am, stride_ak),
# offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K),
# order=(1, 0))
#
# Note that the :code:`order` argument is set to :code:`(1, 0)`, which means the second axis is the inner dimension in
# terms of storage, and the first axis is the outer dimension. This information may sound redundant, but it is necessary
# for some hardware backends to optimize for better performance.
# %%
# Load/Store a Block Pointer
# --------------------------
# To load/store through a block pointer, we can use the :code:`load`/:code:`store` functions, which take a block pointer
# as an argument, de-reference it, and load/store a block. To mask out some values in the block, there is an extra
# argument :code:`boundary_check` that specifies whether to check the boundary of each axis for the block pointer.
# With the check on, out-of-bound values are masked according to the :code:`padding_option` argument (load only), which
# can be :code:`zero` or :code:`nan`. Other padding values are temporarily unsupported due to hardware limitations.
# Note that this block-pointer mode of load/store does not support the :code:`mask` or :code:`other` arguments of the legacy mode.
#
# So to load the block pointer of A in the previous section, we can simply write
# :code:`a = tl.load(a_block_ptr, boundary_check=(0, 1))`. Boundary check may cost extra performance, so if you can
# guarantee that the block pointer is always in-bound in some axis, you can turn off the check by not passing the index
# into the :code:`boundary_check` argument. For example, if we know that :code:`M` is a multiple of
# :code:`BLOCK_SIZE_M`, we can replace with :code:`a = tl.load(a_block_ptr, boundary_check=(1, ))`, since axis 0 is
# always in bound.
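#
# Putting load and store together, a minimal sketch (the names mirror the full
# example further below):
#
# .. code-block:: python
#
#     # load with boundary checks on both axes, padding out-of-bound values with zero
#     a = tl.load(a_block_ptr, boundary_check=(0, 1), padding_option="zero")
#     # ... compute a block `c` ...
#     # store with boundary checks on both axes
#     tl.store(c_block_ptr, c, boundary_check=(0, 1))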
# %%
# Advance a Block Pointer
# -----------------------
# To advance a block pointer, we can use :code:`advance` function, which takes a block pointer and the increment for
# each axis as arguments and returns a new block pointer with the same shape and strides as the original one,
# but with the offsets advanced by the specified amount.
#
# For example, to advance the block pointer by :code:`BLOCK_SIZE_K` in the second axis
# (no need to multiply with strides), we can write :code:`a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))`.
# %%
# Final Result
# ------------
import torch
import triton
import triton.language as tl
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
],
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel_with_block_pointers(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See the matrix multiplication tutorial for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create block pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction and accumulate.
# See above `Make a Block Pointer` section for details.
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(stride_am, stride_ak),
offsets=(pid_m * BLOCK_SIZE_M, 0), block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_K),
order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(stride_bk, stride_bn),
offsets=(0, pid_n * BLOCK_SIZE_N), block_shape=(BLOCK_SIZE_K, BLOCK_SIZE_N),
order=(1, 0))
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block.
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
# Load with boundary checks, no need to calculate the mask manually.
# For better performance, you may remove some axis from the boundary
# check, if you can guarantee that the access is always in-bound in
# that axis.
# See above `Load/Store a Block Pointer` section for details.
a = tl.load(a_block_ptr, boundary_check=(0, 1))
b = tl.load(b_block_ptr, boundary_check=(0, 1))
# We accumulate along the K dimension.
accumulator += tl.dot(a, b)
# Advance the block pointer to the next K block.
# See above `Advance a Block Pointer` section for details.
a_block_ptr = tl.advance(a_block_ptr, (0, BLOCK_SIZE_K))
b_block_ptr = tl.advance(b_block_ptr, (BLOCK_SIZE_K, 0))
c = accumulator.to(tl.float16)
# ----------------------------------------------------------------
# Write back the block of the output matrix C with boundary checks.
# See above `Load/Store a Block Pointer` section for details.
c_block_ptr = tl.make_block_ptr(base=c_ptr, shape=(M, N), strides=(stride_cm, stride_cn),
offsets=(pid_m * BLOCK_SIZE_M, pid_n * BLOCK_SIZE_N),
block_shape=(BLOCK_SIZE_M, BLOCK_SIZE_N), order=(1, 0))
tl.store(c_block_ptr, c, boundary_check=(0, 1))
# We can now create a convenience wrapper function that only takes two input tensors,
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel.
def matmul(a, b):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
matmul_kernel_with_block_pointers[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
)
return c
# %%
# Unit Test
# ---------
#
# Still we can test our matrix multiplication with block pointers against a native torch implementation (i.e., cuBLAS).
torch.manual_seed(0)
a = torch.randn((512, 512), device='cuda', dtype=torch.float16)
b = torch.randn((512, 512), device='cuda', dtype=torch.float16)
triton_output = matmul(a, b)
torch_output = torch.matmul(a, b)
print(f"triton_output={triton_output}")
print(f"torch_output={torch_output}")
if torch.allclose(triton_output, torch_output, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
triton-main/python/tutorials/07-math-functions.py
"""
Libdevice (`tl.math`) function
==============================
Triton can invoke a custom function from an external library.
In this example, we will use the `libdevice` library (a.k.a `math` in triton) to apply `asin` on a tensor.
Please refer to https://docs.nvidia.com/cuda/libdevice-users-guide/index.html regarding the semantics of all available libdevice functions.
In `triton/language/math.py`, we try to aggregate functions with the same computation but different data types together.
For example, both `__nv_asin` and `__nv_asinf` calculate the principal value of the arc sine of the input, but `__nv_asin` operates on `double` and `__nv_asinf` operates on `float`.
Using triton, you can simply call `tl.math.asin`.
Triton automatically selects the correct underlying device function to invoke based on input and output types.
"""
# %%
# asin Kernel
# ------------
import torch
import triton
import triton.language as tl
@triton.jit
def asin_kernel(
x_ptr,
y_ptr,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
x = tl.math.asin(x)
tl.store(y_ptr + offsets, x, mask=mask)
# %%
# Using the default libdevice library path
# -----------------------------------------
# We can use the default libdevice library path encoded in `triton/language/math.py`
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
output_triton = torch.zeros(size, device='cuda')
output_torch = torch.asin(x)
assert x.is_cuda and output_triton.is_cuda
n_elements = output_torch.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024)
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
# %%
# Customize the libdevice library path
# -------------------------------------
# We can also customize the libdevice library path by passing the path to the `libdevice` library to the `asin` kernel.
output_triton = torch.empty_like(x)
asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024,
extern_libs={'libdevice': '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'})
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
triton-main/python/tutorials/04-low-memory-dropout.py
"""
Low-Memory Dropout
==================
In this tutorial, you will write a memory-efficient implementation of dropout whose state
will be composed of a single int32 seed. This differs from more traditional implementations of dropout,
whose state is generally composed of a bit mask tensor of the same shape as the input.
In doing so, you will learn about:
* The limitations of naive implementations of Dropout with PyTorch.
* Parallel pseudo-random number generation in Triton.
"""
# %%
# Baseline
# --------
#
# The *dropout* operator was first introduced in [SRIVASTAVA2014]_ as a way to improve the performance
# of deep neural networks in low-data regime (i.e. regularization).
#
# It takes a vector as input and produces a vector of the same shape as output. Each scalar in the
# output has a probability :math:`p` of being changed to zero and otherwise it is copied from the input.
# This forces the network to perform well even when only :math:`1 - p` scalars from the input are available.
#
# At evaluation time we want to use the full power of the network so we set :math:`p=0`. Naively this would
# increase the norm of the output (which can be a bad thing, e.g. it can lead to artificial decrease
# in the output softmax temperature). To prevent this we multiply the output by :math:`\frac{1}{1 - p}`, which
# keeps the norm consistent regardless of the dropout probability.
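#
# A quick numeric sketch of that rescaling (the values here are illustrative only):
#
# .. code-block:: python
#
#     p, x = 0.5, 2.0
#     # E[output] = (1 - p) * x / (1 - p) + p * 0 == x, so the expectation is preserved
#     assert (1 - p) * (x / (1 - p)) + p * 0.0 == x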
#
# Let's first take a look at the baseline implementation.
import tabulate
import torch
import triton
import triton.language as tl
@triton.jit
def _dropout(
x_ptr, # pointer to the input
x_keep_ptr, # pointer to a mask of 0s and 1s
output_ptr, # pointer to the output
n_elements, # number of elements in the `x` tensor
p, # probability that an element of `x` is changed to zero
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# Load data
x = tl.load(x_ptr + offsets, mask=mask)
x_keep = tl.load(x_keep_ptr + offsets, mask=mask)
# The line below is the crucial part, described in the paragraph above!
output = tl.where(x_keep, x / (1 - p), 0.0)
# Write-back output
tl.store(output_ptr + offsets, output, mask=mask)
def dropout(x, x_keep, p):
output = torch.empty_like(x)
assert x.is_contiguous()
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_dropout[grid](x, x_keep, output, n_elements, p, BLOCK_SIZE=1024)
return output
# Input tensor
x = torch.randn(size=(10,)).cuda()
# Dropout mask
p = 0.5
x_keep = (torch.rand(size=(10,)) > p).to(torch.int32).cuda()
#
output = dropout(x, x_keep=x_keep, p=p)
print(tabulate.tabulate([
["input"] + x.tolist(),
["keep mask"] + x_keep.tolist(),
["output"] + output.tolist()
]))
# %%
# Seeded dropout
# --------------
#
# The above implementation of dropout works fine, but it can be a bit awkward to deal with. Firstly
# we need to store the dropout mask for backpropagation. Secondly, dropout state management can get
# very tricky when using recompute/checkpointing (e.g. see all the notes about `preserve_rng_state` in
# https://pytorch.org/docs/1.9.0/checkpoint.html). In this tutorial we'll describe an alternative implementation
# that (1) has a smaller memory footprint; (2) requires less data movement; and (3) simplifies the management
# of persisting randomness across multiple invocations of the kernel.
#
# Pseudo-random number generation in Triton is simple! In this tutorial we will use the
# :code:`triton.language.rand` function which generates a block of uniformly distributed :code:`float32`
# values in [0, 1), given a seed and a block of :code:`int32` offsets. But if you need it, Triton also provides
# other :ref:`random number generation strategies <Random Number Generation>`.
#
# .. note::
# Triton's implementation of PRNG is based on the Philox algorithm (described on [SALMON2011]_).
#
# Let's put it all together.
@triton.jit
def _seeded_dropout(
x_ptr,
output_ptr,
n_elements,
p,
seed,
BLOCK_SIZE: tl.constexpr,
):
# compute memory offsets of elements handled by this instance
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# load data from x
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
# randomly prune it
random = tl.rand(seed, offsets)
x_keep = random > p
# write-back
output = tl.where(x_keep, x / (1 - p), 0.0)
tl.store(output_ptr + offsets, output, mask=mask)
def seeded_dropout(x, p, seed):
output = torch.empty_like(x)
assert x.is_contiguous()
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_seeded_dropout[grid](x, output, n_elements, p, seed, BLOCK_SIZE=1024)
return output
x = torch.randn(size=(10,)).cuda()
# Compare this to the baseline - dropout mask is never instantiated!
output = seeded_dropout(x, p=0.5, seed=123)
output2 = seeded_dropout(x, p=0.5, seed=123)
output3 = seeded_dropout(x, p=0.5, seed=512)
print(tabulate.tabulate([
["input"] + x.tolist(),
["output (seed = 123)"] + output.tolist(),
["output (seed = 123)"] + output2.tolist(),
["output (seed = 512)"] + output3.tolist()
]))
# %%
# Et Voilà! We have a Triton kernel that applies the same dropout mask provided the seed is the same!
# If you'd like to explore further applications of pseudorandomness in GPU programming, we encourage you
# to explore the `triton/language/random` folder!
# %%
# Exercises
# ---------
#
# 1. Extend the kernel to operate over a matrix and use a vector of seeds - one per row.
# 2. Add support for striding.
# 3. (challenge) Implement a kernel for the sparse Johnson-Lindenstrauss transform which generates the projection matrix on the fly each time using a seed.
# %%
# References
# ----------
#
# .. [SALMON2011] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw, "Parallel Random Numbers: As Easy as 1, 2, 3", 2011
# .. [SRIVASTAVA2014] Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov, "Dropout: A Simple Way to Prevent Neural Networks from Overfitting", JMLR 2014
triton-main/python/tutorials/03-matrix-multiplication.py
"""
Matrix Multiplication
=====================
In this tutorial, you will write a very short high-performance FP16 matrix multiplication kernel that achieves
performance on par with cuBLAS.
You will specifically learn about:
* Block-level matrix multiplications.
* Multi-dimensional pointer arithmetics.
* Program re-ordering for improved L2 cache hit rate.
* Automatic performance tuning.
"""
# %%
# Motivations
# -----------
#
# Matrix multiplications are a key building block of most modern high-performance computing systems.
# They are notoriously hard to optimize, hence their implementation is generally done by
# hardware vendors themselves as part of so-called "kernel libraries" (e.g., cuBLAS).
# Unfortunately, these libraries are often proprietary and cannot be easily customized
# to accommodate the needs of modern deep learning workloads (e.g., fused activation functions).
# In this tutorial, you will learn how to implement efficient matrix multiplications by
# yourself with Triton, in a way that is easy to customize and extend.
#
# Roughly speaking, the kernel that we will write will implement the following blocked
# algorithm to multiply a (M, K) by a (K, N) matrix:
#
# .. code-block:: python
#
# # Do in parallel
# for m in range(0, M, BLOCK_SIZE_M):
# # Do in parallel
# for n in range(0, N, BLOCK_SIZE_N):
# acc = zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=float32)
# for k in range(0, K, BLOCK_SIZE_K):
# a = A[m : m+BLOCK_SIZE_M, k : k+BLOCK_SIZE_K]
# b = B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]
# acc += dot(a, b)
# C[m : m+BLOCK_SIZE_M, n : n+BLOCK_SIZE_N] = acc
#
# where each iteration of the doubly-nested for-loop is performed by a dedicated Triton program instance.
# %%
# Compute Kernel
# --------------
#
# The above algorithm is, actually, fairly straightforward to implement in Triton.
# The main difficulty comes from the computation of the memory locations at which blocks
# of :code:`A` and :code:`B` must be read in the inner loop. For that, we need
# multi-dimensional pointer arithmetics.
#
# Pointer Arithmetics
# ~~~~~~~~~~~~~~~~~~~
#
# For a row-major 2D tensor :code:`X`, the memory location of :code:`X[i, j]` is given
# by :code:`&X[i, j] = X + i*stride_xi + j*stride_xj`.
# Therefore, blocks of pointers for :code:`A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K]` and
# :code:`B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]` can be defined in pseudo-code as:
#
# .. code-block:: python
#
# &A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K] = a_ptr + (m : m+BLOCK_SIZE_M)[:, None]*A.stride(0) + (k : k+BLOCK_SIZE_K)[None, :]*A.stride(1);
# &B[k : k+BLOCK_SIZE_K, n:n+BLOCK_SIZE_N] = b_ptr + (k : k+BLOCK_SIZE_K)[:, None]*B.stride(0) + (n : n+BLOCK_SIZE_N)[None, :]*B.stride(1);
#
# Which means that pointers for blocks of A and B can be initialized (i.e., :code:`k=0`) in Triton as the following
# code. Also note that we need an extra modulo to handle the case where :code:`M` is not a multiple of
# :code:`BLOCK_SIZE_M` or :code:`N` is not a multiple of :code:`BLOCK_SIZE_N`, in which case we can pad the data with
# some useless values, which will not contribute to the results. For the :code:`K` dimension, we will handle that later
# using masking load semantics.
#
# .. code-block:: python
#
# offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
# offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
# offs_k = tl.arange(0, BLOCK_SIZE_K)
# a_ptrs = a_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak)
# b_ptrs = b_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn)
#
# And then updated in the inner loop as follows:
#
# .. code-block:: python
#
# a_ptrs += BLOCK_SIZE_K * stride_ak;
# b_ptrs += BLOCK_SIZE_K * stride_bk;
#
#
# L2 Cache Optimizations
# ~~~~~~~~~~~~~~~~~~~~~~
#
# As mentioned above, each program instance computes a :code:`[BLOCK_SIZE_M, BLOCK_SIZE_N]`
# block of :code:`C`.
# It is important to remember that the order in which these blocks are computed does
# matter, since it affects the L2 cache hit rate of our program. And, unfortunately,
# a simple row-major ordering
#
# .. code-block:: Python
#
# pid = triton.program_id(0);
# grid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M;
# grid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N;
# pid_m = pid / grid_n;
# pid_n = pid % grid_n;
#
# is just not going to cut it.
#
# One possible solution is to launch blocks in an order that promotes data reuse.
# This can be done by 'super-grouping' blocks in groups of :code:`GROUP_M` rows before
# switching to the next column:
#
# .. code-block:: python
#
# # Program ID
# pid = tl.program_id(axis=0)
# # Number of program ids along the M axis
# num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
#     # Number of program ids along the N axis
# num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
# # Number of programs in group
# num_pid_in_group = GROUP_SIZE_M * num_pid_n
# # Id of the group this program is in
# group_id = pid // num_pid_in_group
# # Row-id of the first program in the group
# first_pid_m = group_id * GROUP_SIZE_M
# # If `num_pid_m` isn't divisible by `GROUP_SIZE_M`, the last group is smaller
# group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
# # *Within groups*, programs are ordered in a column-major order
# # Row-id of the program in the *launch grid*
# pid_m = first_pid_m + (pid % group_size_m)
# # Col-id of the program in the *launch grid*
# pid_n = (pid % num_pid_in_group) // group_size_m
#
# For example, in the following matmul where each matrix is 9 blocks by 9 blocks,
# we can see that if we compute the output in row-major ordering, we need to load 90
# blocks into SRAM to compute the first 9 output blocks, but if we do it in grouped
# ordering, we only need to load 54 blocks.
#
# .. image:: grouped_vs_row_major_ordering.png
#
# In practice, this can improve the performance of our matrix multiplication kernel by
# more than 10\% on some hardware architecture (e.g., 220 to 245 TFLOPS on A100).
#
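# As a rough sanity check of the 90 vs. 54 numbers above, a small sketch assuming
# the 9-by-9-block example with K spanning 9 blocks and GROUP_SIZE_M = 3:
#
# .. code-block:: python
#
#     k_blocks = 9
#     # row-major: one A block-row (reused) plus 9 distinct B block-columns
#     row_major_loads = 1 * k_blocks + 9 * k_blocks   # 90
#     # grouped: 3 A block-rows plus 3 B block-columns
#     grouped_loads = 3 * k_blocks + 3 * k_blocks     # 54
#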
# %%
# Final Result
# ------------
import torch
import triton
import triton.language as tl
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
],
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
x = x + 1
return tl.where(x >= 0, x, 0.01 * x)
# %%
# We can now create a convenience wrapper function that only takes two input tensors,
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel.
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
matmul_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
ACTIVATION=activation
)
return c
# %%
# Unit Test
# ---------
#
# We can test our custom matrix multiplication operation against a native torch implementation (i.e., cuBLAS).
torch.manual_seed(0)
a = torch.randn((512, 512), device='cuda', dtype=torch.float16)
b = torch.randn((512, 512), device='cuda', dtype=torch.float16)
triton_output = matmul(a, b)
torch_output = torch.matmul(a, b)
print(f"triton_output={triton_output}")
print(f"torch_output={torch_output}")
if torch.allclose(triton_output, torch_output, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
# %%
# Benchmark
# ---------
#
# Square Matrix Performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can now compare the performance of our kernel against that of cuBLAS. Here we focus on square matrices,
# but feel free to arrange this script as you wish to benchmark any other matrix shape.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['M', 'N', 'K'], # Argument names to use as an x-axis for the plot
x_vals=[
128 * i for i in range(2, 33)
], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(M, N, K, provider):
a = torch.randn((M, K), device='cuda', dtype=torch.float16)
b = torch.randn((K, N), device='cuda', dtype=torch.float16)
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b), quantiles=quantiles)
    perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)  # 2*M*N*K FLOPs, reported in TFLOP/s
return perf(ms), perf(max_ms), perf(min_ms)
benchmark.run(show_plots=True, print_data=True)
triton-main/python/examples/copy_strided.py
import triton
import triton.language as tl
# triton kernel
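# Copies a BLOCK_M x BLOCK_N tile from X (rows strided by `stride_xm`) into Z
# (columns strided by `stride_zn`); the script then prints the Triton-GPU IR.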
@triton.jit
def kernel(X, stride_xm,
Z, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * 1
Zs = Z + off_m[:, None] * 1 + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
ret = triton.compile(kernel, signature="*fp32,i32,*fp32,i32", constants={"BLOCK_M": 64, "BLOCK_N": 64})
print(ret.asm["ttgir"])
triton-main/python/examples/empty.py
import torch
import triton
import triton.language as tl
@triton.jit
def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
pass
X = torch.randn(1, device="cuda")
pgm = kernel[(1,)](X, 1, 1, BLOCK=1024)
triton-main/python/test/tools/compare_files.py
import argparse
import difflib
import glob
import os
import sys
from typing import Dict, List, Optional, Tuple
import yaml
class ComparisonResult:
def __init__(self, name: str, extension: str, numComparisons: int, diffs: List[str] = None, errors: List[str] = None):
self.name = name
self.extension = extension
self.numComparisons = numComparisons
self.diffs = [] if diffs is None else diffs
self.errors = [] if errors is None else errors
def isSuccess(self) -> bool:
return len(self.diffs) == 0 and len(self.errors) == 0
def __str__(self) -> str:
return f"name={self.name}, extension={self.extension}, numComparisons={self.numComparisons}, success={self.isSuccess()}"
def listFilesWithExtension(path: str, extension: str) -> List[str]:
"""
Returns a list of files in the given path with the given extension
The files are returned with their full path
"""
files = glob.glob(os.path.join(path, f'*.{extension}'))
return files
def getFileWithExtension(path: str, ext: str) -> Optional[str]:
"""
Returns a single file in the given path with the given extension
"""
# get all files in directory with extension
files = listFilesWithExtension(path, ext)
if len(files) == 0:
return None
# filter out files with grp in their name
files = [f for f in files if "__grp__" not in f]
if len(files) != 1:
print(f"Found {len(files)} files in {path} with extension {ext}!")
sys.exit(2)
return files[0]
def loadYamlFile(filePath: str) -> List[Dict[str, str]]:
"""
Loads a yaml file and returns its content as a list of dictionaries
"""
with open(filePath, 'r') as file:
content = yaml.safe_load(file)
return content
def compareFiles(file1: str, file2: str) -> bool:
"""
Compares two files and returns True if they are the same, False otherwise
"""
with open(file1, 'rb') as f1, open(file2, 'rb') as f2:
content1 = f1.read()
content2 = f2.read()
return content1 == content2
def diffFiles(file1, file2):
with open(file1, 'r') as f1:
file1_lines = f1.readlines()
with open(file2, 'r') as f2:
file2_lines = f2.readlines()
diff = list(difflib.unified_diff(file1_lines, file2_lines, file1, file2))
return diff
def getFileVec(path: str) -> List[Tuple[str, str]]:
"""
Returns a list of tuples (extension, file) for the given path (note: the path includes the hash)
The returned list must have extensions (json, ttir, ttgir)
in this particular order, unless a file with a certain extension does not exist
"""
vec = []
for ext in ["json", "ttir", "ttgir"]:
file = getFileWithExtension(path, ext)
if file is not None:
vec.append((ext, file))
return vec
def getNameToHashesDict(path: str) -> Dict[str, List[str]]:
"""
Returns a dictionary that maps kernel names to a list of hashes that have the same kernel name
in the given path
Note: the hashes must have a json file and either a ttir or ttgir file, otherwise they are ignored
"""
nameToHashes = {}
for hash in os.listdir(path):
fullPath = os.path.join(path, hash)
if not os.path.isdir(fullPath):
print(f"Path {fullPath} is not a directory!")
sys.exit(2)
fileVec = getFileVec(fullPath)
if len(fileVec) < 2 or fileVec[0][0] != "json":
continue
jsonFile = fileVec[0][1]
# load json file
with open(jsonFile, 'r') as file:
content = yaml.safe_load(file)
# get name
name = content["name"]
nameToHashes.setdefault(name, []).append(hash)
return nameToHashes
def doFilesMatch(path1: str, path2: str) -> bool:
"""
Returns True if the files in the given paths match, False otherwise
The files are considered to match if:
1. The number of files in both paths match
2. The json files match
3. Both paths have a ttir that match, if a ttir does not exist, the ttgir file must exist and match
"""
filesVec1 = getFileVec(path1)
filesVec2 = getFileVec(path2)
# The number of files must match
if len(filesVec1) != len(filesVec2):
return False
for (ext1, file1), (ext2, file2) in zip(filesVec1, filesVec2):
if ext1 != ext2:
return False
if not compareFiles(file1, file2):
return False
else:
# once we actually compared a ttir or ttgir file, we can break
if ext1 in ("ttir", "ttgir"):
break
return True
def compareMatchingFiles(name: str, extension: str, nameToHashes1: Dict[str, List[str]], nameToHashes2: Dict[str, List[str]], args) -> ComparisonResult:
"""
    Compare files with the given name/extension across all matching hashes in both paths
    Return a ComparisonResult recording the number of comparisons performed, any diffs, and any errors
"""
hashes1 = nameToHashes1.get(name, [])
hashes2 = nameToHashes2.get(name, [])
diffs = []
errors = []
numComparisons = 0
for hash1 in hashes1:
path1 = os.path.join(args.path1, hash1)
for hash2 in hashes2:
path2 = os.path.join(args.path2, hash2)
# check whether both paths have:
# 1. json files that match
# 2. ttir files that match (if they exist), otherwise ttgir files that match (if they exist)
            # if any of these constraints is not met, then we can skip this pair of hashes since they are not a match
if not doFilesMatch(path1, path2):
continue
numComparisons += 1
extFile1 = listFilesWithExtension(path1, extension)[0]
extFile2 = listFilesWithExtension(path2, extension)[0]
diff = diffFiles(extFile1, extFile2)
if len(diff) > 0:
diffs.append(diffFiles(extFile2, extFile1))
if numComparisons == 0:
errors.append(f"Did not find any matching files for {name}")
return ComparisonResult(name=name, extension=extension, numComparisons=numComparisons, diffs=diffs, errors=errors)
def dumpResults(results: List[ComparisonResult], fileName: str):
"""
Dumps the results to the given file
"""
with open(fileName, 'w') as file:
for result in results:
file.write(str(result) + "\n")
file.write("Diffs:\n")
for diff in result.diffs:
for line in diff:
file.write(line)
file.write("Errors:\n")
for error in result.errors:
file.write(error)
file.write("\n\n")
def main(args) -> bool:
"""
Iterates over all kernels in the given yaml file and compares them
in the given paths
"""
if args.path1 == args.path2:
print("Cannot compare files in the same directory!")
sys.exit(2)
# Get kernel name to hashes dict, these hashes would have the same kernel name
nameToHashes1 = getNameToHashesDict(args.path1)
nameToHashes2 = getNameToHashesDict(args.path2)
yamlFilePath = args.kernels
if not os.path.exists(yamlFilePath):
print(f"Path {yamlFilePath} does not exist!")
sys.exit(2)
nameAndExtension = loadYamlFile(yamlFilePath)["name_and_extension"]
results = []
# iterate over the kernels that need to be checked
for d in nameAndExtension:
name = d["name"] # kernel name
extension = d["extension"] # extension of the file to be compared (e.g. ptx)
# Compare all hashes on path 1 with all hashes on path 2
        # result is a ComparisonResult recording any diffs/errors found for files with "extension"
result = compareMatchingFiles(name, extension, nameToHashes1, nameToHashes2, args)
print(result)
        # Record the result, whether or not it found diffs
results.append(result)
# Dump results
dumpResults(results, "kernels_reference_check.txt")
success = all(result.isSuccess() for result in results)
if not success:
print("Failed!")
sys.exit(1)
print("Passed!")
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--path1",
type=str,
default=None,
required=True,
help=("Path to first cache directory"),
)
parser.add_argument(
"--path2",
type=str,
default=None,
required=True,
help=("Path to second cache directory"),
)
parser.add_argument(
"--kernels",
type=str,
default=None,
required=True,
help=("Path to kernels yaml file"),
)
args = parser.parse_args()
main(args)
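# Example invocation (hypothetical cache directories and kernels file):
#   python compare_files.py --path1 /tmp/cache_a --path2 /tmp/cache_b --kernels kernels.yaml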
triton-main/python/test/unit/tools/test_aot.py
import glob
import os
import subprocess
import sys
import tempfile
import numpy as np
import triton
from triton.common import cuda_include_dir, libcuda_dirs
kernel_utils_src = """
import triton
@triton.jit
def mul(x, y):
return x * y
"""
kernel_src = """
import triton
import triton.language as tl
import kernel_utils
@triton.jit
def kernel(C, A, B,
stride_cm, stride_cn,
stride_am, stride_ak,
stride_bk, stride_bn,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr):
ms = tl.arange(0, BLOCK_M)
ns = tl.arange(0, BLOCK_N)
ks = tl.arange(0, BLOCK_K)
a = tl.load(A + ms[:, None] * stride_am + ks[None, :] * stride_ak)
b = tl.load(B + ks[:, None] * stride_bk + ns[None, :] * stride_bn)
c = tl.dot(a, b)
c = kernel_utils.mul(c, c)
tl.store(C + ms[:, None] * stride_cm + ns[None, :] * stride_cn, c)
"""
test_src = """
#include <cuda.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "kernel.h"
static void write_buffer_to_csv(char *filename, int32_t *buffer, int size) {
FILE *file = fopen(filename, "w");
if (file == NULL) {
printf(\"Could not open file %s\\n\", filename);
return;
}
for (int i = 0; i < size; i++) {
fprintf(file, "%d", buffer[i]);
if (i < size - 1) {
fprintf(file, ",");
}
}
fclose(file);
}
static void read_csv_to_buffer(char *filename, int16_t *buffer, int size) {
FILE *file = fopen(filename, "r");
if (file == NULL) {
printf(\"Could not open file %s\\n\", filename);
return;
}
int index = 0;
while (fscanf(file, "%hd,", &buffer[index]) != EOF && index < size) {
index++;
}
fclose(file);
}
int main(int argc, char **argv) {
int M = 16, N = 16, K = 16;
int BM = 16, BN = 16, BK = 16;
// initialize CUDA handles
CUdevice dev;
CUcontext ctx;
CUstream stream;
CUdeviceptr A, B, C;
CUresult err = 0;
cuInit(0);
cuDeviceGet(&dev, 0);
cuCtxCreate(&ctx, 0, dev);
cuMemAlloc(&A, M * K * 2);
cuMemAlloc(&B, K * N * 2);
cuMemAlloc(&C, M * N * 4);
cuStreamCreate(&stream, 0);
load_matmul_fp16xfp16_16x16x16();
// initialize input data
int16_t hA[M*K];
int16_t hB[K*N];
memset(hA, 0, M*K*2);
memset(hB, 0, K*N*2);
read_csv_to_buffer(argv[1], hA, M*K);
read_csv_to_buffer(argv[2], hB, K*N);
cuMemcpyHtoD(A, hA, M*K*2);
cuMemcpyHtoD(B, hB, K*N*2);
// launch kernel
int gX = 1, gY = 1, gZ = 1;
cuStreamSynchronize(stream);
matmul_fp16xfp16_16x16x16(stream, M/BM, N/BN, 1, C, A, B, N, K, N);
cuStreamSynchronize(stream);
// read data
int32_t hC[M*N];
memset(hC, 0, M*N*4);
cuMemcpyDtoH(hC, C, M*N*4);
write_buffer_to_csv(argv[3], hC, M*N);
// free cuda handles
unload_matmul_fp16xfp16_16x16x16();
cuMemFree(A);
cuMemFree(B);
cuMemFree(C);
cuCtxDestroy(ctx);
}
"""
def test_compile_link_matmul():
np.random.seed(3)
with tempfile.TemporaryDirectory() as tmp_dir:
kernel_path = os.path.join(tmp_dir, "kernel.py")
with open(kernel_path, "w") as file:
file.write(kernel_src)
kernel_utils_path = os.path.join(tmp_dir, "kernel_utils.py")
with open(kernel_utils_path, "w") as file:
file.write(kernel_utils_src)
compiler_path = os.path.join(triton.tools.__path__[0], "compile.py")
linker_path = os.path.join(triton.tools.__path__[0], "link.py")
dtype = "fp16"
M, N, K = 16, 16, 16
BM, BN, BK = 16, 16, 16
# compile all desired configs
hints = [":16", ""]
for ha in hints:
for hb in hints:
sig = f'*fp32:16, *{dtype}:16, *{dtype}:16, i32{ha}, 1, i32{hb}, 1, i32:16, 1, {BM}, {BN}, {BK}'
name = f"matmul_{dtype}x{dtype}_{BM}x{BN}x{BK}"
subprocess.run([sys.executable, compiler_path, "-n", "kernel", "--signature", sig, "--out-name", name, "-o", name, "-w", "1", kernel_path], check=True, cwd=tmp_dir)
# link all desired configs
h_files = glob.glob(os.path.join(tmp_dir, "*.h"))
subprocess.run([sys.executable, linker_path] + h_files + ["-o", "kernel"], check=True, cwd=tmp_dir)
# compile test case
with open(os.path.join(tmp_dir, "test.c"), "w") as file:
file.write(test_src)
c_files = glob.glob(os.path.join(tmp_dir, "*.c"))
subprocess.run(["gcc"] + c_files + ["-I", cuda_include_dir(),
"-L", libcuda_dirs()[0],
"-l", "cuda",
"-o", "test"], check=True, cwd=tmp_dir)
# initialize test data
a = np.random.randn(M * K).astype(np.float16).reshape((M, K))
        b = np.random.randn(K * N).astype(np.float16).reshape((K, N))
a_path = os.path.join(tmp_dir, "a.csv")
b_path = os.path.join(tmp_dir, "b.csv")
c_path = os.path.join(tmp_dir, "c.csv")
for x, path in [(a, a_path), (b, b_path)]:
x.view(np.int16).ravel().tofile(path, sep=",")
# run test case
subprocess.run(["./test", a_path, b_path, c_path], check=True, cwd=tmp_dir)
# read data and compare against reference
c = np.genfromtxt(c_path, delimiter=",", dtype=np.int32)
c_tri = c.reshape((M, N)).view(np.float32)
c_ref = np.matmul(a.astype(np.float32), b.astype(np.float32))
np.testing.assert_allclose(c_tri, c_ref * c_ref, atol=1e-4, rtol=0.)
def test_ttgir_to_ptx():
src = """
module attributes {"triton_gpu.num-warps" = 4 : i32, "triton_gpu.threads-per-warp" = 32 : i32} {
tt.func public @sum_kernel_0d1d(%arg0: !tt.ptr<i32>, %arg1: !tt.ptr<i32>) {
tt.return
}
}
"""
with tempfile.TemporaryDirectory() as tmp_dir:
kernel_path = os.path.join(tmp_dir, "empty_kernel.ttgir")
with open(kernel_path, "w") as fp:
fp.write(src)
k = triton.compile(kernel_path, cc=80)
ptx = k.asm["ptx"]
assert ".target sm_80" in ptx
assert ".address_size 64" in ptx
| 6,126 | 28.887805 | 180 |
py
|
triton
|
triton-main/python/test/unit/runtime/test_cache.py
|
import os
import shutil
import pytest
import torch
import triton
import triton.language as tl
from triton.runtime.jit import JITFunction
tmpdir = ".tmp"
@triton.jit
def function_1(i):
i = i + 1
i = function_2(i)
return i
@triton.jit
def function_2(i):
i = i + 1
return i
@triton.jit
def kernel(X, i, BLOCK: tl.constexpr):
i = i + 1
i = function_1(i)
tl.store(X, i)
@triton.jit(do_not_specialize=["i"])
def kernel_nospec(X, i, BLOCK: tl.constexpr):
i = i + 1
i = function_1(i)
tl.store(X, i)
def apply_src_change(target, old, new):
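    # Reset the cached hashes so cache_key is recomputed from the temporarily
    # modified source; the edit is reverted before returning so later tests see
    # the original kernels.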
kernel.hash = None
function_1.hash = None
function_2.hash = None
function_1.src = function_1.src.replace(old, new)
target.src = target.src.replace(old, new)
ret = target.cache_key
target.src = target.src.replace(new, old)
return ret
def test_nochange():
baseline = kernel.cache_key
updated = apply_src_change(kernel, 'i + 1', 'i + 1')
assert baseline == updated
def test_toplevel_change():
baseline = kernel.cache_key
updated = apply_src_change(kernel, 'i + 1', 'i + 2')
assert baseline != updated
def test_nested1_change():
baseline = kernel.cache_key
updated = apply_src_change(function_1, 'i + 1', 'i + 2')
assert baseline != updated
def reset_tmp_dir():
os.environ["TRITON_CACHE_DIR"] = tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
def test_reuse():
counter = 0
def inc_counter(*args, **kwargs):
nonlocal counter
counter += 1
JITFunction.cache_hook = inc_counter
reset_tmp_dir()
x = torch.empty(1, dtype=torch.int32, device='cuda')
for i in range(10):
kernel[(1,)](x, 1, BLOCK=1024)
assert counter == 1
@pytest.mark.parametrize('mode', ['enable', 'disable'])
def test_specialize(mode):
counter = 0
def inc_counter(*args, **kwargs):
nonlocal counter
counter += 1
JITFunction.cache_hook = inc_counter
reset_tmp_dir()
x = torch.empty(1, dtype=torch.int32, device='cuda')
function = {'enable': kernel, 'disable': kernel_nospec}[mode]
target = {'enable': 3, 'disable': 1}[mode]
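    # With specialization enabled, i == 1, the generic values 2/4/8 and the
    # multiples of 16 each map to a distinct specialization, hence 3 expected
    # compilations; with do_not_specialize a single compilation covers them all.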
for i in [1, 2, 4, 8, 16, 32]:
function[(1,)](x, i, BLOCK=512)
assert counter == target
def test_constexpr_not_callable() -> None:
@triton.jit
def kernel(X, c: tl.constexpr):
tl.store(X, 2)
x = torch.empty(1, dtype=torch.int32, device='cuda')
error = False
try:
kernel[(1, )](x, c="str")
except BaseException:
error = True
assert error is False
# try and catch
try:
kernel[(1, )](x, c=tl.abs)
except BaseException:
error = True
assert error is True
def test_jit_warmup_cache() -> None:
@triton.jit
def kernel_add(a, b, o, N: tl.constexpr):
idx = tl.arange(0, N)
tl.store(o + idx,
tl.load(a + idx) + tl.load(b + idx))
args = [
torch.randn(32, dtype=torch.float32, device="cuda"),
torch.randn(32, dtype=torch.float32, device="cuda"),
torch.randn(32, dtype=torch.float32, device="cuda"),
32,
]
assert len(kernel_add.cache) == 0
kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add.cache) == 1
kernel_add.warmup(*args, grid=(1,))
assert len(kernel_add.cache) == 1
kernel_add.warmup(*args, grid=(1,))
assert len(kernel_add.cache) == 1
def test_jit_debug() -> None:
@triton.jit
def kernel_add(a, b, o, N: tl.constexpr):
idx = tl.arange(0, N)
tl.device_assert(idx < 32, "idx < 32")
tl.store(o + idx,
tl.load(a + idx) + tl.load(b + idx))
device = torch.cuda.current_device()
assert len(kernel_add.cache[device]) == 0
kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add.cache[device]) == 1
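    # tl.device_assert is only compiled in when debug is enabled, so each toggle
    # of the flag below changes the generated code and should add a cache entry.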
kernel_add.debug = False
kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add.cache[device]) == 2
kernel_add.debug = True
kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add.cache[device]) == 3
bins = list(kernel_add.cache[device].values())
assert bins[2].asm['ttir'] != bins[1].asm['ttir']
@triton.jit
def add_fn(a, b, o, N: tl.constexpr):
idx = tl.arange(0, N)
tl.store(o + idx, tl.load(a + idx) + tl.load(b + idx))
def test_jit_noinline() -> None:
@triton.jit
def kernel_add_device(a, b, o, N: tl.constexpr):
add_fn(a, b, o, N)
device = torch.cuda.current_device()
assert len(kernel_add_device.cache[device]) == 0
kernel_add_device.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add_device.cache[device]) == 1
bins = list(kernel_add_device.cache[device].values())
inline_ttir = bins[0].asm['ttir']
add_fn.noinline = True
add_fn.hash = None
kernel_add_device.hash = None
kernel_add_device.cache[device].clear()
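    # Recompile with add_fn marked noinline: the helper should survive as a
    # separate function call in the TTIR, so the IR must differ from the
    # fully inlined version captured above.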
kernel_add_device.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
assert len(kernel_add_device.cache[device]) == 1
bins = list(kernel_add_device.cache[device].values())
noinline_ttir = bins[0].asm['ttir']
assert inline_ttir != noinline_ttir
def test_memory_leak() -> None:
@triton.jit
def kernel(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 10
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0 + tl.zeros([XBLOCK], tl.int32)), tmp0, xmask)
| 5,762 | 26.574163 | 88 |
py
|
triton
|
triton-main/python/test/unit/runtime/test_driver.py
|
import sys
import triton
def test_is_lazy():
from importlib import reload
reload(sys.modules["triton.runtime.driver"])
reload(sys.modules["triton.runtime"])
mod = sys.modules[triton.runtime.driver.__module__]
assert isinstance(triton.runtime.driver, getattr(mod, "LazyProxy"))
assert triton.runtime.driver._obj is None
utils = triton.runtime.driver.utils # noqa: F841
assert issubclass(triton.runtime.driver._obj.__class__, getattr(mod, "DriverBase"))
| 488 | 31.6 | 87 |
py
|
triton
|
triton-main/python/test/unit/runtime/test_subproc.py
|
import multiprocessing
import os
import shutil
from collections import namedtuple
import torch
import triton
import triton.language as tl
tmpdir = ".tmp"
def reset_tmp_dir():
os.environ["TRITON_CACHE_DIR"] = tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
instance_descriptor = namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"])
def compile_fn(config, cc):
@triton.jit
def kernel_sub(a, b, o, N: tl.constexpr):
idx = tl.arange(0, N)
tl.store(o + idx, tl.load(a + idx) - tl.load(b + idx) * 777)
triton.compile(
fn=kernel_sub,
signature={0: "*fp32", 1: "*fp32", 2: "*fp32"},
device=0,
constants={3: 32},
configs=[config],
warm_cache_only=True,
cc=cc,
)
def test_compile_in_subproc() -> None:
major, minor = torch.cuda.get_device_capability(0)
cc = major * 10 + minor
config = instance_descriptor(tuple(range(4)), ())
multiprocessing.set_start_method('fork')
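    # Use fork so the child inherits the parent interpreter state; the assertion
    # below only checks that compilation completes cleanly in the subprocess.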
proc = multiprocessing.Process(
target=compile_fn,
args=(config, cc))
proc.start()
proc.join()
assert proc.exitcode == 0
def compile_fn_dot(config, cc):
@triton.jit
def kernel_dot(Z):
offs = tl.arange(0, 16)[:, None] * 16 + tl.arange(0, 16)[None, :]
z = tl.load(Z + offs)
z = tl.dot(z, z)
tl.store(Z + offs, z)
triton.compile(
fn=kernel_dot,
signature={0: "*fp32"},
device=0,
configs=[config],
warm_cache_only=True,
cc=cc,
)
def test_compile_in_forked_subproc() -> None:
reset_tmp_dir()
major, minor = torch.cuda.get_device_capability(0)
cc = major * 10 + minor
config = instance_descriptor(tuple(range(1)), ())
assert multiprocessing.get_start_method() == 'fork'
proc = multiprocessing.Process(
target=compile_fn_dot,
args=(config, cc))
proc.start()
proc.join()
assert proc.exitcode == 0
| 1,989 | 22.690476 | 90 |
py
|
triton
|
triton-main/python/test/unit/runtime/test_autotuner.py
|
import torch
import triton
import triton.language as tl
def test_kwargs():
N = 1024
src = torch.empty(N, device='cuda')
dst = torch.empty(N, device='cuda')
configs = [triton.Config(kwargs={'BLOCK_SIZE': 32}), triton.Config(kwargs={'BLOCK_SIZE': 128})]
@triton.autotune(configs=configs, key=['N'])
@triton.jit
def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(src + offsets, mask=offsets < N)
tl.store(dst + offsets, x, mask=offsets < N)
grid = lambda META: (triton.cdiv(N, META['BLOCK_SIZE']),)
_kernel[grid](dst, src, N)
_kernel[grid](dst=dst, src=src, N=N)
| 709 | 29.869565 | 99 |
py
|
triton
|
triton-main/python/test/unit/runtime/test_launch.py
|
import gc
# import importlib
# import os
# import sys
# import tempfile
# import textwrap
# import time
import tracemalloc
import torch
import triton
import triton.language as tl
# from typing import Tuple
def test_memory_leak() -> None:
@triton.jit
def kernel(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 10
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0 + tl.zeros([XBLOCK], tl.int32)), tmp0, xmask)
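    # Track Python-heap allocations across repeated launches; significant growth
    # between the two snapshots below would indicate a leak in the launch path.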
tracemalloc.start()
try:
inp = torch.randn(10, device='cuda')
out = torch.randn(10, device='cuda')
kernel[(10,)](inp, out, 10, XBLOCK=16)
gc.collect()
begin, _ = tracemalloc.get_traced_memory()
for _ in range(100):
kernel[(10,)](inp, out, 10, XBLOCK=16)
gc.collect()
end, _ = tracemalloc.get_traced_memory()
assert end - begin < 5000
finally:
tracemalloc.stop()
# LATENCY_THRESHOLD_US = 46
# def test_kernel_launch_latency() -> None:
# def define_kernel(kernel_name: str, num_tensor_args: int) -> str:
# arg_str = ",".join([f"arg{i}: torch.Tensor" for i in range(num_tensor_args)])
# arg_str += ", n_elements: int, BLOCK_SIZE: tl.constexpr"
# func_str = f"""
# import torch
# import triton
# import triton.language as tl
# @triton.jit
# def {kernel_name}({arg_str}):
# pass
# """
# with tempfile.NamedTemporaryFile(mode="w+t", suffix=".py", delete=False) as temp_file:
# temp_file.write(textwrap.dedent(func_str))
# temp_file_path = temp_file.name
# return temp_file_path
# def import_kernel(file_path, kernel_name):
# directory, filename = os.path.split(file_path)
# module_name, _ = os.path.splitext(filename)
# sys.path.insert(0, directory)
# module = importlib.import_module(module_name)
# kernel = getattr(module, kernel_name)
# return kernel
# def empty(*kernel_args: Tuple[torch.Tensor]):
# first_arg = kernel_args[0]
# n_elements = first_arg.numel()
# grid = (triton.cdiv(n_elements, 1024),)
# device = torch.cuda.current_device()
# # Warmup
# empty_kernel[grid](*kernel_args, n_elements, BLOCK_SIZE=1024, device=device)
# torch.cuda.synchronize()
# # Measure launch overhead at steady state
# num_runs = 1000
# start_time = time.time()
# for i in range(num_runs):
# empty_kernel[grid](*kernel_args, n_elements, BLOCK_SIZE=1024, device=device)
# end_time = time.time()
# latency_us = (end_time - start_time) / num_runs * 1e6
# assert latency_us < LATENCY_THRESHOLD_US, "Kernel launch time has increased!"
# num_tensor_args = 40
# kernel_name = 'empty_kernel'
# file_path = define_kernel(kernel_name, num_tensor_args)
# empty_kernel = import_kernel(file_path, kernel_name)
# # Initialize random tensors for the empty_kernel
# torch.manual_seed(0)
# size = 1024
# kernel_args = (torch.rand(size, device='cuda') for i in range(num_tensor_args))
# # Run empty, which would run empty_kernel internally
# empty(*kernel_args)
| 3,409 | 30.869159 | 96 |
py
|
triton
|
triton-main/python/test/unit/interpreter/test_interpreter.py
|
import random
import torch
import triton
import triton.language as tl
from triton.interpreter.interpreter import program_ids_from_grid
def test_addition():
@triton.jit(interpret=True)
def add_kernel(
x_ptr,
y_ptr,
output_ptr,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
a = torch.rand((128,), device="cuda")
b = torch.rand((128,), device="cuda")
expected = a + b
output = torch.empty((128,), device="cuda")
def grid(meta):
return (triton.cdiv(128, meta["BLOCK_SIZE"]),)
add_kernel[grid](a, b, output, 128, BLOCK_SIZE=32)
assert torch.allclose(expected, output, atol=1e-2, rtol=0)
def test_program_ids_from_grid():
random.seed(123)
grid = (3, 4)
expected_combinations = 3 * 4
unique_combinations = set(program_ids_from_grid(grid))
assert len(unique_combinations) == expected_combinations
first_run = list(program_ids_from_grid(grid))
second_run = list(program_ids_from_grid(grid))
assert first_run != second_run
def test_atomic():
@triton.jit(interpret=True)
def atomic(
x_ptr,
):
pid = tl.program_id(axis=0)
tl.atomic_add(x_ptr + pid, 1)
t = tl.atomic_xchg(x_ptr + pid, 3)
t += 1 # 2
tl.atomic_cas(x_ptr + pid, 3, t) # match
tl.atomic_cas(x_ptr + pid, 40, 9) # no match
nb_dim = 16
a = torch.zeros((nb_dim, ), dtype=torch.int32, device="cuda")
atomic[(nb_dim, )](a)
assert torch.allclose(a, torch.full_like(a, 2))
| 1,878 | 25.842857 | 65 |
py
|
triton
|
triton-main/python/test/unit/operators/test_inductor.py
|
import torch
import triton
import triton.language as tl
def test_normalization_with_remat():
@triton.jit
def triton_(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 64
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp11 = tl.load(in_ptr2 + (x0), xmask)
tmp13 = tl.load(in_ptr3 + (x0), xmask)
_tmp17 = tl.zeros([XBLOCK, RBLOCK], tl.float32) + 0
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096 * x3)), rmask & xmask, eviction_policy='evict_last', other=0)
tmp2 = tmp0 - tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tl.sqrt(tmp5)
tmp7 = 1 / tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = tmp2 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
_tmp17 = tl.where(rmask & xmask, _tmp17 + tmp14, _tmp17)
tl.store(in_out_ptr0 + (r2 + (4096 * x3) + tl.zeros([XBLOCK, RBLOCK], tl.int32)), tmp14, rmask & xmask)
tmp17 = tl.sum(_tmp17, 1)[:, None]
tmp18 = 4096.0
tmp19 = tmp17 / tmp18
tl.store(in_out_ptr1 + (x3 + tl.zeros([XBLOCK, 1], tl.int32)), tmp19, xmask)
torch.manual_seed(123)
buf14 = torch.rand(8, 64, 64, 64, device="cuda")
buf16 = torch.rand(8, 1, 64, device="cuda")
arg114_1 = torch.rand(64, device="cuda")
arg115_1 = torch.rand(64, device="cuda")
arg8_1 = torch.rand(64, device="cuda")
arg9_1 = torch.rand(64, device="cuda")
triton_[(512,)](buf14, buf16, arg114_1, arg115_1, arg8_1, arg9_1, 512, 4096, 1, 2048)
torch.testing.assert_allclose(buf16.mean().item(), buf14.mean().item(), atol=1e-7, rtol=0)
def test_avg_pool_bw():
@triton.jit
def triton_(in_ptr0, out_ptr0, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x5 = xindex
tmp0 = (-1) + x1
tmp1 = (-1) + x0
tmp2 = 2 + x1
tmp3 = 2 + x0
tmp4 = 0
tmp5 = tl.where(tmp0 != tmp0, tmp0, tl.where(tmp0 > tmp4, tmp0, tmp4))
tmp6 = tl.where(tmp1 != tmp1, tmp1, tl.where(tmp1 > tmp4, tmp1, tmp4))
tmp7 = 8
tmp8 = tl.where(tmp2 != tmp2, tmp2, tl.where(tmp2 < tmp7, tmp2, tmp7))
tmp9 = tl.where(tmp3 != tmp3, tmp3, tl.where(tmp3 < tmp7, tmp3, tmp7))
tmp10 = tmp5 + tmp4
tmp11 = tmp6 + tmp4
tmp12 = 1
tmp13 = tmp8 - tmp12
tmp14 = tl.where(tmp10 != tmp10, tmp10, tl.where(tmp10 < tmp13, tmp10, tmp13))
tmp15 = tmp9 - tmp12
tmp16 = tl.where(tmp11 != tmp11, tmp11, tl.where(tmp11 < tmp15, tmp11, tmp15))
tmp17 = tl.load(in_ptr0 + (tmp16 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
tmp18 = tmp17 / 9
tmp19 = tmp10 < tmp8
tmp20 = tmp11 < tmp9
tmp21 = tmp19 & tmp20
tmp22 = 0.0
tmp23 = tl.where(tmp21, tmp18, tmp22)
tmp24 = tmp6 + tmp12
tmp25 = tl.where(tmp24 != tmp24, tmp24, tl.where(tmp24 < tmp15, tmp24, tmp15))
tmp26 = tl.load(in_ptr0 + (tmp25 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
tmp27 = tmp26 / 9
tmp28 = tmp24 < tmp9
tmp29 = tmp19 & tmp28
tmp30 = tmp23 + tmp27
tmp31 = tl.where(tmp29, tmp30, tmp23)
tmp32 = 2
tmp33 = tmp6 + tmp32
tmp34 = tl.where(tmp33 != tmp33, tmp33, tl.where(tmp33 < tmp15, tmp33, tmp15))
tmp35 = tl.load(in_ptr0 + (tmp34 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
tmp36 = tmp35 / 9
tmp37 = tmp33 < tmp9
tmp38 = tmp19 & tmp37
tmp39 = tmp31 + tmp36
tmp40 = tl.where(tmp38, tmp39, tmp31)
tmp41 = tmp5 + tmp12
tmp42 = tl.where(tmp41 != tmp41, tmp41, tl.where(tmp41 < tmp13, tmp41, tmp13))
tmp43 = tl.load(in_ptr0 + (tmp16 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
tmp44 = tmp43 / 9
tmp45 = tmp41 < tmp8
tmp46 = tmp45 & tmp20
tmp47 = tmp40 + tmp44
tmp48 = tl.where(tmp46, tmp47, tmp40)
tmp49 = tl.load(in_ptr0 + (tmp25 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
tmp50 = tmp49 / 9
tmp51 = tmp45 & tmp28
tmp52 = tmp48 + tmp50
tmp53 = tl.where(tmp51, tmp52, tmp48)
tmp54 = tl.load(in_ptr0 + (tmp34 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
tmp55 = tmp54 / 9
tmp56 = tmp45 & tmp37
tmp57 = tmp53 + tmp55
tmp58 = tl.where(tmp56, tmp57, tmp53)
tmp59 = tmp5 + tmp32
tmp60 = tl.where(tmp59 != tmp59, tmp59, tl.where(tmp59 < tmp13, tmp59, tmp13))
tmp61 = tl.load(in_ptr0 + (tmp16 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
tmp62 = tmp61 / 9
tmp63 = tmp59 < tmp8
tmp64 = tmp63 & tmp20
tmp65 = tmp58 + tmp62
tmp66 = tl.where(tmp64, tmp65, tmp58)
tmp67 = tl.load(in_ptr0 + (tmp25 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
tmp68 = tmp67 / 9
tmp69 = tmp63 & tmp28
tmp70 = tmp66 + tmp68
tmp71 = tl.where(tmp69, tmp70, tmp66)
tmp72 = tl.load(in_ptr0 + (tmp34 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
tmp73 = tmp72 / 9
tmp74 = tmp63 & tmp37
tmp75 = tmp71 + tmp73
tmp76 = tl.where(tmp74, tmp75, tmp71)
tl.store(out_ptr0 + (x5 + tl.zeros([XBLOCK], tl.int32)), tmp76, None)
inp = torch.ones(8, 2048, 8, 8, device="cuda", dtype=torch.half)
out = torch.ones_like(inp) * 3
numel = inp.numel()
triton_[(numel // 1024,)](inp, out, 1024)
out_ref = torch.ones_like(inp)
out_ref[:, :, 1:7, 0::7] = 2 / 3
out_ref[:, :, 0::7, 1:7] = 2 / 3
out_ref[:, :, 0::7, 0::7] = 4 / 9
torch.testing.assert_allclose(out, out_ref)
| 6,326 | 39.557692 | 138 |
py
|
triton
|
triton-main/python/test/unit/operators/test_cross_entropy.py
|
import pytest
import torch
import triton
import triton.ops
@pytest.mark.parametrize("M, N, dtype, mode",
[
(M, N, dtype, mode) for M in [1024, 821]
for N in [512, 857, 1871, 2089, 8573, 31000]
for dtype in ['float16', 'float32']
for mode in ['forward', 'backward']
]
)
def test_op(M, N, dtype, mode):
capability = torch.cuda.get_device_capability()
if capability[0] < 8 and dtype == "bfloat16":
pytest.skip("Only test bfloat16 on devices with sm >= 80")
dtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}[dtype]
# create inputs
x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
# forward pass
tt_y = triton.ops.cross_entropy(x, idx)
th_y = torch.nn.CrossEntropyLoss(reduction="none")(x, idx)
if mode == 'forward':
torch.testing.assert_allclose(th_y, tt_y)
# backward pass
elif mode == 'backward':
dy = torch.randn_like(tt_y)
# triton backward
tt_y.backward(dy)
tt_dx = x.grad.clone()
# torch backward
x.grad = None
th_y.backward(dy)
th_dx = x.grad.clone()
torch.testing.assert_allclose(th_dx, tt_dx)
| 1,447 | 34.317073 | 99 |
py
|
triton
|
triton-main/python/test/unit/operators/test_blocksparse.py
|
import pytest
import torch
import triton
import triton.ops
def sparsify_tensor(x, mask, block):
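    # Gather the blocks selected by `mask` out of a dense (Z, H, M, N) tensor into
    # the compact block-sparse layout (Z, num_nonzero_blocks, block, block).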
ret = torch.empty((x.size(0), mask.sum(), block, block), dtype=x.dtype, device=x.device)
for idx, (h, i, j) in enumerate(zip(*mask.nonzero(as_tuple=True))):
ret[:, idx, :, :] = x[:, h, i * block:(i + 1) * block, j * block:(j + 1) * block]
return ret
def make_pair(shape, device="cuda", alpha=1e-2, beta=0., trans=False, data=None, dtype=torch.float32):
if data is None:
data = torch.randn(shape, dtype=torch.float32, requires_grad=True, device=device)
ref_ret = data
ref_ret = ref_ret * alpha + beta
ref_ret = ref_ret.half().to(dtype)
if trans:
ref_ret = ref_ret.t().requires_grad_()
ref_ret = ref_ret.detach().requires_grad_()
tri_ret = ref_ret.clone().detach().requires_grad_()
return ref_ret, tri_ret
def mask_tensor(x, mask, block, value=0):
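    # Fill the blocks that `mask` marks as absent with `value`, so that a dense
    # reference computation lines up with the block-sparse result.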
ret = x.clone()
for h, i, j in zip(*(mask == 0).nonzero(as_tuple=True)):
ret[:, h, i * block:(i + 1) * block, j * block:(j + 1) * block] = value
return ret
@pytest.mark.parametrize("MODE", ["sdd", "dds", "dsd"])
@pytest.mark.parametrize("TRANS_A", [False, True])
@pytest.mark.parametrize("TRANS_B", [False, True])
@pytest.mark.parametrize("BLOCK", [16, 32, 64])
@pytest.mark.parametrize("DTYPE", [torch.float16])
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256):
seed = 0
torch.manual_seed(seed)
is_sdd = MODE == "sdd"
is_dsd = MODE == "dsd"
is_dds = MODE == "dds"
do_sparsify = lambda x: sparsify_tensor(x, layout, BLOCK)
do_mask = lambda x: mask_tensor(x, layout, BLOCK)
# create inputs
# create op
a_shape = (Z, H, K, M) if TRANS_A else (Z, H, M, K)
b_shape = (Z, H, N, K) if TRANS_B else (Z, H, K, N)
c_shape = (Z, H, M, N)
shape = {
"sdd": (M, N),
"dsd": (a_shape[2], a_shape[3]),
"dds": (b_shape[2], b_shape[3]),
}[MODE]
layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
layout[1, 2, :] = 0
layout[1, :, 1] = 0
# create data
a_ref, a_tri = make_pair(a_shape, alpha=.1, dtype=DTYPE)
b_ref, b_tri = make_pair(b_shape, alpha=.1, dtype=DTYPE)
dc_ref, dc_tri = make_pair(c_shape, dtype=DTYPE)
# compute [torch]
dc_ref = do_mask(dc_ref) if is_sdd else dc_ref
a_ref = do_mask(a_ref) if is_dsd else a_ref
b_ref = do_mask(b_ref) if is_dds else b_ref
a_ref.retain_grad()
b_ref.retain_grad()
c_ref = torch.matmul(a_ref.transpose(2, 3) if TRANS_A else a_ref,
b_ref.transpose(2, 3) if TRANS_B else b_ref)
c_ref.backward(dc_ref)
c_ref = do_sparsify(c_ref) if is_sdd else c_ref
da_ref = do_sparsify(a_ref.grad) if is_dsd else a_ref.grad
db_ref = do_sparsify(b_ref.grad) if is_dds else b_ref.grad
# triton result
dc_tri = do_sparsify(dc_tri) if is_sdd else dc_tri
a_tri = do_sparsify(a_tri) if is_dsd else a_tri
b_tri = do_sparsify(b_tri) if is_dds else b_tri
a_tri.retain_grad()
b_tri.retain_grad()
op = triton.ops.blocksparse.matmul(layout, BLOCK, MODE, trans_a=TRANS_A, trans_b=TRANS_B, device="cuda")
c_tri = op(a_tri, b_tri)
c_tri.backward(dc_tri)
da_tri = a_tri.grad
db_tri = b_tri.grad
# compare
torch.testing.assert_allclose(c_ref, c_tri)
torch.testing.assert_allclose(da_ref, da_tri)
torch.testing.assert_allclose(db_ref, db_tri)
configs = [
(16, 256),
(32, 576),
(64, 1871),
(128, 2511),
]
@pytest.mark.parametrize("is_dense", [False, True])
@pytest.mark.parametrize("BLOCK, WIDTH", configs)
def test_softmax(BLOCK, WIDTH, is_dense, Z=2, H=2, is_causal=True, scale=0.4):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 3, WIDTH, WIDTH
# initialize layout
# make sure each row has at least one non-zero element
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
if is_dense:
layout[:] = 1
else:
layout[1, 2, :] = 0
layout[1, :, 1] = 0
# initialize data
a_shape = (Z, H, M, N)
a_ref, a_tri = make_pair(a_shape)
dout_ref, dout_tri = make_pair(a_shape)
# compute [torch]
a_ref = mask_tensor(a_ref, layout, BLOCK, value=float("-inf"))
a_ref.retain_grad()
at_mask = torch.ones((M, N), device="cuda")
if is_causal:
at_mask = torch.tril(at_mask)
M = at_mask[None, None, :, :] + torch.zeros_like(a_ref)
a_ref[M == 0] = float("-inf")
out_ref = torch.softmax(a_ref * scale, -1)
out_ref.backward(dout_ref)
out_ref = sparsify_tensor(out_ref, layout, BLOCK)
da_ref = sparsify_tensor(a_ref.grad, layout, BLOCK)
# compute [triton]
a_tri = sparsify_tensor(a_tri, layout, BLOCK)
a_tri.retain_grad()
dout_tri = sparsify_tensor(dout_tri, layout, BLOCK)
op = triton.ops.blocksparse.softmax(layout, BLOCK, device="cuda", is_dense=is_dense)
out_tri = op(a_tri, scale=scale, is_causal=is_causal)
out_tri.backward(dout_tri)
da_tri = a_tri.grad
# compare
torch.testing.assert_allclose(out_tri, out_ref)
torch.testing.assert_allclose(da_tri, da_ref)
@pytest.mark.parametrize("block", [16, 32, 64])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_attention_fwd_bwd(
block,
dtype,
input_scale=1.0,
scale=1 / 8.0,
n_ctx=256,
batch_size=2,
n_heads=2,
):
capability = torch.cuda.get_device_capability()
if capability[0] < 7:
pytest.skip("Only test tl.dot() on devices with sm >= 70")
# inputs
qkv_shape = (batch_size, n_heads, n_ctx, 64)
qkvs = [
torch.nn.Parameter(input_scale * torch.randn(qkv_shape), requires_grad=True).to(dtype).cuda() for _ in range(3)
]
# Triton:
n_blocks = n_ctx // block
layout = torch.tril(torch.ones([n_heads, n_blocks, n_blocks], dtype=torch.long))
query, key, value = [x.clone() for x in qkvs]
query.retain_grad()
key.retain_grad()
value.retain_grad()
attn_out = triton_attention(layout, block, query=query, key=key, value=value, scale=scale)
# ad hoc loss
loss = (attn_out ** 2).mean()
loss.backward()
grads = [query.grad, key.grad, value.grad]
# Torch version:
torch_q, torch_k, torch_v = [x.clone() for x in qkvs]
attn_mask = torch.ones([n_ctx, n_ctx], device="cuda", dtype=dtype)
attn_mask = torch.tril(attn_mask, diagonal=0)
attn_mask = 1e6 * (-1 + (attn_mask.reshape((1, 1, n_ctx, n_ctx)).cuda()))
torch_q.retain_grad()
torch_k.retain_grad()
torch_v.retain_grad()
scores = scale * torch.einsum("bhsd,bhtd->bhst", torch_q, torch_k)
scores = scores + attn_mask
probs = torch.softmax(scores, dim=-1)
torch_attn_out = torch.einsum("bhst,bhtd->bhsd", probs, torch_v)
# ad hoc loss
torch_loss = (torch_attn_out ** 2).mean()
torch_loss.backward()
torch_grads = [torch_q.grad, torch_k.grad, torch_v.grad]
# comparison
# print(f"Triton loss {loss} and torch loss {torch_loss}. Also checking grads...")
torch.testing.assert_allclose(loss, torch_loss, atol=1e-3, rtol=0)
for g1, g2 in zip(grads, torch_grads):
torch.testing.assert_allclose(g1, g2)
@pytest.mark.parametrize("block", [16, 32, 64])
def triton_attention(
layout,
block: int,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
scale: float,
):
sparse_dot_sdd_nt = triton.ops.blocksparse.matmul(layout, block, "sdd", trans_a=False, trans_b=True, device=value.device)
sparse_dot_dsd_nn = triton.ops.blocksparse.matmul(layout, block, "dsd", trans_a=False, trans_b=False, device=value.device)
sparse_softmax = triton.ops.blocksparse.softmax(layout, block, device=value.device)
w = sparse_dot_sdd_nt(query, key)
w = sparse_softmax(w, scale=scale, is_causal=True)
a = sparse_dot_dsd_nn(w, value)
return a
| 7,887 | 34.854545 | 126 |
py
|
triton
|
triton-main/python/test/unit/operators/test_matmul.py
|
import itertools
import pytest
import torch
import triton
import triton.language as tl
import triton.ops
def f8_to_f16(x, dtype):
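    # Element-wise copy kernel: X is reinterpreted as fp8 by the caller, and the
    # store into the fp16 output Y performs the upcast.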
@triton.jit
def kernel(Y, X, N, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(0)
offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offs < N
x = tl.load(X + offs, mask=mask)
tl.store(Y + offs, x, mask=mask)
ret = torch.empty(x.shape, dtype=torch.float16, device=x.device)
grid = lambda META: (triton.cdiv(x.numel(), META['BLOCK_SIZE']),)
dtype = getattr(tl, dtype)
kernel[grid](ret, triton.reinterpret(x, dtype), ret.numel(), BLOCK_SIZE=1024)
return ret
@pytest.mark.parametrize(
"BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, NWARP, NSTAGE, M, N, K, AT, BT, ADTYPE, BDTYPE",
itertools.chain(
*[
[
# 1 warp
(16, 16, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 16, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(16, 32, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(16, 16, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 16, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(16, 32, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(16, 16, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(64, 16, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(16, 64, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE, DTYPE),
# 2 warp
(64, 32, 64, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 64, 64, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(64, 32, 16, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 64, 16, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(128, 32, 32, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 128, 32, 1, 2, 2, None, None, None, AT, BT, DTYPE, DTYPE),
# 4 warp
(128, 64, 16, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(64, 128, 16, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(128, 32, 32, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 128, 32, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(128, 32, 64, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(32, 128, 64, 1, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
# 8 warp
(128, 256, 16, 1, 8, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(256, 128, 16, 1, 8, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(256, 128, 32, 1, 8, 2, None, None, None, AT, BT, DTYPE, DTYPE),
# split-k
(64, 64, 16, 2, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(64, 64, 16, 4, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
(64, 64, 16, 8, 4, 2, None, None, None, AT, BT, DTYPE, DTYPE),
# variable input
(128, 128, 32, 1, 4, 2, 256, 384, 160, AT, BT, DTYPE, DTYPE),
(128, 128, 32, 1, 4, 2, 107, 233, 256, AT, BT, DTYPE, DTYPE),
(128, 128, 32, 1, 4, 2, 107, 233, 311, AT, BT, DTYPE, DTYPE),
(128, 256, 64, 1, 8, 3, 256, 512, 160, AT, BT, DTYPE, DTYPE),
] for DTYPE in ["float16", "bfloat16", "float32"] for AT in [False, True] for BT in [False, True]
],
# n-stage
*[
[
(16, 16, 16, 1, 1, STAGES, 32, 32, 80, AT, BT, DTYPE, DTYPE),
(64, 32, 64, 1, 2, STAGES, 128, 64, 128, AT, BT, DTYPE, DTYPE),
(128, 64, 16, 1, 4, STAGES, 256, 128, 80, AT, BT, DTYPE, DTYPE),
(256, 128, 32, 1, 8, STAGES, 512, 256, 160, AT, BT, DTYPE, DTYPE),
(128, 128, 32, 1, 4, STAGES, 256, 256, 160, AT, BT, DTYPE, DTYPE),
# split-k
(64, 64, 16, 8, 4, STAGES, 128, 128, 768, AT, BT, DTYPE, DTYPE),
(64, 64, 16, 8, 4, STAGES, 128, 128, 32, AT, BT, DTYPE, DTYPE),
] for DTYPE in ["float16", "bfloat16", "float32"] for AT in [False, True] for BT in [False, True] for STAGES in [4]
],
# mixed-precision
*[
[
(32, 32, 32, 1, 1, 2, None, None, None, AT, BT, ADTYPE, BDTYPE),
(128, 256, 32, 1, 8, 2, None, None, None, AT, BT, ADTYPE, BDTYPE),
(32, 64, 32, 1, 1, 2, 64, 128, 32, AT, BT, ADTYPE, BDTYPE),
(128, 128, 32, 8, 4, 2, 256, 256, 128, AT, BT, ADTYPE, BDTYPE),
] for ADTYPE, BDTYPE in [("float8e4b15", "float8e5"),
("float8e4", "float16"),
("float16", "float8e5"),
("float16", "float32"),
("float32", "float16"),
("bfloat16", "float32"),
("float32", "bfloat16")] for AT in [False, True] for BT in [False, True]
]
),
)
def test_op(BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, NWARP, NSTAGE, M, N, K, AT, BT, ADTYPE, BDTYPE):
capability = torch.cuda.get_device_capability()
if capability[0] < 7:
pytest.skip("Only test tl.dot() on devices with sm >= 70")
if capability[0] < 8 and (ADTYPE == "bfloat16" or BDTYPE == "bfloat16"):
pytest.skip("Only test bfloat16 on devices with sm >= 80")
if (ADTYPE == "bfloat16" or BDTYPE == "bfloat16") and SPLIT_K != 1:
pytest.skip("bfloat16 matmuls don't allow split_k for now")
torch.manual_seed(0)
# nuke kernel decorators -- will set meta-parameters manually
kwargs = {'BLOCK_M': BLOCK_M, 'BLOCK_N': BLOCK_N, 'BLOCK_K': BLOCK_K, 'SPLIT_K': SPLIT_K}
pre_hook = None if SPLIT_K == 1 else lambda nargs: nargs['C'].zero_()
configs = [triton.Config(kwargs=kwargs, num_warps=NWARP, num_stages=NSTAGE, pre_hook=pre_hook)]
kernel = triton.ops._matmul.kernel
kernel.configs = configs
# kernel.run = kernel.run.run.run
# get matrix shape
M = BLOCK_M if M is None else M
N = BLOCK_N if N is None else N
K = BLOCK_K * SPLIT_K if K is None else K
a_fp8 = "float8" in ADTYPE
b_fp8 = "float8" in BDTYPE
def maybe_upcast(x, dtype, is_float8):
if is_float8:
return f8_to_f16(x, dtype)
return x
def init_input(n, m, t, dtype, is_float8):
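        # fp8 operands are materialized as raw int8 bit patterns and reinterpreted
        # at launch time; other dtypes use small random values to bound matmul error.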
if t:
return init_input(m, n, False, dtype, is_float8).t()
if is_float8:
return torch.randint(20, 60, (n, m), device="cuda", dtype=torch.int8)
dtype = {"float16": torch.float16, "bfloat16": torch.bfloat16, "float32": torch.float32}[dtype]
return .1 * torch.randn((n, m), device="cuda", dtype=dtype)
# allocate/transpose inputs
a = init_input(M, K, AT, ADTYPE, a_fp8)
b = init_input(K, N, BT, BDTYPE, b_fp8)
# run test
th_a = maybe_upcast(a, ADTYPE, a_fp8).to(torch.float32)
if AT and a_fp8:
th_a = th_a.view(th_a.shape[::-1]).T
th_b = maybe_upcast(b, BDTYPE, b_fp8).to(torch.float32)
if BT and b_fp8:
th_b = th_b.view(th_b.shape[::-1]).T
th_c = torch.matmul(th_a, th_b)
try:
if a_fp8:
a = triton.reinterpret(a, getattr(tl, ADTYPE))
if b_fp8:
b = triton.reinterpret(b, getattr(tl, BDTYPE))
tt_c = triton.ops.matmul(a, b)
atol, rtol = 1e-2, 0
        if ADTYPE == "bfloat16" or BDTYPE == "bfloat16":
atol, rtol = 3.5e-2, 0
torch.testing.assert_allclose(th_c, tt_c, atol=atol, rtol=rtol)
except triton.OutOfResources as e:
pytest.skip(str(e))
| 7,831 | 47.345679 | 127 |
py
|
triton
|
triton-main/python/test/unit/operators/test_flash_attention.py
|
import pytest
import torch
import triton
import triton.ops
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(4, 48, 1024, 16),
(4, 48, 1024, 32),
(4, 48, 1024, 64),
(4, 48, 1024, 128)])
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
@pytest.mark.parametrize('causal', [True, False])
@pytest.mark.parametrize('seq_par', [True, False])
def test_op(Z, H, N_CTX, D_HEAD, dtype, causal, seq_par):
capability = torch.cuda.get_device_capability()
if capability[0] < 8:
pytest.skip("Flash attention only supported for compute capability < 80")
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).to(dtype)
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# # triton implementation
tri_out = triton.ops.attention(q, k, v, causal, sm_scale, seq_par)
# print(ref_out)
# print(tri_out)
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
atol = 1e-1 if dtype == torch.bfloat16 else 1e-2
torch.testing.assert_allclose(ref_out, tri_out, atol=atol, rtol=0)
torch.testing.assert_allclose(ref_dv, tri_dv, atol=atol, rtol=0)
torch.testing.assert_allclose(ref_dk, tri_dk, atol=atol, rtol=0)
torch.testing.assert_allclose(ref_dq, tri_dq, atol=atol, rtol=0)
| 2,243 | 43 | 113 |
py
|
triton
|
triton-main/python/test/unit/language/test_core.py
|
# flake8: noqa: F821,F841
import itertools
import os
import re
from typing import Optional, Union
import numpy as np
import pytest
import torch
from numpy.random import RandomState
import triton
import triton._C.libtriton.triton as _triton
import triton.language as tl
from triton.runtime.jit import JITFunction, TensorWrapper, reinterpret
int_dtypes = ['int8', 'int16', 'int32', 'int64']
uint_dtypes = ['uint8', 'uint16', 'uint32', 'uint64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + uint_dtypes + float_dtypes
dtypes_with_bfloat16 = dtypes + ['bfloat16']
torch_dtypes = ['bool'] + int_dtypes + ['uint8'] + float_dtypes + ['bfloat16']
def _bitwidth(dtype: str) -> int:
# ex.: "int64" -> 64
return int(re.search(r'(\d+)$', dtype).group(1))
def numpy_random(shape, dtype_str, rs: Optional[RandomState] = None, low=None, high=None):
"""
Override `rs` if you're calling this function twice and don't want the same
result for both calls.
"""
if isinstance(shape, int):
shape = (shape, )
if rs is None:
rs = RandomState(seed=17)
if dtype_str in int_dtypes + uint_dtypes:
iinfo = np.iinfo(getattr(np, dtype_str))
low = iinfo.min if low is None else max(low, iinfo.min)
high = iinfo.max if high is None else min(high, iinfo.max)
dtype = getattr(np, dtype_str)
x = rs.randint(low, high, shape, dtype=dtype)
x[x == 0] = 1 # Hack. Never return zero so tests of division don't error out.
return x
elif dtype_str in float_dtypes:
return rs.normal(0, 1, shape).astype(dtype_str)
elif dtype_str == 'bfloat16':
return (rs.normal(0, 1, shape).astype('float32').view('uint32')
& np.uint32(0xffff0000)).view('float32')
elif dtype_str in ['bool', 'int1', 'bool_']:
return rs.normal(0, 1, shape) > 0.0
else:
raise RuntimeError(f'Unknown dtype {dtype_str}')
def to_triton(x: np.ndarray, device='cuda', dst_type=None) -> Union[TensorWrapper, torch.Tensor]:
'''
Note: We need dst_type because the type of x can be different from dst_type.
For example: x is of type `float32`, dst_type is `bfloat16`.
If dst_type is None, we infer dst_type from x.
'''
t = x.dtype.name
if t in uint_dtypes:
signed_type_name = t.lstrip('u') # e.g. "uint16" -> "int16"
x_signed = x.astype(getattr(np, signed_type_name))
return reinterpret(torch.tensor(x_signed, device=device), getattr(tl, t))
else:
if t == 'float32' and dst_type == 'bfloat16':
return torch.tensor(x, device=device).bfloat16()
return torch.tensor(x, device=device)
def torch_dtype_name(dtype) -> str:
if isinstance(dtype, triton.language.dtype):
return dtype.name
elif isinstance(dtype, torch.dtype):
# 'torch.int64' -> 'int64'
m = re.match(r'^torch\.(\w+)$', str(dtype))
return m.group(1)
else:
raise TypeError(f'not a triton or torch dtype: {type(dtype)}')
def to_numpy(x):
if isinstance(x, TensorWrapper):
return x.base.cpu().numpy().astype(getattr(np, torch_dtype_name(x.dtype)))
elif isinstance(x, torch.Tensor):
if x.dtype is torch.bfloat16:
return x.cpu().float().numpy()
return x.cpu().numpy()
else:
raise ValueError(f"Not a triton-compatible tensor: {x}")
def patch_kernel(template, to_replace):
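    # Build a fresh JITFunction so the source substitution below does not mutate
    # the shared template kernel used by other tests.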
kernel = triton.JITFunction(template.fn)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
def check_cuda_only(device):
if device not in ['cuda']:
pytest.skip("Only for cuda")
def check_type_supported(dtype, device):
'''
skip test if dtype is not supported on the current device
'''
if device in ['cuda']:
cc = torch.cuda.get_device_capability()
if cc[0] < 8 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16):
pytest.skip("bfloat16 is only supported on NVGPU with cc >= 80")
class MmaLayout:
def __init__(self, version, warps_per_cta):
self.version = version
self.warps_per_cta = warps_per_cta
def __str__(self):
return f"#triton_gpu.mma<{{versionMajor={self.version[0]}, versionMinor={self.version[1]}, warpsPerCTA={self.warps_per_cta}}}>"
class BlockedLayout:
def __init__(self, size_per_thread, threads_per_warp, warps_per_cta, order):
self.sz_per_thread = size_per_thread
self.threads_per_warp = threads_per_warp
self.warps_per_cta = warps_per_cta
self.order = order
def __str__(self):
return f"#triton_gpu.blocked<{{sizePerThread={self.sz_per_thread}, threadsPerWarp={self.threads_per_warp}, warpsPerCTA={self.warps_per_cta}, order={self.order}}}>"
class SharedLayout:
def __init__(self, vec, per_phase, max_phase, order):
self.vec = str(vec)
self.per_phase = str(per_phase)
self.max_phase = str(max_phase)
self.order = str(order)
def __str__(self):
return f"#triton_gpu.shared<{{vec={self.vec}, perPhase={self.per_phase}, maxPhase={self.max_phase}, order={self.order}}}>"
@pytest.mark.parametrize("dtype_x", list(dtypes) + ["bfloat16"])
def test_empty_kernel(dtype_x, device):
SIZE = 128
@triton.jit
def kernel(X, SIZE: tl.constexpr):
pass
check_type_supported(dtype_x, device)
x = to_triton(numpy_random(SIZE, dtype_str=dtype_x), device=device, dst_type=dtype_x)
kernel[(1, )](x, SIZE=SIZE, num_warps=4)
# generic test functions
def _test_unary(dtype_x, expr, numpy_expr=None, device='cuda'):
check_type_supported(dtype_x, device) # early return if dtype_x is not supported
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = numpy_random(SIZE, dtype_str=dtype_x)
if 'log' in expr:
x = np.abs(x) + 0.01
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
z_tri = to_triton(np.empty_like(z_ref), device=device, dst_type=dtype_x)
kernel[(1, )](z_tri, x_tri, SIZE=SIZE, num_warps=4)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
def _binary_op_dtype_override(a: str, b: str) -> Optional[np.dtype]:
"""
Given two dtype strings, returns the numpy dtype Triton thinks binary
operations on the two types should return. Returns None if the return value
matches numpy. This is generally needed because Triton and pytorch return
narrower floating point types than numpy in mixed operations, and because
Triton follows C/C++ semantics around mixed signed/unsigned operations, and
numpy/pytorch do not.
"""
overrides = {
('float16', 'int16'): np.float16,
('float16', 'int32'): np.float16,
('float16', 'int64'): np.float16,
('float16', 'uint16'): np.float16,
('float16', 'uint32'): np.float16,
('float16', 'uint64'): np.float16,
('int8', 'uint8'): np.uint8,
('int8', 'uint16'): np.uint16,
('int8', 'uint32'): np.uint32,
('int8', 'uint64'): np.uint64,
('int16', 'uint16'): np.uint16,
('int16', 'uint32'): np.uint32,
('int16', 'uint64'): np.uint64,
('int32', 'uint32'): np.uint32,
('int32', 'uint64'): np.uint64,
('int64', 'uint64'): np.uint64,
}
key = (a, b) if a < b else (b, a)
return overrides.get(key)
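# For example, _binary_op_dtype_override('int32', 'uint32') returns np.uint32
# (C-style signed/unsigned promotion), while ('float32', 'float64') returns None
# because Triton and numpy already agree on the result type.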
def _test_binary(dtype_x, dtype_y, expr, numpy_expr=None, mode_x='real', mode_y='real', device='cuda', y_low=None, y_high=None):
check_type_supported(dtype_x, device) # early return if dtype_x is not supported
check_type_supported(dtype_y, device)
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
rs = RandomState(17)
x = numpy_random(SIZE, dtype_str=dtype_x, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype_y, rs=rs, low=y_low, high=y_high)
if mode_x == 'nan':
x[:] = float('nan')
if mode_y == 'nan':
y[:] = float('nan')
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
dtype_z = _binary_op_dtype_override(dtype_x, dtype_y)
if dtype_z is not None:
z_ref = z_ref.astype(dtype_z)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
y_tri = to_triton(y, device=device, dst_type=dtype_y)
z_tri = to_triton(np.empty(SIZE, dtype=z_ref.dtype), device=device)
kernel[(1, )](z_tri, x_tri, y_tri, SIZE=SIZE, num_warps=4)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), err_msg=expr, rtol=0.01)
def _mod_operation_ill_conditioned(dtype_x, dtype_y) -> bool:
# The result of x % y is ill-conditioned if x % y is much smaller than x.
# pytorch/CUDA has slightly different (probably better) rounding on
# remainders than stock LLVM. We currently don't expect to match it
# bit-for-bit.
return (dtype_x, dtype_y) in [
('int32', 'bfloat16'),
('int32', 'float16'),
('int32', 'float32'),
('int64', 'bfloat16'),
('int64', 'float16'),
('int64', 'float32'),
('int64', 'float64'),
('uint16', 'bfloat16'),
('uint16', 'float16'),
('uint16', 'float32'),
('uint32', 'bfloat16'),
('uint32', 'float16'),
('uint32', 'float32'),
('uint64', 'bfloat16'),
('uint64', 'float16'),
('uint64', 'float32'),
('uint64', 'float64'),
]
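# Intuition: the remainder is tiny relative to x, so a one-ulp difference in the
# rounded quotient shifts the computed remainder by a full |y|; exact agreement
# with numpy is therefore not expected for these pairs.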
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['+', '-', '*', '/', '%']
for dtype_x in dtypes_with_bfloat16
for dtype_y in dtypes_with_bfloat16
])
def test_bin_op(dtype_x, dtype_y, op, device):
expr = f' x {op} y'
if op == '%' and dtype_x in int_dtypes + uint_dtypes and dtype_y in int_dtypes + uint_dtypes:
# LLVM has 'numpy.fmod', not 'numpy.remainder', semantics on integer remainders.
numpy_expr = 'np.fmod(x, y)'
elif op in ('/', '%') and dtype_x in ('int16', 'float16', 'bfloat16') and dtype_y in ('int16', 'float16', 'bfloat16'):
# Triton promotes 16-bit floating-point / and % to 32-bit because there
# are no native div or FRem operations on float16. Since we have to
# convert anyway, we may as well take the accuracy bump.
numpy_expr = f'x.astype(np.float32) {op} y.astype(np.float32)'
elif (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if op == '%' and _mod_operation_ill_conditioned(dtype_x, dtype_y):
with pytest.raises(AssertionError, match='Not equal to tolerance'):
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
elif (op in ('%', '/') and
((dtype_x in int_dtypes and dtype_y in uint_dtypes) or
(dtype_x in uint_dtypes and dtype_y in int_dtypes))):
with pytest.raises(triton.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
assert re.match('Cannot use .* because they have different signedness', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y",
[(dtype_x, dtype_y) for dtype_x in int_dtypes for dtype_y in int_dtypes] +
[(dtype_x, dtype_y) for dtype_x in uint_dtypes for dtype_y in uint_dtypes]
)
def test_floordiv(dtype_x, dtype_y, device):
# Triton has IEEE, not numpy/torch, semantics for %, and those carry
# through to //, so we have to use a nonstandard expression to get a
# reference result for //.
expr = 'x // y'
numpy_expr = '((x - np.fmod(x, y)) / y)'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
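# For instance, with x = -7 and y = 2 the reference above evaluates
# (-7 - np.fmod(-7, 2)) / 2 = (-7 + 1) / 2 = -3 (truncation toward zero),
# whereas numpy's own x // y floors to -4.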
def test_unsigned_name_mangling(device):
# Test that uint32 and int32 are mangled differently by the compiler
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(O1, O2, X, Y, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
y = tl.load(Y + off)
out1 = tl.abs(x) # uint32 -> nop
out2 = tl.abs(-y) # int32 -> should have an effect
tl.store(O1 + off, out1)
tl.store(O2 + off, out2)
dtype_x = 'uint32'
dtype_y = 'int32'
# inputs
rs = RandomState(17)
x = numpy_random(SIZE, dtype_str=dtype_x, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype_y, rs=rs)
# reference result
expect = (np.abs(x), np.abs(-y))
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
y_tri = to_triton(y, device=device, dst_type=dtype_y)
actual = tuple(
to_triton(np.empty_like(e), device=device)
for e in expect
)
kernel[(1, )](actual[0], actual[1], x_tri, y_tri, SIZE=SIZE, num_warps=4)
# Bitwise op, so expect exact equality
assert (expect[0] == to_numpy(actual[0])).all()
assert (expect[1] == to_numpy(actual[1])).all()
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['&', '|', '^']
for dtype_x in dtypes + dtypes_with_bfloat16
for dtype_y in dtypes + dtypes_with_bfloat16
])
def test_bitwise_op(dtype_x, dtype_y, op, device):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if 'float' in dtype_x + dtype_y:
with pytest.raises(triton.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr='np.array([])', device=device)
# The CompilationError must have been caused by a C++ exception with this text.
assert re.match('invalid operands of type', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['<<', '>>']
for dtype_x in int_dtypes + uint_dtypes
for dtype_y in int_dtypes + uint_dtypes
])
def test_shift_op(dtype_x, dtype_y, op, device):
expr = f'x {op} y'
bw = max(_bitwidth(dtype_x), _bitwidth(dtype_y))
if dtype_x.startswith('int'):
dtype_z = f'int{bw}'
else:
dtype_z = f'uint{bw}'
numpy_expr = f'x.astype(np.{dtype_z}) {op} y.astype(np.{dtype_z})'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device, y_low=0, y_high=65)
# ---------------
# test compare ops
# ---------------
ops = ['==', '!=', '>', '<', '>=', '<=']
@pytest.mark.parametrize("dtype_x, dtype_y, op, mode_x, mode_y",
# real
[
(dtype_x, dtype_y, op, 'real', 'real')
for op in ops
for dtype_x in dtypes
for dtype_y in dtypes
] +
# NaNs
[('float32', 'float32', op, mode_x, mode_y)
for op in ops
for mode_x, mode_y in [('nan', 'real'),
('real', 'nan'),
('nan', 'nan')]
])
def test_compare_op(dtype_x, dtype_y, op, mode_x, mode_y, device):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
_test_binary(dtype_x, dtype_y, expr, numpy_expr, mode_x=mode_x, mode_y=mode_y, device=device)
# ---------------
# test broadcast
# ---------------
@pytest.mark.parametrize("dtype", dtypes_with_bfloat16)
def test_broadcast(dtype, device):
@triton.jit
def broadcast_kernel(x_ptr, y_ptr, y_broadcasted_ptr, M: tl.constexpr, N: tl.constexpr):
offset1 = tl.arange(0, M)
offset2 = tl.arange(0, N)
x = tl.load(x_ptr + N * offset1[:, None] + offset2[None, :])
y = tl.load(y_ptr + offset2)
_, y_broadcasted = tl.broadcast(x, y)
tl.store(y_broadcasted_ptr + N * offset1[:, None] + offset2[None, :], y_broadcasted)
M = 32
N = 64
rs = RandomState(17)
x = numpy_random((M, N), dtype_str=dtype, rs=rs)
y = numpy_random(N, dtype_str=dtype, rs=rs)
_, y_broadcasted_np = np.broadcast_arrays(x, y)
x_tri = to_triton(x, device=device, dst_type=dtype)
y_tri = to_triton(y, device=device, dst_type=dtype)
y_broadcasted_tri = to_triton(np.empty((M, N), dtype=y_broadcasted_np.dtype), device=device, dst_type=dtype)
broadcast_kernel[(1,)](x_tri, y_tri, y_broadcasted_tri, M=M, N=N)
assert (y_broadcasted_np == to_numpy(y_broadcasted_tri)).all()
# ------------------
# test invalid slice
# ------------------
def test_invalid_slice(device):
dst = torch.empty(128, device=device)
@triton.jit
def _kernel(dst):
dst[10:]
with pytest.raises(triton.CompilationError, match='unsupported tensor index'):
_kernel[(1,)](dst=dst)
# ----------------
# test expand_dims
# ----------------
def test_expand_dims(device):
@triton.jit
def expand_dims_kernel(dummy, N: tl.constexpr):
offset1 = tl.arange(0, N)
t = tl.expand_dims(offset1, 0)
tl.static_assert(t.shape == [1, N])
t = tl.expand_dims(offset1, 1)
tl.static_assert(t.shape == [N, 1])
t = tl.expand_dims(offset1, -1)
tl.static_assert(t.shape == [N, 1])
t = tl.expand_dims(offset1, -2)
tl.static_assert(t.shape == [1, N])
t = tl.expand_dims(offset1, (0, -1))
tl.static_assert(t.shape == [1, N, 1])
t = tl.expand_dims(offset1, (0, 1, 3))
tl.static_assert(t.shape == [1, 1, N, 1])
t = tl.expand_dims(offset1, (-4, 2, -1))
tl.static_assert(t.shape == [1, N, 1, 1])
t = tl.expand_dims(offset1, (3, 1, 2))
tl.static_assert(t.shape == [N, 1, 1, 1])
N = 32
dummy_tensor = torch.empty((), device=device)
expand_dims_kernel[(1,)](dummy_tensor, N)
def test_expand_dims_error_cases(device):
@triton.jit
def dim_out_of_range1(dummy, N: tl.constexpr):
offset1 = tl.arange(0, N)
t = tl.expand_dims(offset1, -2)
t = tl.expand_dims(offset1, -3)
@triton.jit
def dim_out_of_range2(dummy, N: tl.constexpr):
offset1 = tl.arange(0, N)
t = tl.expand_dims(offset1, 1)
t = tl.expand_dims(offset1, 2)
@triton.jit
def duplicate_dim1(dummy, N: tl.constexpr):
offset1 = tl.arange(0, N)
t = tl.expand_dims(offset1, (0, 0))
@triton.jit
def duplicate_dim2(dummy, N: tl.constexpr):
offset1 = tl.arange(0, N)
t = tl.expand_dims(offset1, (0, -3))
N = 32
dummy_tensor = torch.empty((), device=device)
with pytest.raises(triton.CompilationError, match="invalid axis -3"):
dim_out_of_range1[(1,)](dummy_tensor, N)
with pytest.raises(triton.CompilationError, match="invalid axis 2"):
dim_out_of_range2[(1,)](dummy_tensor, N)
with pytest.raises(triton.CompilationError, match=r"duplicate axes, normalized axes = \[0, 0\]"):
duplicate_dim1[(1,)](dummy_tensor, N)
with pytest.raises(triton.CompilationError, match=r"duplicate axes, normalized axes = \[0, 0\]"):
duplicate_dim2[(1,)](dummy_tensor, N)
# ----------------------------
# test invalid program id axis
# ----------------------------
def test_invalid_pid_axis(device):
dst = torch.empty(128, device=device)
@triton.jit
def _kernel(dst):
pid = tl.program_id(20)
with pytest.raises(triton.CompilationError, match=r"program_id must be in \[0,3\]"):
_kernel[(1,)](dst)
# ---------------
# test where
# ---------------
@pytest.mark.parametrize("dtype", dtypes_with_bfloat16 + ["*int32"])
def test_where(dtype, device):
select_ptrs = False
if dtype == "*int32":
dtype = "int64"
select_ptrs = True
check_type_supported(dtype, device)
@triton.jit
def where_kernel(cond_ptr, a_ptr, b_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr,
TEST_POINTERS: tl.constexpr,
TEST_SCALAR_POINTERS: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
decide = tl.load(cond_ptr + offsets, mask=mask)
if TEST_SCALAR_POINTERS:
ptr = tl.where(tl.load(cond_ptr), a_ptr, b_ptr)
output = tl.load(ptr + offsets, mask=mask)
else:
if TEST_POINTERS:
a = tl.load(a_ptr + offsets, mask=mask).to(tl.pi32_t)
b = tl.load(b_ptr + offsets, mask=mask).to(tl.pi32_t)
else:
a = tl.load(a_ptr + offsets, mask=mask)
b = tl.load(b_ptr + offsets, mask=mask)
output = tl.where(decide, a, b)
tl.store(output_ptr + offsets, output, mask=mask)
SIZE = 1_000
rs = RandomState(17)
cond = numpy_random(SIZE, 'bool', rs)
x = numpy_random(SIZE, dtype_str=dtype, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype, rs=rs)
z = np.where(cond, x, y)
cond_tri = to_triton(cond, device=device)
x_tri = to_triton(x, device=device, dst_type=dtype)
y_tri = to_triton(y, device=device, dst_type=dtype)
z_tri = to_triton(np.empty(SIZE, dtype=z.dtype), device=device, dst_type=dtype)
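    # 1-D launch grid: ceil(SIZE / BLOCK_SIZE) program instances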
grid = lambda meta: (triton.cdiv(SIZE, meta['BLOCK_SIZE']),)
where_kernel[grid](cond_tri, x_tri, y_tri, z_tri, SIZE, BLOCK_SIZE=1024, TEST_POINTERS=select_ptrs, TEST_SCALAR_POINTERS=False)
assert (z == to_numpy(z_tri)).all()
if select_ptrs:
where_kernel[grid](cond_tri, x_tri, y_tri, z_tri, SIZE, BLOCK_SIZE=1024, TEST_POINTERS=select_ptrs, TEST_SCALAR_POINTERS=True)
z = np.where(cond[0], x, y)
assert (z == to_numpy(z_tri)).all()
def test_where_broadcast(device):
@triton.jit
def where_kernel(cond_ptr, a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr):
xoffsets = tl.arange(0, BLOCK_SIZE)[:, None]
yoffsets = tl.arange(0, BLOCK_SIZE)[None, :]
mask = tl.load(cond_ptr + yoffsets)
vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets)
res = tl.where(mask, vals, 0.)
tl.store(out_ptr + yoffsets + BLOCK_SIZE * xoffsets, res)
@triton.jit
def where_scalar_condition(a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr):
xoffsets = tl.arange(0, BLOCK_SIZE)[:, None]
yoffsets = tl.arange(0, BLOCK_SIZE)[None, :]
mask = 0
vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets)
res = tl.where(mask, vals, 0.)
tl.store(out_ptr + yoffsets + BLOCK_SIZE * xoffsets, res)
SIZE = 32
dtype = 'float32'
rs = RandomState(17)
x = numpy_random((SIZE, SIZE), dtype_str=dtype, rs=rs)
mask = numpy_random(SIZE, 'bool', rs=rs)
z = np.where(mask, x, 0)
cond_tri = to_triton(mask, device=device)
x_tri = to_triton(x, device=device, dst_type=dtype)
z_tri = to_triton(np.empty((SIZE, SIZE), dtype=z.dtype), device=device, dst_type=dtype)
where_kernel[(1,)](cond_tri, x_tri, z_tri, SIZE)
assert (z == to_numpy(z_tri)).all()
where_scalar_condition[(1,)](x_tri, z_tri, SIZE)
z = np.where(0, x, 0)
assert (z == to_numpy(z_tri)).all()
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, ' -x') for dtype_x in dtypes_with_bfloat16
] + [
(dtype_x, ' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
@pytest.mark.parametrize("dtype_x, expr", [(dtype_x, expr) for dtype_x in ["float32", "float64"] for expr in ['exp', 'log', 'cos', 'sin']])
def test_math_op(dtype_x, expr, device):
_test_unary(dtype_x, f'tl.{expr}(x)', f'np.{expr}(x) ', device=device)
# ----------------
# test abs
# ----------------
@pytest.mark.parametrize("dtype_x", [
(dtype_x)
for dtype_x in dtypes_with_bfloat16
])
def test_abs(dtype_x, device):
_test_unary(dtype_x, 'tl.abs(x)', 'np.abs(x) ', device=device)
@pytest.mark.parametrize("in_dtype", [tl.float8e4b15, tl.float8e4, tl.float8e5])
def test_abs_fp8(in_dtype, device):
@triton.jit
def abs_kernel(X, Z, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
z = tl.abs(x)
tl.store(Z + off, z)
f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device=device)
# f32_to_f8 doesn't handle nan, so we make sure f8_tensor doesn't contain any nan
all_exp_ones = (f8_tensor & 0b01111100) == 128 - 2**in_dtype.fp_mantissa_width
f8_tensor[all_exp_ones] = 0
f8 = triton.reinterpret(f8_tensor, in_dtype)
n_elements = f8_tensor.numel()
out_f8 = torch.empty_like(f8_tensor)
abs_kernel[(1,)](f8, triton.reinterpret(out_f8, in_dtype), n_elements)
f32_tensor = convert_float_to_float32(f8_tensor, in_dtype)
expect = f32_tensor.abs()
actual_f8 = convert_float_to_float32(out_f8, in_dtype)
torch.testing.assert_allclose(actual_f8, expect)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
# TODO: handle `%4 = triton_gpu.convert_layout %3 : (tensor<32xi32, #blocked0>) -> tensor<32xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>>`
@pytest.mark.parametrize("expr, dtype_str", [
(f'x[{s}]', d)
for s in ['None, :', ':, None',
'None, :, :',
':, :, None']
for d in ['int32', 'uint32', 'uint16']
])
def test_index1d(expr, dtype_str, device):
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
shape_z_rank_mismatch = [32 for _ in range(rank_y + 1)]
shape_z_dim_mismatch = [64 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
def generate_kernel(shape_x, shape_z):
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
return patch_kernel(kernel, to_replace)
kernel_match = generate_kernel(shape_x, shape_z)
kernel_dim_mismatch = generate_kernel(shape_x, shape_z_dim_mismatch)
kernel_rank_mismatch = generate_kernel(shape_x, shape_z_rank_mismatch)
# torch result
x = numpy_random(shape_x, dtype_str=dtype_str)
y = np.zeros(shape_z, dtype=getattr(np, dtype_str))
z_ref = eval(expr) + y
# triton result
z_tri = to_triton(np.empty_like(z_ref), device=device)
x_tri = to_triton(x, device=device)
kernel_match[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
# compare
assert (z_ref == to_numpy(z_tri)).all()
def catch_compilation_error(kernel):
try:
kernel[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
except triton.CompilationError as e:
np.testing.assert_(True)
except BaseException:
np.testing.assert_(False)
catch_compilation_error(kernel_dim_mismatch)
catch_compilation_error(kernel_rank_mismatch)
# ---------------
# test tuples
# ---------------
@triton.jit
def tuples_fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples(device):
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = tuples_fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
@triton.jit(noinline=True)
def noinline_simple_fn(x, y, Z):
z = x + y
tl.store(Z, z)
@triton.jit(noinline=True)
def noinline_call_graph_fn1(x):
return x + 1
@triton.jit(noinline=True)
def noinline_call_graph_fn2(y):
return y + 2
@triton.jit(noinline=True)
def noinline_call_graph_fn(x, y, Z):
t0 = noinline_call_graph_fn1(x)
t1 = noinline_call_graph_fn2(y)
z = t0 + t1
tl.store(Z, z)
@triton.jit(noinline=True)
def noinline_shared_fn(x, y, Z):
offs = tl.arange(0, 16)[:, None] * 16 + tl.arange(0, 16)[None, :]
z = tl.load(Z + offs)
z = tl.dot(z, z) + x + y
tl.store(Z + offs, z)
@triton.jit(noinline=True)
def noinline_dynamic_fn(x, y, Z):
if x >= 1:
x = noinline_call_graph_fn1(x)
else:
x = noinline_call_graph_fn2(x)
if y >= 2:
y = noinline_call_graph_fn2(y)
else:
y = noinline_call_graph_fn1(y)
z = x + y
tl.store(Z, z)
@triton.jit(noinline=True)
def noinline_call_multi_values_fn(x, y):
return x + 1, y + 2
@triton.jit(noinline=True)
def noinline_multi_values_fn(x, y, Z):
x, y = noinline_call_multi_values_fn(x, y)
z = x + y
tl.store(Z, z)
@pytest.mark.parametrize("mode", ["simple", "call_graph", "shared", "dynamic", "multi_values"])
def test_noinline(mode, device):
@triton.jit
def kernel(X, Y, Z):
x = tl.load(X)
y = tl.load(Y)
GENERATE_TEST_HERE(x, y, Z)
func_name = f'noinline_{mode}_fn'
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': func_name})
x = torch.tensor([1.0], device=device, dtype=torch.float32)
y = torch.tensor([2.0], device=device, dtype=torch.float32)
if mode == "shared":
z = torch.ones((16, 16), device=device, dtype=torch.float32)
else:
z = torch.tensor([0.0], device=device, dtype=torch.float32)
kernel[(1,)](x, y, z, num_warps=1)
if mode == "simple":
assert torch.equal(z, x + y)
elif mode == "call_graph" or mode == "dynamic" or mode == "multi_values":
assert torch.equal(z, x + 1 + y + 2)
elif mode == "shared":
ref = torch.full((16, 16), 16, device=device, dtype=torch.float32)
assert torch.equal(z, ref + x + y)
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x_str, mode, sem", itertools.chain.from_iterable([
[
('add', 'float16', mode, sem),
('add', 'uint32', mode, sem), ('add', 'int32', mode, sem), ('add', 'float32', mode, sem),
('max', 'uint32', mode, sem), ('max', 'int32', mode, sem), ('max', 'float32', mode, sem),
('min', 'uint32', mode, sem), ('min', 'int32', mode, sem), ('min', 'float32', mode, sem),
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']
for sem in [None, 'acquire', 'release', 'acq_rel', 'relaxed']]))
def test_atomic_rmw(op, dtype_x_str, mode, sem, device):
check_cuda_only(device)
capability = torch.cuda.get_device_capability()
if capability[0] < 7:
if dtype_x_str == 'float16':
pytest.skip("Only test atomic float16 ops on devices with sm >= 70")
n_programs = 5
# triton kernel
@triton.jit
def kernel(X, Z):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
sem_arg = sem if sem is None else f'"{sem}"'
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x, sem={sem_arg})'})
numpy_op = {'add': np.sum, 'max': np.max, 'min': np.min}[op]
max_neutral = float('-inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).min
min_neutral = float('inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
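    # z is initialized to the identity element of the op so that the accumulated atomic
    # result equals the plain reduction over x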
# triton result
rs = RandomState(17)
x = np.array([2**i for i in range(n_programs)], dtype=getattr(np, dtype_x_str))
if mode == 'all_neg':
x = -np.abs(x)
if mode == 'all_pos':
x = np.abs(x)
if mode == 'min_neg':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = -np.max(np.abs(x)) - 1
if mode == 'max_pos':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = np.max(np.abs(x)) + 1
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.array([neutral], dtype=getattr(np, dtype_x_str)), device=device)
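    # launch one program per input element and keep the handle so the generated PTX
    # can be checked below for the requested memory-ordering qualifier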
h = kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = numpy_op(x).astype(getattr(np, dtype_x_str))
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == to_numpy(z_tri).item()
else:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
sem_str = "acq_rel" if sem is None else sem
assert f"atom.global.gpu.{sem_str}" in h.asm["ptx"]
def test_atomic_rmw_predicate(device):
@triton.jit
def kernel(X):
val = tl.program_id(0)
if val < 64:
tl.atomic_max(X, val)
x = torch.zeros((1,), device=device, dtype=torch.int32)
kernel[(4096,)](x)
assert x.item() == 63
@pytest.mark.parametrize("shape, axis",
[(shape, axis) for shape in [(2, 2), (2, 8), (8, 2), (8, 8), (32, 32)] for axis in [0, 1]])
def test_tensor_atomic_rmw(shape, axis, device):
shape0, shape1 = shape
# triton kernel
@triton.jit
def kernel(Z, X, AXIS: tl.constexpr, SHAPE0: tl.constexpr, SHAPE1: tl.constexpr):
off0 = tl.arange(0, SHAPE0)
off1 = tl.arange(0, SHAPE1)
x = tl.load(X + off0[:, None] * SHAPE1 + off1[None, :])
z = tl.sum(x, axis=AXIS)
if AXIS == 1:
tl.atomic_add(Z + off0, z)
else:
tl.atomic_add(Z + off1, z)
rs = RandomState(17)
x = numpy_random((shape0, shape1), dtype_str="float32", rs=rs)
# reference result
z_ref = np.sum(x, axis=axis, keepdims=False)
# triton result
x_tri = to_triton(x, device=device)
z_shape = (shape0, ) if axis == 1 else (shape1, )
z_tri = to_triton(np.zeros(z_shape, dtype="float32"), device=device)
kernel[(1,)](z_tri, x_tri, axis, shape0, shape1)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=1e-4)
def test_tensor_atomic_rmw_block(device):
shape = (8, 8)
@triton.jit
def kernel(X, SHAPE0: tl.constexpr, SHAPE1: tl.constexpr):
off0 = tl.arange(0, SHAPE0)
off1 = tl.arange(0, SHAPE1)
offs = off0[:, None] * SHAPE1 + off1[None, :]
val = offs.to(tl.float32)
x = X + offs
tl.atomic_min(x, val)
x = torch.ones((8, 8), device=device, dtype=torch.float32)
kernel[(2,)](x, shape[0], shape[1])
assert torch.min(x).item() == 0.0
@pytest.mark.parametrize("sem", [None, 'acquire', 'release', 'acq_rel', 'relaxed'])
def test_atomic_cas(sem, device):
# 1. make sure that atomic_cas changes the original value (Lock)
@triton.jit
def change_value(Lock):
tl.atomic_cas(Lock, 0, 1)
Lock = torch.zeros((1,), device=device, dtype=torch.int32)
change_value[(1,)](Lock)
assert (Lock[0] == 1)
# 2. only one block enters the critical section
@triton.jit
def serialized_add(data, Lock, SEM: tl.constexpr):
ptrs = data + tl.arange(0, 128)
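        # atomic_cas returns the old value: 1 means another program still holds the
        # lock, so keep spinning; 0 means the swap succeeded and the lock is ours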
while tl.atomic_cas(Lock, 0, 1, SEM) == 1:
pass
tl.store(ptrs, tl.load(ptrs) + 1.0)
# release lock
tl.atomic_xchg(Lock, 0)
Lock = torch.zeros((1,), device=device, dtype=torch.int32)
data = torch.zeros((128,), device=device, dtype=torch.float32)
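    # 64 program instances each enter the critical section exactly once and add 1.0,
    # so every element of data should end up at 64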
ref = torch.full((128,), 64.0)
h = serialized_add[(64,)](data, Lock, SEM=sem)
sem_str = "acq_rel" if sem is None else sem
np.testing.assert_allclose(to_numpy(data), to_numpy(ref))
assert f"atom.global.{sem_str}" in h.asm["ptx"]
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False)
for dtype_x in dtypes
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True),
('float32', 'int1', False),
('int8', 'bfloat16', False),
] + [
(f'uint{x}', f'int{x}', True) for x in [8, 16, 32, 64]
] + [
(f'int{x}', f'uint{x}', True) for x in [8, 16, 32, 64]
])
def test_cast(dtype_x, dtype_z, bitcast, device):
# bfloat16 on cc < 80 will not be tested
check_type_supported(dtype_x, device)
check_type_supported(dtype_z, device)
size = 1024
# This is tricky because numpy doesn't have bfloat, and torch doesn't have uints.
if dtype_x.startswith('bfloat'):
x_tri = torch.randn(size, dtype=getattr(torch, dtype_x), device=device)
else:
x = numpy_random(size, dtype_str=dtype_x, low=-10, high=10) * 10
# Triton clamps negative values to zero, while numpy wraps around
# intmax, so avoid negatives for now.
# TODO: figure out which one should actually be happening, and test it
if dtype_z in uint_dtypes:
x = np.absolute(x)
x_tri = to_triton(x, device=device)
# triton kernel
@triton.jit
def kernel(X, Z, BITCAST: tl.constexpr, SIZE: tl.constexpr):
x_ptr = X + tl.arange(0, SIZE)
z_ptr = Z + tl.arange(0, SIZE)
x = tl.load(x_ptr)
z = x.to(Z.dtype.element_ty, bitcast=BITCAST)
tl.store(z_ptr, z)
dtype_z_np = dtype_z if dtype_z != 'int1' else 'bool_'
# triton result
if dtype_z.startswith('bfloat'):
z_tri = torch.empty((size,), dtype=getattr(torch, dtype_z), device=device)
else:
z_tri = to_triton(np.empty((size, ), dtype=getattr(np, dtype_z_np)), device=device)
kernel[(1, )](x_tri, z_tri, BITCAST=bitcast, SIZE=size, num_warps=1)
# torch result
if dtype_z.startswith('bfloat') or dtype_x.startswith('bfloat'):
assert bitcast is False
z_ref = x_tri.to(z_tri.dtype)
torch.testing.assert_close(z_ref, z_tri, rtol=0, atol=0)
else:
if bitcast:
z_ref = x.view(getattr(np, dtype_z_np))
else:
z_ref = x.astype(getattr(np, dtype_z_np))
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0, atol=0)
@pytest.mark.parametrize("dtype_str, num_warps", [(dtype_str, num_warps) for dtype_str in int_dtypes + float_dtypes for num_warps in [4, 8]])
def test_cat(dtype_str, num_warps, device):
check_type_supported(dtype_str, device)
@triton.jit
def kernel(X, Y, Z, N: tl.constexpr):
offs = tl.arange(0, N)
x = tl.load(X + offs)
y = tl.load(Y + offs)
z = tl.cat(x, y, can_reorder=True)
tl.store(Z + tl.arange(0, 2 * N), z)
x = torch.arange(0, 128, device=device).to(getattr(torch, dtype_str))
y = torch.arange(-128, 0, device=device).to(getattr(torch, dtype_str))
z_ref = torch.cat([x, y], dim=0).sum()
z = torch.zeros((256,), dtype=getattr(torch, dtype_str), device=device)
kernel[(1, )](x, y, z, N=128, num_warps=num_warps)
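    # can_reorder=True allows the backend to permute elements, so only order-insensitive
    # properties (the sum and the absence of duplicates) are checked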
assert z.sum() == z_ref
# check if there's no duplicate value in z
assert z.unique().size(0) == z.size(0)
@pytest.mark.parametrize("dtype_str", list(torch_dtypes))
def test_store_constant(dtype_str, device):
    """Tests that boolean True is stored as 1."""
    check_type_supported(dtype_str, device)
@triton.jit
def kernel(output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
output = GENERATE_TEST_HERE
tl.store(output_ptr + offsets, output, mask=mask)
triton_dtype_str = 'uint8' if dtype_str == 'bool' else dtype_str
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.zeros([BLOCK_SIZE], dtype=tl.{triton_dtype_str}) + 1'})
block_size = 128
ref = torch.ones([block_size], dtype=getattr(torch, dtype_str), device=device)
output = torch.zeros([block_size], dtype=getattr(torch, dtype_str), device=device)
kernel[(1,)](output, block_size, BLOCK_SIZE=block_size)
assert torch.all(output == ref)
def test_load_store_same_ptr(device):
@triton.jit()
def kernel(in_out_ptr):
pid = tl.program_id(axis=0)
x = tl.load(in_out_ptr + pid)
out = x * 2
tl.store(in_out_ptr + pid, out)
for _ in range(1000):
x = torch.ones((65536,), device=device, dtype=torch.float32)
kernel[(65536,)](x, num_warps=32)
assert torch.all(x == 2)
def convert_float_to_float32(fp: torch.tensor, dtype=None):
if not dtype:
dtype = getattr(tl, torch_dtype_name(fp.dtype))
fp = fp.view(getattr(torch, f"int{dtype.primitive_bitwidth}"))
exp_width = dtype.primitive_bitwidth - dtype.fp_mantissa_width - 1
exp_bias = dtype.exponent_bias
sign = ((fp >> (dtype.primitive_bitwidth - 1)) & 0x01).int()
exp = ((fp >> dtype.fp_mantissa_width) & ((1 << exp_width) - 1)).int()
frac = (fp & ((1 << dtype.fp_mantissa_width) - 1)).int()
output = torch.where(exp == 0,
# subnormal
((-1.0) ** sign) * (2.0 ** (1 - exp_bias)) * (frac / (2.0 ** dtype.fp_mantissa_width)),
# normal
((-1.0) ** sign) * (2.0 ** (exp - exp_bias)) * (1.0 + frac / (2.0 ** dtype.fp_mantissa_width))).float()
extended_exp = ((1 << (tl.float32.primitive_bitwidth - tl.float32.fp_mantissa_width - 1)) - 1) << tl.float32.fp_mantissa_width
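    # extended_exp is the all-ones float32 exponent field (0x7f800000); it is used below to
    # map fp8 encodings with an all-ones exponent onto float32 inf/NaN bit patterns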
# special cases, exp is 0b11..1
if dtype in [tl.float8e4, tl.float8e4b15]:
# float8e4m3 does not have infinities
output[fp == 0b01111111] = torch.nan
output[fp == 0b11111111] = torch.nan
else:
output = torch.where(exp == (1 << exp_width) - 1,
((sign << (tl.float32.primitive_bitwidth - 1)) | extended_exp | (frac << (tl.float32.fp_mantissa_width - dtype.fp_mantissa_width))).view(torch.float32),
output)
return output
@pytest.mark.parametrize("in_dtype", [torch.float16, torch.bfloat16])
def test_convert_float16_to_float32(in_dtype, device):
"""Tests that check convert_float_to_float32 function"""
check_type_supported(in_dtype, device)
f16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16).view(in_dtype)
f32_output = convert_float_to_float32(f16_input)
nan = f16_input.isnan()
assert torch.all(f32_output[nan].isnan())
inf = f16_input.isinf()
assert torch.all(f32_output[inf].isinf())
other = torch.logical_not(torch.logical_or(nan, inf))
assert torch.all(f16_input[other] == f32_output[other])
def serialize_fp8(np_data, in_dtype):
if in_dtype == tl.float8e4b15:
# triton's f8e4b15 format is optimized for software emulation
# as a result, each pack of 4xfp8 values:
# s0b0s1b1s2b2s3b3 (for s, b sign and bits respectively)
# is actually internally stored as
# s0s2b0b2s1s3b1b3
# we apply the conversion here
f8x4 = np_data.view(np.uint32)
s = [(f8x4 & (0x80000000 >> i)) << i for i in range(0, 32, 8)]
b = [(f8x4 & (0x7f000000 >> i)) << i for i in range(0, 32, 8)]
signs = (s[0] >> 0) | (s[1] >> 16) | (s[2] >> 1) | (s[3] >> 17)
bits = (b[0] >> 1) | (b[1] >> 17) | (b[2] >> 8) | (b[3] >> 24)
# tensor of triton fp8 data
return (signs | bits).view(np.int8)
else:
return np_data
@pytest.mark.parametrize("in_dtype", [tl.float8e4b15, tl.float8e4, tl.float8e5])
@pytest.mark.parametrize("out_dtype", [torch.float16, torch.bfloat16, torch.float32])
def test_fp8_fpN_roundtrip(in_dtype, out_dtype, device):
"""
For all possible float8 values (ref_fp8 = range(0, 256)), test that:
- conversion tri_fp16 = convert(input=ref_fp8, out=out_dtype) matches the reference
        - conversion tri_fp8 = convert(input=tri_fp16, out=in_dtype) matches the original
    This round trip is only possible if both conversions are correct.
"""
check_type_supported(out_dtype, device)
from contextlib import nullcontext as does_not_raise
expectation = does_not_raise()
err_msg = None
if (in_dtype == tl.float8e4b15 and out_dtype != torch.float16) or\
(in_dtype != torch.float16 and out_dtype == tl.float8e4b15):
expectation = pytest.raises(triton.CompilationError)
err_msg = "fp8e4b15 can only be converted to/from fp16"
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
# initialize array containing all possible f8 values except NaN
ref_fp8 = np.array(range(-128, 128), dtype=np.int8)
is_nan = (ref_fp8 & 0b01111100) == 128 - 2**in_dtype.fp_mantissa_width
exp_mask = 0b01111111 ^ ((1 << in_dtype.fp_mantissa_width) - 1)
is_subnormal = np.logical_or((ref_fp8 & exp_mask) == 0, (ref_fp8 & exp_mask) == exp_mask)
ref_fp8[is_nan] = 0
ref_fp8[is_subnormal] = 0
tri_fp8 = torch.from_numpy(serialize_fp8(ref_fp8, in_dtype)).cuda()
tri_fp16 = torch.empty(256, dtype=out_dtype, device="cuda")
with expectation as e:
copy_kernel[(1,)](triton.reinterpret(tri_fp8, in_dtype), tri_fp16, tri_fp16.shape[0], BLOCK_SIZE=1024)
ref_fp8 = torch.from_numpy(ref_fp8).cuda()
ref_fp16 = convert_float_to_float32(ref_fp8, in_dtype)
assert torch.all(tri_fp16[~is_subnormal] == ref_fp16[~is_subnormal])
ref_fp8 = torch.empty_like(tri_fp16, dtype=torch.int8)
copy_kernel[(1,)](tri_fp16, triton.reinterpret(ref_fp8, in_dtype), tri_fp16.shape[0], BLOCK_SIZE=1024)
assert torch.all(tri_fp8 == ref_fp8)
if err_msg is not None:
assert err_msg in str(e)
# ---------------
# test reduce
# ---------------
def get_reduced_dtype(dtype_str, op):
if op in ('argmin', 'argmax'):
return 'int32'
if dtype_str in ['int8', 'uint8', 'int16', 'uint16']:
return 'int32'
if dtype_str == 'bfloat16':
return 'float32'
return dtype_str
@pytest.mark.parametrize("op, dtype_str, shape",
[(op, dtype, shape)
for op in ['min', 'max',
'min-with-indices',
'max-with-indices',
'argmin-tie-break-left',
'argmax-tie-break-left',
'sum']
for dtype in dtypes_with_bfloat16
for shape in [32, 64, 128, 512]])
def test_reduce1d(op, dtype_str, shape, device):
check_type_supported(dtype_str, device) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
GENERATE_TEST_HERE
tl.store(Z, z)
if 'with-indices' in op:
patch = f'z, _ = tl.{op.split("-")[0]}(x, axis=0, return_indices=True)'
elif 'arg' in op:
tie_break_left = 'tie-break-left' in op
patch = f'z = tl.{op.split("-")[0]}(x, axis=0, tie_break_left={tie_break_left})'
else:
patch = f'z = tl.{op}(x, axis=0)'
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': patch})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random((shape,), dtype_str=dtype_str, rs=rs)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'max-with-indices': np.max,
'min-with-indices': np.min,
'argmin-tie-break-fast': np.argmin,
'argmin-tie-break-left': np.argmin,
'argmax-tie-break-fast': np.argmax,
'argmax-tie-break-left': np.argmax}[op]
if 'tie-break-left' in op:
x[3:10] = numpy_op(x)
x_tri = to_triton(x, device=device)
# numpy result
z_dtype_str = 'int32' if op in ('argmin', 'argmax') else dtype_str
z_tri_dtype_str = z_dtype_str
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# trunc mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
z_tri_dtype_str = 'bfloat16'
else:
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(numpy_random((1,), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK=shape)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op in ('argmin', 'argmax'):
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed by indices
np.testing.assert_equal(x[z_ref], x[z_tri])
else:
np.testing.assert_equal(z_ref, z_tri)
# TODO: [Qingyi] Fix argmin / argmax
reduce_configs1 = [
(op, dtype, (1, 1024), axis) for dtype in dtypes_with_bfloat16
for op in ['min', 'max', 'sum', 'argmin', 'argmax']
for axis in [1]
]
# shapes (128, 256) and (32, 1024) are not enabled on sm86 because the required shared memory
# exceeds the limit of 99KB
reduce2d_shapes = [(2, 32), (4, 32), (4, 128)]
# TODO: fix and uncomment
# , (32, 64), (64, 128)]
if torch.cuda.is_available() and 'V100' in torch.cuda.get_device_name(0):
    reduce2d_shapes += [(128, 256), (32, 1024)]
reduce_configs2 = [
(op, 'float32', shape, axis)
for op in ['min', 'max', 'sum', 'argmin', 'argmax']
for shape in reduce2d_shapes
for axis in [0, 1]
] + [
(op, 'float32', [16, 32], None)
for op in ['min', 'max', 'sum']
]
@pytest.mark.parametrize("op, dtype_str, shape, axis", reduce_configs1 + reduce_configs2)
def test_reduce2d(op, dtype_str, shape, axis, device):
check_type_supported(dtype_str, device) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_M)
range_n = tl.arange(0, BLOCK_N)
x = tl.load(X + range_m[:, None] * BLOCK_N + range_n[None, :])
z = GENERATE_TEST_HERE
if AXIS is None:
tl.store(Z, z)
elif AXIS == 1:
tl.store(Z + range_m, z)
else:
tl.store(Z + range_n, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis=AXIS)'})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
x_tri = to_triton(x, device=device)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'argmin': np.argmin, 'argmax': np.argmax}[op]
z_dtype_str = get_reduced_dtype(dtype_str, op)
z_tri_dtype_str = z_dtype_str
# numpy result
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_tri_dtype_str = 'bfloat16'
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# trunc mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
else:
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# triton result
ret_numel = 1 if axis is None else shape[1 - axis]
z_tri = to_triton(numpy_random((ret_numel,), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op in ('argmin', 'argmax'):
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed by indices
z_ref_index = np.expand_dims(z_ref, axis=axis)
z_tri_index = np.expand_dims(z_tri, axis=axis)
z_ref_value = np.take_along_axis(x, z_ref_index, axis=axis)
z_tri_value = np.take_along_axis(x, z_tri_index, axis=axis)
np.testing.assert_equal(z_ref_value, z_tri_value)
else:
np.testing.assert_equal(z_ref, z_tri)
scan2d_shapes = [(8, 32), (16, 32), (32, 16), (2, 1024), (1024, 2), (32, 32), (1, 1024)]
scan_configs = [
(op, type, shape, axis, num_warps)
for num_warps in [4, 16]
for type in ['int32', 'float32']
for axis in [1, 0]
for shape in scan2d_shapes
for op in ['cumsum', 'cumprod']
]
@pytest.mark.parametrize("op, dtype_str, shape, axis, num_warps", scan_configs)
def test_scan2d(op, dtype_str, shape, axis, num_warps, device):
check_type_supported(dtype_str, device)
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_M)
range_n = tl.arange(0, BLOCK_N)
x = tl.load(X + range_m[:, None] * BLOCK_N + range_n[None, :])
z = GENERATE_TEST_HERE
tl.store(Z + range_m[:, None] * BLOCK_N + range_n[None, :], z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis={axis})'})
# input
rs = RandomState(17)
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
z = np.empty_like(x)
x_tri = to_triton(x, device=device)
numpy_op = {'cumsum': np.cumsum, 'cumprod': np.cumprod}[op]
z_dtype_str = dtype_str
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(z, device=device)
kernel[(1,)](x_tri, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis, num_warps=num_warps)
z_tri = to_numpy(z_tri)
# compare
if dtype_str == 'float32':
if op == 'cumprod':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01, atol=1e-3)
else:
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
np.testing.assert_equal(z_ref, z_tri)
scan_layouts = [
BlockedLayout([1, 4], [4, 8], [4, 1], [0, 1]),
BlockedLayout([1, 4], [8, 4], [4, 1], [0, 1]),
BlockedLayout([4, 1], [4, 8], [1, 4], [0, 1]),
BlockedLayout([2, 2], [4, 8], [2, 2], [0, 1]),
BlockedLayout([2, 2], [8, 4], [2, 2], [0, 1]),
BlockedLayout([1, 4], [4, 8], [4, 1], [1, 0]),
BlockedLayout([1, 4], [8, 4], [4, 1], [1, 0]),
BlockedLayout([4, 1], [4, 8], [1, 4], [1, 0]),
BlockedLayout([2, 2], [4, 8], [2, 2], [1, 0]),
BlockedLayout([2, 2], [8, 4], [2, 2], [1, 0]),
]
@pytest.mark.parametrize("M, N", [[32, 32], [32, 64], [64, 32]])
@pytest.mark.parametrize("src_layout", scan_layouts)
@pytest.mark.parametrize("axis", [0, 1])
def test_scan_layouts(M, N, src_layout, axis, device):
ir = f"""
#blocked = {src_layout}
module attributes {{"triton_gpu.num-warps" = 4 : i32, "triton_gpu.threads-per-warp" = 32 : i32}} {{
tt.func public @kernel_0d1d(%arg0: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}, %arg1: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}) {{
%cst = arith.constant dense<{N}> : tensor<{M}x1xi32, #blocked>
%0 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #blocked}}>>
%1 = tt.expand_dims %0 {{axis = 1 : i32}} : (tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #blocked}}>>) -> tensor<{M}x1xi32, #blocked>
%2 = arith.muli %1, %cst : tensor<{M}x1xi32, #blocked>
%3 = tt.splat %arg0 : (!tt.ptr<i32>) -> tensor<{M}x1x!tt.ptr<i32>, #blocked>
%4 = tt.addptr %3, %2 : tensor<{M}x1x!tt.ptr<i32>, #blocked>, tensor<{M}x1xi32, #blocked>
%5 = tt.make_range {{end = {N} : i32, start = 0 : i32}} : tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #blocked}}>>
%6 = tt.expand_dims %5 {{axis = 0 : i32}} : (tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #blocked}}>>) -> tensor<1x{N}xi32, #blocked>
%7 = tt.broadcast %4 : (tensor<{M}x1x!tt.ptr<i32>, #blocked>) -> tensor<{M}x{N}x!tt.ptr<i32>, #blocked>
%8 = tt.broadcast %6 : (tensor<1x{N}xi32, #blocked>) -> tensor<{M}x{N}xi32, #blocked>
%9 = tt.addptr %7, %8 : tensor<{M}x{N}x!tt.ptr<i32>, #blocked>, tensor<{M}x{N}xi32, #blocked>
%10 = tt.load %9 {{cache = 1 : i32, evict = 1 : i32, isVolatile = false}} : tensor<{M}x{N}xi32, #blocked>
%11 = "tt.scan"(%10) <{{axis = {axis} : i32}}> ({{
^bb0(%arg2: i32, %arg3: i32):
%16 = arith.addi %arg2, %arg3 : i32
tt.scan.return %16 : i32
}}) : (tensor<{M}x{N}xi32, #blocked>) -> tensor<{M}x{N}xi32, #blocked>
%12 = tt.splat %arg1 : (!tt.ptr<i32>) -> tensor<{M}x1x!tt.ptr<i32>, #blocked>
%13 = tt.addptr %12, %2 : tensor<{M}x1x!tt.ptr<i32>, #blocked>, tensor<{M}x1xi32, #blocked>
%14 = tt.broadcast %13 : (tensor<{M}x1x!tt.ptr<i32>, #blocked>) -> tensor<{M}x{N}x!tt.ptr<i32>, #blocked>
%15 = tt.addptr %14, %8 : tensor<{M}x{N}x!tt.ptr<i32>, #blocked>, tensor<{M}x{N}xi32, #blocked>
tt.store %15, %11 {{cache = 1 : i32, evict = 1 : i32}} : tensor<{M}x{N}xi32, #blocked>
tt.return
}}
}}
"""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
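        # compile the hand-written TTGIR directly; shapes and layouts are fixed by the IR above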
kernel = triton.compile(f.name)
rs = RandomState(17)
x = rs.randint(-100, 100, (M, N)).astype('int32')
z = np.zeros((M, N)).astype('int32')
x_tri = torch.tensor(x, device=device)
z_tri = torch.tensor(z, device=device)
kernel[(1, 1, 1)](x_tri, z_tri)
z_ref = np.cumsum(x, axis=axis)
np.testing.assert_equal(z_ref, z_tri.cpu().numpy())
layouts = [
BlockedLayout([1, 4], [8, 4], [4, 1], [1, 0]),
BlockedLayout([1, 4], [8, 4], [4, 1], [0, 1]),
BlockedLayout([4, 4], [2, 16], [4, 1], [1, 0]),
MmaLayout(version=(2, 0), warps_per_cta=[4, 1]),
MmaLayout(version=(2, 0), warps_per_cta=[2, 2])
]
@pytest.mark.parametrize("M, N", [[128, 16], [128, 128], [32, 128], [32, 32]])
@pytest.mark.parametrize("src_layout", layouts)
@pytest.mark.parametrize("axis", [0, 1])
def test_reduce_layouts(M, N, src_layout, axis, device):
rdims_2d = f"1x{N}" if axis == 0 else f"{M}x1"
rdims_1d = f"{N}" if axis == 0 else f"{M}"
store_range = "%7" if axis == 0 else "%1"
ir = f"""
#blocked = #triton_gpu.blocked<{{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}}>
#src = {src_layout}
module attributes {{"triton_gpu.num-warps" = 4 : i32}} {{
tt.func public @kernel_0d1d2c3d4c(%arg0: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}, %arg1: i32 {{tt.divisibility = 16 : i32}}, %arg2: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}) {{
%0 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #blocked}}>>
%1 = tt.expand_dims %0 {{axis = 1 : i32}} : (tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #blocked}}>>) -> tensor<{M}x1xi32, #blocked>
%2 = tt.splat %arg1 : (i32) -> tensor<{M}x1xi32, #blocked>
%3 = arith.muli %1, %2 : tensor<{M}x1xi32, #blocked>
%4 = tt.splat %arg0 : (!tt.ptr<i32>) -> tensor<{M}x1x!tt.ptr<i32>, #blocked>
%5 = tt.addptr %4, %3 : tensor<{M}x1x!tt.ptr<i32>, #blocked>, tensor<{M}x1xi32, #blocked>
%6 = tt.make_range {{end = {N} : i32, start = 0 : i32}} : tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #blocked}}>>
%7 = tt.expand_dims %6 {{axis = 0 : i32}} : (tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #blocked}}>>) -> tensor<1x{N}xi32, #blocked>
%8 = tt.broadcast %5 : (tensor<{M}x1x!tt.ptr<i32>, #blocked>) -> tensor<{M}x{N}x!tt.ptr<i32>, #blocked>
%9 = tt.broadcast %7 : (tensor<1x{N}xi32, #blocked>) -> tensor<{M}x{N}xi32, #blocked>
%10 = tt.addptr %8, %9 : tensor<{M}x{N}x!tt.ptr<i32>, #blocked>, tensor<{M}x{N}xi32, #blocked>
%11 = tt.splat %arg2 : (!tt.ptr<i32>) -> tensor<{rdims_2d}x!tt.ptr<i32>, #blocked>
%12 = tt.addptr %11, {store_range} : tensor<{rdims_2d}x!tt.ptr<i32>, #blocked>, tensor<{rdims_2d}xi32, #blocked>
%13 = tt.load %10 {{cache = 1 : i32, evict = 1 : i32, isVolatile = false}} : tensor<{M}x{N}xi32, #blocked>
%14 = triton_gpu.convert_layout %13 : (tensor<{M}x{N}xi32, #blocked>) -> tensor<{M}x{N}xi32, #src>
%15 = "tt.reduce"(%14) ({{
^bb0(%arg3: i32, %arg4: i32):
%17 = arith.addi %arg3, %arg4 : i32
tt.reduce.return %17 : i32
}}) {{axis = {axis} : i32}} : (tensor<{M}x{N}xi32, #src>) -> tensor<{rdims_1d}xi32, #triton_gpu.slice<{{dim = {axis}, parent = #src}}>>
%18 = triton_gpu.convert_layout %15 : (tensor<{rdims_1d}xi32, #triton_gpu.slice<{{dim = {axis}, parent = #src}}>>) -> tensor<{rdims_1d}xi32, #triton_gpu.slice<{{dim = {axis}, parent = #blocked}}>>
%19 = tt.expand_dims %18 {{axis = {axis} : i32}} : (tensor<{rdims_1d}xi32, #triton_gpu.slice<{{dim = {axis}, parent = #blocked}}>>) -> tensor<{rdims_2d}xi32, #blocked>
tt.store %12, %19 {{cache = 1 : i32, evict = 1 : i32}} : tensor<{rdims_2d}xi32, #blocked>
tt.return
}}
}}
"""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
kernel = triton.compile(f.name)
rs = RandomState(17)
x = rs.randint(0, 20, (M, N)).astype('int32')
if axis == 0:
z = np.zeros((1, N)).astype('int32')
else:
z = np.zeros((M, 1)).astype('int32')
x_tri = torch.tensor(x, device=device)
z_tri = torch.tensor(z, device=device)
pgm = kernel[(1, 1, 4)](x_tri, x_tri.stride(0), z_tri)
z_ref = np.sum(x, axis=axis, keepdims=True)
np.testing.assert_equal(z_ref, z_tri.cpu().numpy())
layouts = [
BlockedLayout([1, 4], [1, 32], [4, 1], [1, 0]),
BlockedLayout([1, 4], [1, 32], [2, 2], [1, 0]),
MmaLayout(version=(2, 0), warps_per_cta=[4, 1])
]
@pytest.mark.parametrize("M", [32, 64, 128, 256])
@pytest.mark.parametrize("src_layout", layouts)
def test_store_op(M, src_layout, device):
ir = f"""
#src = {src_layout}
module attributes {{"triton_gpu.num-warps" = 4 : i32}} {{
tt.func public @kernel(%arg0: !tt.ptr<f32> {{tt.divisibility = 16 : i32}}, %arg1: !tt.ptr<f32> {{tt.divisibility = 16 : i32}}) {{
%0 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%1 = tt.splat %arg0 : (!tt.ptr<f32>) -> tensor<{M}x!tt.ptr<f32>, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%2 = tt.addptr %1, %0 : tensor<{M}x!tt.ptr<f32>, #triton_gpu.slice<{{dim = 1, parent = #src}}>>, tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%3 = tt.load %2 {{cache = 1 : i32, evict = 1 : i32, isVolatile = false}} : tensor<{M}xf32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%4 = tt.expand_dims %3 {{axis = 1 : i32}} : (tensor<{M}xf32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>) -> tensor<{M}x1xf32, #src>
%5 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%6 = tt.expand_dims %5 {{axis = 1 : i32}} : (tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>) -> tensor<{M}x1xi32, #src>
%7 = tt.splat %arg1 : (!tt.ptr<f32>) -> tensor<{M}x1x!tt.ptr<f32>, #src>
%8 = tt.addptr %7, %6 : tensor<{M}x1x!tt.ptr<f32>, #src>, tensor<{M}x1xi32, #src>
tt.store %8, %4 : tensor<{M}x1xf32, #src>
tt.return
}}
}}
"""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
store_kernel = triton.compile(f.name)
rs = RandomState(17)
x = rs.randint(0, 4, (M, 1)).astype('float32')
y = np.zeros((M, 1), dtype='float32')
x_tri = torch.tensor(x, device=device)
y_tri = torch.tensor(y, device=device)
pgm = store_kernel[(1, 1, 1)](x_tri, y_tri)
y_ref = x
np.testing.assert_allclose(y_ref, y_tri.cpu().numpy(), rtol=0.01, atol=1e-3)
layouts = [
BlockedLayout([1, 4], [1, 32], [4, 1], [1, 0]),
BlockedLayout([1, 4], [1, 32], [2, 2], [1, 0]),
MmaLayout(version=(2, 0), warps_per_cta=[4, 1])
]
@pytest.mark.parametrize("M", [64, 128, 256])
@pytest.mark.parametrize("src_layout", layouts)
@pytest.mark.parametrize("dst_layout", layouts)
@pytest.mark.parametrize("src_dim", [0, 1])
@pytest.mark.parametrize("dst_dim", [0, 1])
def test_convert1d(M, src_layout, dst_layout, src_dim, dst_dim, device):
ir = f"""
#dst = {dst_layout}
#src = {src_layout}
module attributes {{"triton_gpu.num-warps" = 4 : i32}} {{
tt.func public @kernel(%arg0: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}, %arg1: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}) {{
%0 = tt.splat %arg0 : (!tt.ptr<i32>) -> tensor<{M}x!tt.ptr<i32>, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>
%1 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>
%2 = tt.addptr %0, %1 : tensor<{M}x!tt.ptr<i32>, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>, tensor<{M}xi32, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>
%3 = tt.load %2 {{cache = 1 : i32, evict = 1 : i32, isVolatile = false}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>
%4 = tt.splat %arg1 : (!tt.ptr<i32>) -> tensor<{M}x!tt.ptr<i32>, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>
%5 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>
%6 = tt.addptr %4, %5 : tensor<{M}x!tt.ptr<i32>, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>, tensor<{M}xi32, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>
%7 = triton_gpu.convert_layout %3 : (tensor<{M}xi32, #triton_gpu.slice<{{dim = {src_dim}, parent = #src}}>>) -> tensor<{M}xi32, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>
tt.store %6, %7 : tensor<{M}xi32, #triton_gpu.slice<{{dim = {dst_dim}, parent = #dst}}>>
tt.return
}}
}}
"""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
kernel = triton.compile(f.name)
rs = RandomState(17)
x = rs.randint(0, 4, (M, )).astype('int32')
y = np.zeros((M, ), dtype='int32')
x_tri = torch.tensor(x, device=device)
y_tri = torch.tensor(y, device=device)
pgm = kernel[(1, 1, 1)](x_tri, y_tri)
y_ref = x
np.testing.assert_allclose(y_ref, y_tri.cpu().numpy(), rtol=0.01, atol=1e-3)
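# Parallel (Chan-style) combination of (mean, M2, weight) triples used by tl.reduce below.
# Worked example: combining (1.0, 0.0, 1) with (3.0, 0.0, 1) gives delta = 2, new_weight = 2,
# w2_over_w = 0.5, hence mean = 1 + 2 * 0.5 = 2, M2 = 0 + 0 + 4 * 1 * 0.5 = 2, and
# variance = M2 / weight = 1, matching the population variance of {1, 3}.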
@triton.jit
def _welford_combine(mean_1, m2_1, weight_1, mean_2, m2_2, weight_2):
delta = mean_2 - mean_1
new_weight = weight_1 + weight_2
w2_over_w = weight_2 / new_weight
return (
mean_1 + delta * w2_over_w,
m2_1 + m2_2 + delta * delta * weight_1 * w2_over_w,
new_weight,
)
layouts = [
BlockedLayout([1, 4], [1, 32], [4, 1], [1, 0]),
BlockedLayout([1, 4], [1, 32], [2, 2], [1, 0]),
BlockedLayout([1, 4], [1, 32], [1, 4], [1, 0]),
BlockedLayout([1, 4], [8, 4], [2, 2], [0, 1])
]
@pytest.mark.parametrize("M, N", [[128, 128], [256, 128], [256, 256], [128, 256]])
@pytest.mark.parametrize("src_layout", layouts)
@pytest.mark.parametrize("op", ["sum", "max"])
@pytest.mark.parametrize("first_axis", [0, 1])
def test_chain_reduce(M, N, src_layout, op, device, first_axis):
op_str = ""
if op == "sum":
op_str = f"""
%13 = arith.addi %arg2, %arg3 : i32
tt.reduce.return %13 : i32"""
elif op == "max":
op_str = f"""
%13 = "triton_gpu.cmpi"(%arg2, %arg3) <{{predicate = 4 : i64}}> : (i32, i32) -> i1
%14 = arith.select %13, %arg2, %arg3 : i32
tt.reduce.return %14 : i32"""
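    # in the 'max' branch, predicate = 4 is MLIR arith.cmpi's 'sgt' (signed greater-than),
    # so the select keeps the larger operand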
ir = f"""
#src = {src_layout}
module attributes {{"triton_gpu.num-warps" = 4 : i32}} {{
tt.func public @sum_kernel_0d1d(%arg0: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}, %arg1: !tt.ptr<i32> {{tt.divisibility = 16 : i32}}) {{
%cst = arith.constant dense<{N}> : tensor<{M}x1xi32, #src>
%0 = tt.make_range {{end = {M} : i32, start = 0 : i32}} : tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>
%1 = tt.expand_dims %0 {{axis = 1 : i32}} : (tensor<{M}xi32, #triton_gpu.slice<{{dim = 1, parent = #src}}>>) -> tensor<{M}x1xi32, #src>
%2 = arith.muli %1, %cst : tensor<{M}x1xi32, #src>
%3 = tt.make_range {{end = {N} : i32, start = 0 : i32}} : tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #src}}>>
%4 = tt.expand_dims %3 {{axis = 0 : i32}} : (tensor<{N}xi32, #triton_gpu.slice<{{dim = 0, parent = #src}}>>) -> tensor<1x{N}xi32, #src>
%5 = tt.broadcast %2 : (tensor<{M}x1xi32, #src>) -> tensor<{M}x{N}xi32, #src>
%6 = tt.broadcast %4 : (tensor<1x{N}xi32, #src>) -> tensor<{M}x{N}xi32, #src>
%7 = arith.addi %5, %6 : tensor<{M}x{N}xi32, #src>
%8 = tt.splat %arg0 : (!tt.ptr<i32>) -> tensor<{M}x{N}x!tt.ptr<i32>, #src>
%9 = tt.addptr %8, %7 : tensor<{M}x{N}x!tt.ptr<i32>, #src>, tensor<{M}x{N}xi32, #src>
%10 = tt.load %9 {{cache = 1 : i32, evict = 1 : i32, isVolatile = false}} : tensor<{M}x{N}xi32, #src>
%11 = "tt.reduce"(%10) ({{
^bb0(%arg2: i32, %arg3: i32):
{op_str}
}}) {{axis = {first_axis} : i32}} : (tensor<{M}x{N}xi32, #src>) -> tensor<{M if first_axis == 1 else N}xi32, #triton_gpu.slice<{{dim = {first_axis}, parent = #src}}>>
%12 = "tt.reduce"(%11) ({{
^bb0(%arg2: i32, %arg3: i32):
{op_str}
}}) {{axis = 0 : i32}} : (tensor<{M if first_axis == 1 else N}xi32, #triton_gpu.slice<{{dim = {first_axis}, parent = #src}}>>) -> i32
tt.store %arg1, %12 {{cache = 1 : i32, evict = 1 : i32}} : i32
tt.return
}}
}}
"""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
kernel = triton.compile(f.name)
rs = RandomState(17)
x = rs.randint(0, 4, (M, N)).astype('int32')
z = np.zeros((1,)).astype('int32')
x_tri = torch.tensor(x, device=device)
z_tri = torch.tensor(z, device=device)
pgm = kernel[(1, 1, 1)](x_tri, z_tri)
if op == "sum":
z_ref = np.sum(x)
elif op == "max":
z_ref = np.max(x)
np.testing.assert_allclose(z_ref, z_tri.cpu().numpy(), rtol=0.01, atol=1e-3)
def test_generic_reduction(device):
@triton.jit
def var_mean_kernel(X, out_mean, out_var, BLOCK: tl.constexpr):
xindex = tl.arange(0, BLOCK)
x = tl.load(X + xindex)
mean = x
m2 = tl.zeros_like(x)
weight = tl.full(x.shape, 1, x.dtype)
(mean, m2, weight) = tl.reduce((mean, m2, weight), 0, _welford_combine)
tl.store(out_mean, mean)
tl.store(out_var, m2 / weight)
SIZE = 512
x = torch.rand(SIZE, device=device)
out_mean = torch.empty((), device=device)
out_var = torch.empty((), device=device)
var_mean_kernel[(1,)](x, out_mean, out_var, BLOCK=SIZE)
expect_var, expect_mean = torch.var_mean(x, dim=0, correction=0)
torch.testing.assert_close(out_mean, expect_mean)
torch.testing.assert_close(out_var, expect_var)
# ---------------
# test permute
# ---------------
@pytest.mark.parametrize("dtype_str, shape, perm",
[(dtype, shape, perm)
# TODO: bfloat16
for dtype in ['float16', 'float32']
for shape in [(64, 64), (128, 128)]
for perm in [(1, 0)]])
def test_permute(dtype_str, shape, perm, device):
check_type_supported(dtype_str, device) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xn,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
# input
x = numpy_random(shape, dtype_str=dtype_str)
# triton result
z_tri = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
z_tri_contiguous = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
x_tri = to_triton(x, device=device, dst_type=dtype_str)
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
z_tri, z_tri.stride(1), z_tri.stride(0),
BLOCK_M=shape[0], BLOCK_N=shape[1])
pgm_contiguous = kernel[(1, 1)](x_tri, x_tri.stride(1), x_tri.stride(0),
z_tri_contiguous, z_tri_contiguous.stride(0), z_tri_contiguous.stride(1),
BLOCK_M=shape[0], BLOCK_N=shape[1])
# numpy result
z_ref = x.transpose(*perm)
# compare
np.testing.assert_allclose(to_numpy(z_tri), z_ref)
np.testing.assert_allclose(to_numpy(z_tri_contiguous), z_ref)
# parse ptx to make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
ptx = pgm_contiguous.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
# ---------------
# test dot
# ---------------
@pytest.mark.parametrize("M, N, K, num_warps, col_a, col_b, epilogue, allow_tf32, in_dtype, out_dtype",
[(*shape, 4, False, False, epilogue, allow_tf32, in_dtype, out_dtype)
for shape in [(64, 64, 64), (16, 16, 16)]
for epilogue in ['none', 'trans', 'add-matrix', 'add-rows', 'add-cols', 'softmax', 'chain-dot']
for allow_tf32 in [True, False]
for in_dtype, out_dtype in [('float16', 'float16'),
('float16', 'float32'),
('float32', 'float32')]
if not (allow_tf32 and (in_dtype in ['float16']))] +
[(*shape_nw, col_a, col_b, 'none', allow_tf32, in_dtype, out_dtype)
for shape_nw in [[128, 256, 32, 8],
[128, 16, 32, 4],
[32, 128, 64, 4],
[128, 128, 64, 4],
[64, 128, 128, 4],
[32, 128, 64, 2],
[64, 64, 32, 4],
[32, 32, 128, 16],
[128, 128, 64, 2],
[64, 128, 128, 2]]
for allow_tf32 in [True]
for col_a in [True, False]
for col_b in [True, False]
for in_dtype, out_dtype in [('int8', 'int8'),
('float16', 'float16'),
('float16', 'float32'),
('float32', 'float32')]])
def test_dot(M, N, K, num_warps, col_a, col_b, epilogue, allow_tf32, in_dtype, out_dtype, device):
check_cuda_only(device)
capability = torch.cuda.get_device_capability()
if capability[0] < 7:
pytest.skip("Only test tl.dot() on devices with sm >= 70")
if capability[0] < 8:
if in_dtype == 'int8':
pytest.skip("Only test int8 on devices with sm >= 80")
elif in_dtype == 'float32' and allow_tf32:
pytest.skip("Only test tf32 on devices with sm >= 80")
if capability[0] == 7:
if (M, N, K, num_warps) == (128, 256, 32, 8):
pytest.skip("shared memory out of resource")
if out_dtype == 'float16':
# TODO: support out_dtype=float16 for tl.dot on V100
pytest.skip("Only test out_dtype=float16 on devices with sm >=80")
torch.backends.cuda.matmul.allow_tf32 = allow_tf32
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xk,
Y, stride_yk, stride_yn,
W, stride_wn, stride_wl,
Z, stride_zm, stride_zn,
out_dtype: tl.constexpr,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
ADD_MATRIX: tl.constexpr, ADD_ROWS: tl.constexpr, ADD_COLS: tl.constexpr,
ALLOW_TF32: tl.constexpr,
DO_SOFTMAX: tl.constexpr, CHAIN_DOT: tl.constexpr,
COL_A: tl.constexpr, COL_B: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
off_l = tl.arange(0, BLOCK_N)
off_k = tl.arange(0, BLOCK_K)
Xs = X + off_m[:, None] * stride_xm + off_k[None, :] * stride_xk
Ys = Y + off_k[:, None] * stride_yk + off_n[None, :] * stride_yn
Ws = W + off_n[:, None] * stride_wn + off_l[None, :] * stride_wl
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
x = tl.load(Xs)
y = tl.load(Ys)
z = tl.dot(x, y, allow_tf32=ALLOW_TF32, out_dtype=out_dtype)
if ADD_MATRIX:
z += tl.load(Zs)
if ADD_ROWS:
ZRs = Z + off_m * stride_zm
z += tl.load(ZRs)[:, None]
if ADD_COLS:
ZCs = Z + off_n * stride_zn
z += tl.load(ZCs)[None, :]
if DO_SOFTMAX:
max = tl.max(z, 1)
z = z - max[:, None]
num = tl.exp(z.to(tl.float32)).to(max.dtype)
den = tl.sum(num, 1)
z = num / den[:, None]
if CHAIN_DOT:
w = tl.load(Ws)
z = tl.dot(z.to(w.dtype), w, out_dtype=out_dtype)
tl.store(Zs, z)
# input
rs = RandomState(17)
if col_a:
x = numpy_random((K, M), dtype_str=in_dtype, rs=rs).T
else:
x = numpy_random((M, K), dtype_str=in_dtype, rs=rs)
if col_b:
y = numpy_random((N, K), dtype_str=in_dtype, rs=rs).T
else:
y = numpy_random((K, N), dtype_str=in_dtype, rs=rs)
w = numpy_random((N, N), dtype_str=in_dtype, rs=rs)
if 'int' not in in_dtype:
x *= .1
y *= .1
if in_dtype == 'float32' and allow_tf32:
x = (x.view('uint32') & np.uint32(0xffffe000)).view('float32')
y = (y.view('uint32') & np.uint32(0xffffe000)).view('float32')
w = (w.view('uint32') & np.uint32(0xffffe000)).view('float32')
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
w_tri = to_triton(w, device=device)
# triton result
if out_dtype == 'int8':
z = 1 + numpy_random((M, N), dtype_str='int32', rs=rs)
else:
z = 1 + numpy_random((M, N), dtype_str=in_dtype, rs=rs) * .1
z_tri = to_triton(z, device=device)
if epilogue == 'trans':
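        # reversing the strides makes z_tri a transposed (non-contiguous) view, exercising
        # the store path of the 'trans' epilogue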
z_tri = torch.as_strided(z_tri, (M, N), z_tri.stride()[::-1])
if out_dtype == 'int8':
out_dtype = tl.int8
elif out_dtype == 'float16' and epilogue != 'softmax':
# TODO: for out_dtype == 'float16' and epilogue == 'softmax', it will
# fail with the following error: 'llvm.fmul' op requires the same type
# for all operands and results
out_dtype = tl.float16
else:
out_dtype = tl.float32
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
y_tri, y_tri.stride(0), y_tri.stride(1),
w_tri, w_tri.stride(0), w_tri.stride(1),
z_tri, z_tri.stride(0), z_tri.stride(1),
out_dtype,
COL_A=col_a, COL_B=col_b,
BLOCK_M=M, BLOCK_K=K, BLOCK_N=N,
ADD_MATRIX=epilogue == 'add-matrix',
ADD_ROWS=epilogue == 'add-rows',
ADD_COLS=epilogue == 'add-cols',
DO_SOFTMAX=epilogue == 'softmax',
CHAIN_DOT=epilogue == 'chain-dot',
ALLOW_TF32=allow_tf32,
num_warps=num_warps)
if epilogue == 'softmax' and (in_dtype != 'float32' or allow_tf32):
ptx = pgm.asm["ptx"]
start = ptx.find("shfl.sync")
end = ptx.find("cvt.rn.f16.f32")
red_code = ptx[start:end]
assert len(red_code) > 0
assert "shared" not in red_code
assert "bar.sync" not in red_code
# torch result
if in_dtype == 'int8':
        z_ref = np.matmul(x.astype(np.float32),
                          y.astype(np.float32)).astype(np.int32)
else:
z_ref = np.matmul(x, y)
if epilogue == 'add-matrix':
z_ref += z
if epilogue == 'add-rows':
z_ref += z[:, 0][:, None]
if epilogue == 'add-cols':
z_ref += z[0, :][None, :]
if epilogue == 'softmax':
num = np.exp(z_ref - np.max(z_ref, axis=-1, keepdims=True))
denom = np.sum(num, axis=-1, keepdims=True)
z_ref = num / denom
if epilogue == 'chain-dot':
z_ref = np.matmul(z_ref, w)
# compare
# print(z_ref[:,0], z_tri[:,0])
if in_dtype == 'float32':
# XXX: Somehow there's a larger difference when we use float32
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01, atol=1e-3)
elif out_dtype == tl.float16:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01, atol=1e-3)
else:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
# make sure ld/st are vectorized
ptx = pgm.asm['ptx']
if (K > 16 or N > 16 or M > 16) and (M * N // (num_warps * 32) >= 4):
# XXX: skip small sizes because they are not vectorized
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
if in_dtype == 'float32' and allow_tf32:
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' in ptx
elif in_dtype == 'float32' and allow_tf32:
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' not in ptx
elif in_dtype == 'int8':
assert 'mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32' in ptx
elif out_dtype == tl.float16:
assert 'mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16' in ptx
@pytest.mark.parametrize('in_dtype', ['float32'])
def test_dot_mulbroadcastred(in_dtype, device):
@triton.jit
def kernel(Z, X, Y,
M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
BM: tl.constexpr, BN: tl.constexpr, BK: tl.constexpr):
pidn = tl.program_id(1)
pidm = tl.program_id(0)
offm = tl.arange(0, BM)[:, None]
offn = tl.arange(0, BN)[None, :]
offak = tl.arange(0, BK)[None, :]
offbk = tl.arange(0, BK)[:, None]
acc = tl.full((BM, BN), 0.0, tl.float32)
for ridx5 in range(0, K // BK):
x = tl.load(X + ((pidm * K * BM) + (offm * K) + (ridx5 * BK) + offak))
y = tl.load(Y + ((pidn * BN) + (offbk * N) + (ridx5 * N * BK) + offn))
x = tl.expand_dims(x, axis=2)
y = tl.expand_dims(y, axis=0)
t = tl.sum(x * y, axis=1)
acc = t + acc
tl.store(Z + ((pidm * BM * N) + (pidn * BN) + (offm * N) + offn), acc)
M, N, K = 256, 192, 160
BM, BN, BK = 128, 32, 32
rs = RandomState(17)
x = numpy_random((M, K), dtype_str=in_dtype, rs=rs)
y = numpy_random((K, N), dtype_str=in_dtype, rs=rs)
x = x * 0.1
y = y * 0.1
z = numpy_random((M, N), dtype_str=in_dtype, rs=rs)
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
z_tri = to_triton(z, device=device)
grid = M // BM, N // BN
h = kernel[grid](z_tri, x_tri, y_tri, M, N, K, BM, BN, BK)
z_ref = np.matmul(x, y)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), atol=0.01)
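    # the expand_dims + broadcast-multiply + sum(axis=1) pattern is expected to be rewritten
    # into a real tt.dot and software-pipelined, hence the async_wait in the TTGIR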
assert "tt.dot" in h.asm['ttir']
assert "triton_gpu.async_wait {num = 2 : i32}" in h.asm['ttgir']
@pytest.mark.parametrize("dtype_str", int_dtypes + uint_dtypes + float_dtypes + ['bfloat16'])
def test_full(dtype_str, device):
    if dtype_str in uint_dtypes and not hasattr(torch, dtype_str):
        # PyTorch only has an unsigned 8-bit integer type, not 16, 32, or 64
        dtype = getattr(torch, dtype_str[1:])  # uintx -> intx
else:
dtype = getattr(torch, dtype_str)
check_type_supported(dtype, device) # bfloat16 on cc < 80 will not be tested
@triton.jit
def kernel_static(out):
a = GENERATE_TEST_HERE
out_ptr = out + tl.arange(0, 128)[:]
tl.store(out_ptr, a)
@triton.jit
def kernel_dynamic(out, val, dtype: tl.constexpr):
a = tl.full((128,), val, dtype)
out_ptr = out + tl.arange(0, 128)[:]
tl.store(out_ptr, a)
kernel_static_patched = patch_kernel(kernel_static, {'GENERATE_TEST_HERE': f"tl.full((128,), 2, tl.{dtype_str})"})
out_static = torch.zeros((128), dtype=dtype, device=device)
kernel_static_patched[(1,)](out_static)
out_dynamic = torch.zeros((128), dtype=dtype, device=device)
kernel_dynamic[(1,)](out_dynamic, 2, getattr(triton.language, dtype_str))
assert torch.all(out_static == 2)
assert torch.all(out_dynamic == 2)
@pytest.mark.parametrize("literal, dtype_str",
[(1e+50, "f64"), (1e+10, "f32"), (1.0, "f32"),
('float("inf")', "f32"), ('float("-inf")', "f32"),
('float("nan")', "f32"), ('float("-nan")', "f32"),
(0., "f32"),
(5, "i32"), (2**40, "i64"),])
def test_constexpr(literal, dtype_str, device):
@triton.jit
def kernel(out_ptr):
val = GENERATE_TEST_HERE
tl.store(out_ptr.to(tl.pointer_type(val.dtype)), val)
kernel_patched = patch_kernel(kernel, {'GENERATE_TEST_HERE': f"{literal}"})
out = torch.zeros((1,), dtype=torch.float32, device=device)
h = kernel_patched[(1,)](out)
assert re.search(r"arith.constant .* : " + dtype_str, h.asm["ttir"]) is not None
@pytest.mark.parametrize("dtype_str", ['float32', 'float16'])
def test_dot_without_load(dtype_str, device):
@triton.jit
def _kernel(out):
a = GENERATE_TEST_HERE
b = GENERATE_TEST_HERE
c = tl.dot(a, b)
out_ptr = out + tl.arange(0, 32)[:, None] * 32 + tl.arange(0, 32)[None, :]
tl.store(out_ptr, c)
kernel = patch_kernel(_kernel, {'GENERATE_TEST_HERE': f"tl.full((32, 32), 1.0, tl.{dtype_str})"})
a = torch.ones((32, 32), dtype=getattr(torch, dtype_str), device=device)
b = torch.ones((32, 32), dtype=getattr(torch, dtype_str), device=device)
out_ref = torch.matmul(a, b)
out = torch.zeros((32, 32), dtype=getattr(torch, dtype_str), device=device)
kernel[(1,)](out)
assert torch.all(out == out_ref)
# ---------------
# test arange
# ---------------
@pytest.mark.parametrize("start", [0, 1, 7, 16])
def test_arange(start, device):
BLOCK = 128
z_tri = torch.empty(BLOCK, dtype=torch.int32, device=device)
@triton.jit
def _kernel(z, BLOCK: tl.constexpr,
START: tl.constexpr, END: tl.constexpr):
off = tl.arange(0, BLOCK)
val = tl.arange(START, END)
tl.store(z + off, val)
_kernel[(1,)](z_tri, START=start, END=start + BLOCK, BLOCK=BLOCK)
z_ref = torch.arange(start, BLOCK + start, dtype=torch.int32, device=device)
np.testing.assert_allclose(to_numpy(z_tri), to_numpy(z_ref))
# ---------------
# test load
# ---------------
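# test_masked_load reads `size - size_diff` elements into a block of `size`
# elements, so the tail of the output comes from the `other=` fill value.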
@pytest.mark.parametrize("dtype_str, size, size_diff", [(dtype_str, size, size_diff) for dtype_str in torch_dtypes for size in [128, 512] for size_diff in [0, 1, 2, 3, 4]])
def test_masked_load(dtype_str, size, size_diff, device):
dtype = getattr(torch, dtype_str)
check_type_supported(dtype, device) # bfloat16 on cc < 80 will not be tested
input_size = size - size_diff
output_size = size
if dtype_str == 'bool':
input = torch.randint(0, 2, (input_size,), dtype=dtype, device=device)
elif dtype_str in int_dtypes or dtype_str in uint_dtypes:
input = torch.randint(0, 127, (input_size,), dtype=dtype, device=device)
else:
input = torch.rand(input_size, dtype=dtype, device=device)
output = torch.zeros((output_size,), dtype=dtype, device=device)
@triton.jit
def _kernel(in_ptr, out_ptr, in_size: tl.constexpr, out_size: tl.constexpr):
in_offsets = tl.arange(0, out_size)
# Load inputs.
x = GENERATE_TEST_HERE
# Store output
output_offsets = tl.arange(0, out_size)
tl.store(out_ptr + output_offsets, x)
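    # With size_diff == 0 the input covers the whole block, so "None" is
    # substituted and the load is generated without a mask; otherwise the
    # out-of-bounds lanes are filled with other=1 to match reference_out below.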
mask_str = "mask=in_offsets < in_size, other=1" if size_diff > 0 else "None"
kernel = patch_kernel(_kernel, {'GENERATE_TEST_HERE': f"tl.load(in_ptr + in_offsets, {mask_str})"})
kernel[(1,)](input, output, input_size, output_size)
reference_out = torch.cat((input, torch.ones((size_diff,), dtype=dtype, device=device)))
# print((output - reference_out).nonzero())
torch.testing.assert_allclose(output, reference_out)
# Test masked loads combined with an intermediate copy to shared memory (forced by the tl.dot in the kernel).
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
def test_masked_load_shared_memory(dtype, device):
check_type_supported(dtype, device) # bfloat16 on cc < 80 will not be tested
M = 32
N = 32
K = 16
in1 = torch.rand((M, K), dtype=dtype, device=device)
in2 = torch.rand((K, N), dtype=dtype, device=device)
out = torch.zeros((M, N), dtype=dtype, device=device)
@triton.jit
def _kernel(in1_ptr, in2_ptr, output_ptr,
in_stride, in2_stride, out_stride,
in_numel, in2_numel, out_numel,
M: tl.constexpr, N: tl.constexpr, K: tl.constexpr):
M_offsets = tl.arange(0, M)
N_offsets = tl.arange(0, N)
K_offsets = tl.arange(0, K)
in_offsets = M_offsets[:, None] * in_stride + K_offsets[None, :]
in2_offsets = K_offsets[:, None] * in2_stride + N_offsets[None, :]
# Load inputs.
x = tl.load(in1_ptr + in_offsets, mask=in_offsets < M * K)
w = tl.load(in2_ptr + in2_offsets, mask=in2_offsets < K * N)
# Without a dot product the memory doesn't get promoted to shared.
o = tl.dot(x, w, out_dtype=tl.float32)
# Store output
output_offsets = M_offsets[:, None] * out_stride + N_offsets[None, :]
tl.store(output_ptr + output_offsets, o, mask=output_offsets < M * N)
pgm = _kernel[(1,)](in1, in2, out,
in1.stride()[0],
in2.stride()[0],
out.stride()[0],
in1.numel(),
in2.numel(),
out.numel(),
M=M, N=N, K=K)
reference_out = torch.matmul(in1, in2)
torch.testing.assert_allclose(out, reference_out, atol=1e-2, rtol=0)
@pytest.mark.parametrize("cache", ["", ".ca", ".cg"])
def test_load_cache_modifier(cache, device):
src = torch.empty(128, device=device)
dst = torch.empty(128, device=device)
@triton.jit
def _kernel(dst, src, CACHE: tl.constexpr):
offsets = tl.arange(0, 128)
x = tl.load(src + offsets, cache_modifier=CACHE)
tl.store(dst + offsets, x)
pgm = _kernel[(1,)](dst, src, CACHE=cache)
ptx = pgm.asm['ptx']
if cache == '':
assert 'ld.global.ca' not in ptx
assert 'ld.global.cg' not in ptx
if cache == '.cg':
assert 'ld.global.cg' in ptx
assert 'ld.global.ca' not in ptx
if cache == '.ca':
assert 'ld.global.ca' in ptx
assert 'ld.global.cg' not in ptx
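# When N is a multiple of 16 the masked load is expected to be vectorized into
# 128-bit accesses (ld.global.v4.b32); otherwise scalar b32 loads are emitted.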
@pytest.mark.parametrize("N", [16, 10, 11, 1024])
def test_vectorization(N, device):
src = torch.empty(1024, device=device)
dst = torch.empty(1024, device=device)
@triton.jit
def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(src + offsets, mask=offsets < N)
tl.store(dst + offsets, x, mask=offsets < N)
pgm = _kernel[(1,)](dst, src, N=N, BLOCK_SIZE=src.shape[0])
ptx = pgm.asm["ptx"]
if N % 16 == 0:
assert "ld.global.v4.b32" in ptx
else:
assert "ld.global.b32" in ptx
# np.testing.assert_allclose(dst, src[:N])
@pytest.mark.parametrize("has_hints", [False, True])
def test_vectorization_hints(has_hints, device):
src = torch.empty(1024, device=device)
dst = torch.empty(1024, device=device)
off = torch.zeros(1, device=device, dtype=torch.int32)
@triton.jit
def _kernel(dst, src, off, N, BLOCK_SIZE: tl.constexpr, HINT: tl.constexpr):
offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
offsets = offsets + tl.load(off)
if HINT:
tl.max_contiguous(tl.multiple_of(offsets, 1024), 1024)
x = tl.load(src + offsets, mask=offsets < N)
tl.store(dst + offsets, x, mask=offsets < N)
pgm = _kernel[(1,)](dst, src, off, N=1024, BLOCK_SIZE=src.shape[0], HINT=has_hints)
ptx = pgm.asm["ptx"]
if has_hints:
assert "ld.global.v4.b32" in ptx
else:
assert "ld.global.v4.b32" not in ptx
# ---------------
# test store
# ---------------
@pytest.mark.parametrize("cache", ["", ".wb", ".cg", ".cs", ".wt"])
def test_store_cache_modifier(cache):
src = torch.empty(128, device='cuda')
dst = torch.empty(128, device='cuda')
@triton.jit
def _kernel(dst, src, CACHE: tl.constexpr):
offsets = tl.arange(0, 128)
x = tl.load(src + offsets)
tl.store(dst + offsets, x, cache_modifier=CACHE)
pgm = _kernel[(1,)](dst, src, CACHE=cache)
ptx = pgm.asm['ptx']
if cache == '':
assert 'st.global.wb' not in ptx
assert 'st.global.cg' not in ptx
assert 'st.global.cs' not in ptx
assert 'st.global.wt' not in ptx
if cache == '.wb':
assert 'st.global.wb' in ptx
assert 'st.global.cg' not in ptx
assert 'st.global.cs' not in ptx
assert 'st.global.wt' not in ptx
if cache == '.cg':
assert 'st.global.wb' not in ptx
assert 'st.global.cg' in ptx
assert 'st.global.cs' not in ptx
assert 'st.global.wt' not in ptx
if cache == '.cs':
assert 'st.global.wb' not in ptx
assert 'st.global.cg' not in ptx
assert 'st.global.cs' in ptx
assert 'st.global.wt' not in ptx
if cache == '.wt':
assert 'st.global.wb' not in ptx
assert 'st.global.cg' not in ptx
assert 'st.global.cs' not in ptx
assert 'st.global.wt' in ptx
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
# ---------------
# test default
# ---------------
# TODO: can't be local to test_default
@triton.jit
def _impl(value=10):
return value
def test_default(device):
value = 5
ret0 = torch.zeros(1, dtype=torch.int32, device=device)
ret1 = torch.zeros(1, dtype=torch.int32, device=device)
@triton.jit
def _kernel(ret0, ret1, value=3):
tl.store(ret0, _impl())
tl.store(ret1, _impl(value))
_kernel[(1,)](ret0, ret1, value)
assert ret0.item() == 10
assert ret1.item() == value
_kernel[(1,)](ret0, ret1)
assert ret0.item() == 10
assert ret1.item() == 3
# ---------------
# test noop
# ----------------
def test_noop(device):
@triton.jit
def kernel(x):
pass
x = to_triton(numpy_random((1,), dtype_str='int32'), device=device)
kernel[(1, )](x)
@pytest.mark.parametrize("device", ['cuda', 'cpu', 'cpu_pinned'])
def test_pointer_arguments(device):
@triton.jit
def kernel(x):
pass
pin_memory = 'pinned' in device
x = torch.empty(1024, device=device.split('_')[0], pin_memory=pin_memory)
if device == "cpu":
with pytest.raises(ValueError):
kernel[(1,)](x)
else:
kernel[(1, )](x)
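# test_value_specialization installs a JITFunction.cache_hook to capture the
# signature the compiler recorded for the first argument, checking that integer
# scalars are specialized to i32/i64/u64 according to their range.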
@pytest.mark.parametrize("value, value_type", [
(-1, 'i32'), (0, 'i32'), (-2**31, 'i32'), (2**31 - 1, 'i32'),
(2**31, 'i64'), (2**32 - 1, 'i64'), (2**32, 'i64'), (2**63 - 1, 'i64'),
(-2**63, 'i64'), (2**63, 'u64'), (2**64 - 1, 'u64')
])
def test_value_specialization(value: int, value_type: str, device) -> None:
spec_type = None
def cache_hook(*args, **kwargs):
nonlocal spec_type
spec_type = kwargs["compile"]["signature"][0]
JITFunction.cache_hook = cache_hook
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device=device)
pgm = kernel[(1, )](value, x)
JITFunction.cache_hook = None
assert spec_type == value_type
# --------------------
# value specialization
# --------------------
@pytest.mark.parametrize(
"value, overflow",
[(2**64 - 1, False), (2**64, True), (-2**63, False), (-2**63 - 1, True)]
)
def test_value_specialization_overflow(value: int, overflow: bool, device) -> None:
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device=device)
if overflow:
with pytest.raises(OverflowError):
kernel[(1, )](value, x)
else:
kernel[(1, )](value, x)
# ----------------
# test constexpr
# ----------------
@pytest.mark.parametrize("op", ['+', '-', '*', '/', '%', '<', '>', '<<', '>>', '&', '^', '|'])
@pytest.mark.parametrize("is_lhs_constexpr", [False, True])
@pytest.mark.parametrize("is_rhs_constexpr", [True, False])
def test_bin_op_constexpr(op, is_lhs_constexpr, is_rhs_constexpr, device):
@triton.jit
def kernel(Z, X, Y):
x = tl.load(X)
y = tl.load(Y)
z = GENERATE_TEST_HERE
tl.store(Z, z)
if op in ['<<', '>>', '&', '^', '|']: # int op
x_str = "3" if is_lhs_constexpr else "x"
y_str = "4" if is_rhs_constexpr else "y"
x = numpy_random((1,), dtype_str="int32")
y = numpy_random((1,), dtype_str="int32")
else:
x_str = "3.14" if is_lhs_constexpr else "x"
y_str = "4.13" if is_rhs_constexpr else "y"
x = numpy_random((1,), dtype_str="float32")
y = numpy_random((1,), dtype_str="float32")
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f"{x_str} {op} {y_str}"})
z = np.array(eval(f"{x_str} {op} {y_str}"))
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
z_tri = to_triton(np.empty((1,), dtype=z.dtype), device=device)
kernel[(1,)](z_tri, x_tri, y_tri)
np.testing.assert_allclose(z, to_numpy(z_tri))
def test_constexpr_shape(device):
@triton.jit
def kernel(X):
off = tl.arange(0, 128 + 128)
tl.store(X + off, off)
x_tri = to_triton(np.empty((256, ), dtype=np.int32), device=device)
kernel[(1,)](x_tri)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256))
def test_constexpr_scalar_shape(device):
@triton.jit
def kernel(X, s):
off = tl.arange(0, 256)
val = off % (256 // s)
tl.store(X + off, val)
x_tri = to_triton(np.empty((256, ), dtype=np.int32), device=device)
kernel[(1,)](x_tri, 32)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256) % 8)
# -------------
# test call
# -------------
@triton.jit
def val_multiplier(val, i):
return val * i
@triton.jit(noinline=True)
def val_multiplier_noinline(val, i):
return val * i
@triton.jit
def vecmul_kernel(ptr, n_elements, rep, type: tl.constexpr):
pid = tl.program_id(axis=0)
offsets = pid * 128 + tl.arange(0, 128)
mask = offsets < n_elements
vec = tl.load(ptr + offsets, mask=mask)
for i in range(1, rep):
if type == "inline":
vec = val_multiplier(vec, i)
else:
vec = val_multiplier_noinline(vec, i)
tl.store(ptr + offsets, vec, mask=mask)
@pytest.mark.parametrize("type", ["inline", "noinline"])
def test_call(type, device):
@triton.jit
def kernel(ptr, n_elements, num1, num2, type: tl.constexpr):
vecmul_kernel(ptr, n_elements, num1, type)
vecmul_kernel(ptr, n_elements, num2, type)
size = 1024
rand_val = numpy_random((size,), dtype_str="float32")
rand_val_tri = to_triton(rand_val, device=device)
err_msg = ""
try:
kernel[(size // 128,)](rand_val_tri, size, 3, 5, type)
except Exception as e:
err_msg = str(e)
if type == "noinline":
        assert err_msg != ""
else:
ans = rand_val * 1 * 2 * 1 * 2 * 3 * 4
np.testing.assert_equal(to_numpy(rand_val_tri), ans)
# -------------
# test if
# -------------
@pytest.mark.parametrize("if_type", ["if", "if_exp", "if_and_dynamic", "if_and_static"])
def test_if(if_type, device):
@triton.jit
    def kernel(Cond, XTrue, XFalse, Ret, IfType: tl.constexpr, BoolVar: tl.constexpr, StaticValue: tl.constexpr):
pid = tl.program_id(0)
cond = tl.load(Cond)
if IfType == "if":
if pid % 2 == 0:
tl.store(Ret, tl.load(XTrue))
else:
tl.store(Ret, tl.load(XFalse))
elif IfType == "if_exp":
tl.store(Ret, tl.load(XTrue)) if pid % 2 else tl.store(Ret, tl.load(XFalse))
elif IfType == "if_and_dynamic":
if BoolVar and pid % 2 == 0:
tl.store(Ret, tl.load(XTrue))
else:
tl.store(Ret, tl.load(XFalse))
elif IfType == "if_and_static":
            if StaticValue != 0 and StaticValue != 0:
tl.store(Ret, tl.load(XTrue))
else:
tl.store(Ret, tl.load(XFalse))
cond = torch.ones(1, dtype=torch.int32, device=device)
x_true = torch.tensor([3.14], dtype=torch.float32, device=device)
x_false = torch.tensor([1.51], dtype=torch.float32, device=device)
ret = torch.empty(1, dtype=torch.float32, device=device)
kernel[(1,)](cond, x_true, x_false, ret, if_type, True, 1)
assert torch.equal(ret, x_true)
def test_num_warps_pow2(device):
dst = torch.empty(128, device=device)
@triton.jit
def _kernel(dst):
pass
with pytest.raises(AssertionError, match='must be a power of 2'):
_kernel[(1,)](dst=dst, num_warps=3)
_kernel[(1,)](dst=dst, num_warps=1)
_kernel[(1,)](dst=dst, num_warps=2)
_kernel[(1,)](dst=dst, num_warps=4)
# -------------
# test extern
# -------------
@pytest.mark.parametrize("dtype_str, expr, lib_path",
[('int32', 'math.ffs', ''),
('float32', 'math.log2', ''),
('float32', 'math.scalbn', ''),
('float32', 'math.pow', tl.math.libdevice_path()),
('float64', 'math.pow_dtype', tl.math.libdevice_path()),
('float64', 'math.norm4d', '')])
def test_math_tensor(dtype_str, expr, lib_path, device):
@triton.jit
def kernel(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = GENERATE_TEST_HERE
tl.store(Y + tl.arange(0, BLOCK), y)
shape = (128, )
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
if expr == 'math.log2':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.broadcast_to(tl.{expr}(5.0), x.shape)'})
y_ref = np.log2(5.0)
elif expr == 'math.ffs':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{expr}(x)'})
y_ref = np.zeros(shape, dtype=x.dtype)
for i in range(shape[0]):
y_ref[i] = (int(x[i]) & int(-x[i])).bit_length()
elif expr == 'math.scalbn':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{expr}(x, 2)'})
y_ref = x * pow(2, 2)
elif expr == 'math.pow_dtype':
x = np.abs(x)
        kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.math.pow(x, 0.5)'})
y_ref = np.power(x, 0.5)
elif expr == 'math.pow':
# numpy does not allow negative factors in power, so we use abs()
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{expr}(x, x)'})
y_ref = np.power(x, x)
elif expr == 'math.norm4d':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{expr}(x, x, x, x)'})
y_ref = np.sqrt(4 * np.power(x, 2))
x_tri = to_triton(x, device=device)
# triton result
y_tri = to_triton(numpy_random((shape[0],), dtype_str=dtype_str, rs=rs), device=device)
kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path})
# compare
if expr == 'math.ffs':
np.testing.assert_equal(y_ref, to_numpy(y_tri))
else:
np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01)
@pytest.mark.parametrize("dtype_str, expr, lib_path",
[('float32', 'math.pow', ''),
('float64', 'math.pow_dtype', ''),
('float64', 'math.pow', tl.math.libdevice_path())])
def test_math_scalar(dtype_str, expr, lib_path, device):
@triton.jit
def kernel(X, Y, BLOCK: tl.constexpr):
x = X
y = GENERATE_TEST_HERE
tl.store(Y + tl.arange(0, BLOCK), y)
shape = (128, )
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random((1,), dtype_str=dtype_str, rs=rs)
y_ref = np.zeros(shape, dtype=x.dtype)
# numpy does not allow negative factors in power, so we use abs()
if expr == 'math.pow':
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.math.pow(x, x)'})
y_ref[:] = np.power(x, x)
elif expr == 'math.pow_dtype':
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.math.pow(x, 0.5)'})
y_ref[:] = np.power(x, 0.5)
# triton result
x_tri = to_triton(x, device=device)[0].item()
y_tri = to_triton(numpy_random((shape[0],), dtype_str=dtype_str, rs=rs), device=device)
kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path})
# compare
np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01)
# -----------------------
# test control flow
# -----------------------
@pytest.mark.parametrize("lo, hi, iv", [(2**35, 2**35 + 20, 1), (2**35, 2**35 + 20, 2), (2**35, 2**35 + 20, 3),
(15, -16, -1), (15, -16, -2), (15, -16, -3),
(-18, -22, -1), (22, 18, -1)])
def test_for_iv(lo, hi, iv, device):
@triton.jit
def kernel(Out, lo, hi, iv: tl.constexpr):
acc = 0
acc = acc.to(tl.int64)
for i in range(lo, hi, iv):
acc += i
tl.store(Out, acc)
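    # Note: lo and hi are overwritten below, so only `iv` from the
    # parametrization actually varies; the reference sum uses the same bounds.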
lo = 2**35
hi = 2**35 + 20
out = to_triton(np.zeros((1,), dtype=np.int64), device=device)
kernel[(1,)](out, lo, hi, iv)
assert out[0] == sum(range(lo, hi, iv))
def test_if_else(device):
@triton.jit
def kernel(Cond, TrueVal, FalseVal, Out):
if tl.load(Cond):
val = tl.load(TrueVal)
else:
val = tl.load(FalseVal)
tl.store(Out, val)
out = to_triton(np.zeros((1,), dtype=np.int32), device=device)
true_val = to_triton(np.full((1,), 1, dtype=np.int32), device=device)
false_val = to_triton(np.full((1,), 2, dtype=np.int32), device=device)
cond = to_triton(np.zeros((1,), dtype=np.int32), device=device)
# True
cond[0] = True
kernel[(1,)](cond, true_val, false_val, out)
assert to_numpy(out)[0] == true_val[0]
# False
cond[0] = False
kernel[(1,)](cond, true_val, false_val, out)
assert to_numpy(out)[0] == false_val[0]
@pytest.mark.parametrize("mode", ["dynamic", "static"])
def test_if_return(mode, device):
@triton.jit
def kernel(ExitEarly, Out, cond: tl.constexpr, mode: tl.constexpr):
if mode == "dynamic":
if tl.load(ExitEarly):
tl.store(Out, 0)
return
else:
if cond:
tl.store(Out, 0)
return
tl.store(Out, 1)
out = to_triton(np.zeros((1,), dtype=np.int32), device=device)
exit_early = to_triton(np.zeros((1,), dtype=np.int32), device=device)
# exit early path taken
exit_early[0] = 1
kernel[(1,)](exit_early, out, True, mode)
assert to_numpy(out)[0] == 0
# exit early path not taken
exit_early[0] = 0
kernel[(1,)](exit_early, out, False, mode)
assert to_numpy(out)[0] == 1
@triton.jit
def add_fn(x):
return x + 1
@triton.jit(noinline=True)
def add_fn_noinline(x):
return x + 1
@triton.jit
def add_fn_return(x, pid):
if pid == 0:
return x + 1
else:
return x + 2
@triton.jit
def add_fn_expr(Out, x):
tl.store(Out, x)
@triton.jit
def add_fn_static_cond(x, cond: tl.constexpr):
if cond == "":
return x
else:
return x + 1
@pytest.mark.parametrize("call_type", ["attribute", "attribute_jit",
"jit", "jit_if", "jit_ifexp", "jit_expr",
"jit_static_cond", "jit_noinline", "jit_extern"])
def test_if_call(call_type, device):
@triton.jit
def kernel(Out, call_type: tl.constexpr):
pid = tl.program_id(0)
o = tl.load(Out)
if call_type == "attribute":
# call attribute
if pid == 0:
a = o
a = a.to(tl.int32).to(tl.int32) + 1
o = a
elif call_type == "attribute_jit":
# call attribute and jit function
if pid == 0:
a = o
a = tl.load(Out + add_fn(a) - 1).to(tl.int32) + 1
o = a
elif call_type == "jit":
if pid == 0:
# regular function call
a = o
a = add_fn(a)
o = a
elif call_type == "jit_if":
# function without end_if block
if pid == 0:
a = o
a = add_fn_return(a, pid)
o = a
elif call_type == "jit_ifexp":
# ifexp expression
if pid == 0:
a = o
a = add_fn(a) if pid == 0 else add_fn_return(a, pid)
o = a
elif call_type == "jit_expr":
# call without return
if pid == 0:
a = o + 1
add_fn_expr(Out, a)
o = a
elif call_type == "jit_static_cond":
if pid == 0:
a = o + 1
add_fn_static_cond(o, call_type)
o = a
elif call_type == "jit_noinline":
if pid == 0:
a = o + 1
add_fn_noinline(a)
o = a
elif call_type == "jit_extern":
if pid == 0:
a = o + 1
tl.cdiv(a, a)
o = a
tl.store(Out, o)
out = to_triton(np.zeros((1,), dtype=np.int32), device=device)
kernel[(1,)](out, call_type)
assert to_numpy(out)[0] == 1
@pytest.mark.parametrize("_cond1", [True, False])
@pytest.mark.parametrize("_cond2", [True, False])
@pytest.mark.parametrize("_cond3", [True, False])
def test_nested_if_else_return(_cond1, _cond2, _cond3, device):
@triton.jit
def kernel(Cond1, Cond2, Cond3, Val1, Val2, Val3, Out):
val = 0
if tl.load(Cond1):
if tl.load(Cond2):
val = tl.load(Val1)
else:
return
else:
if tl.load(Cond3):
val = tl.load(Val2)
else:
val = tl.load(Val3)
tl.store(Out, val)
out = to_triton(np.full((1,), -1, dtype=np.int32), device=device)
cond1 = to_triton(np.full((1,), _cond1, dtype=np.int32), device=device)
cond2 = to_triton(np.full((1,), _cond2, dtype=np.int32), device=device)
cond3 = to_triton(np.full((1,), _cond3, dtype=np.int32), device=device)
val1 = to_triton(np.full((1,), 1, dtype=np.int32), device=device)
val2 = to_triton(np.full((1,), 2, dtype=np.int32), device=device)
val3 = to_triton(np.full((1,), 3, dtype=np.int32), device=device)
kernel[(1,)](cond1, cond2, cond3, val1, val2, val3, out)
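    # For the (cond1=True, cond2=False) combinations the kernel returns before
    # storing, leaving `out` at its initial -1; using out[0] as the expected
    # value below encodes exactly that.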
targets = {
(True, True, True): val1[0],
(True, True, False): val1[0],
(True, False, True): out[0],
(True, False, False): out[0],
(False, True, True): val2[0],
(False, True, False): val3[0],
(False, False, True): val2[0],
(False, False, False): val3[0],
}
assert out[0] == targets[(_cond1, _cond2, _cond3)]
def test_while(device):
@triton.jit
def kernel(InitI, Bound, CutOff, OutI, OutInitI, OutJ):
init_i = tl.load(InitI)
curr_i = init_i
j = 0
# Check that init_i is not updated by the loop
while j < tl.load(Bound):
curr_i = curr_i + (j == tl.load(CutOff))
j += 1
tl.store(OutInitI, init_i)
tl.store(OutI, curr_i)
tl.store(OutJ, j)
out_i = to_triton(np.zeros((1,), dtype=np.int32), device=device)
out_j = to_triton(np.zeros((1,), dtype=np.int32), device=device)
init_i = to_triton(np.full((1,), 1, dtype=np.int32), device=device)
out_init_i = to_triton(np.full((1,), 0, dtype=np.int32), device=device)
bound = to_triton(np.full((1,), 10, dtype=np.int32), device=device)
cut_off = to_triton(np.full((1,), 5, dtype=np.int32), device=device)
kernel[(1,)](init_i, bound, cut_off, out_i, out_init_i, out_j)
assert out_init_i[0] == init_i[0]
assert out_i[0] == init_i[0] + 1
assert out_j[0] == bound[0]
def test_nested_while(device):
@triton.jit
def nested_while(data, countPtr):
for i in range(10):
count = tl.load(countPtr)
while count > 0:
tl.store(data, tl.load(data) + 1.0)
count = count - 2
counter = torch.tensor([8], dtype=torch.int32, device=device)
data = torch.zeros((1,), device=device, dtype=torch.float32)
nested_while[(1,)](data, counter)
assert data[0] == 40
# def test_for_if(device):
# @triton.jit
# def kernel(bound, cutoff, M, N):
# m = 0
# n = 0
# for i in range(bound):
# if i > cutoff:
# m = m + 1
# else:
# n = n + 1
# tl.store(M, m)
# tl.store(N, n)
# m = to_triton(np.zeros((1,), dtype=np.int32), device=device)
# n = to_triton(np.zeros((1,), dtype=np.int32), device=device)
# kernel[(1,)](10, 7, m, n)
# print(m[0])
# print(n[0])
# -----------------------
# test extra
# -----------------------
def test_globaltimer(device):
check_cuda_only(device)
@triton.jit
def kernel(Out1, Out2):
start = tl.extra.cuda.globaltimer()
off = tl.arange(0, 128)
for i in range(10000):
tl.store(Out1 + off, tl.load(Out1 + off) + 1)
end = tl.extra.cuda.globaltimer()
tl.store(Out2, end - start)
out1 = to_triton(np.zeros((128,), dtype=np.int64), device=device)
out2 = to_triton(np.zeros((1,), dtype=np.int64), device=device)
h = kernel[(1,)](out1, out2)
assert out2[0] > 0
# 2 inlined globaltimers + one extra in the wrapper extern function
assert h.asm["ptx"].count("%globaltimer") == 3
def test_smid(device):
check_cuda_only(device)
@triton.jit
def kernel(Out):
tl.store(Out + tl.program_id(0), tl.extra.cuda.smid())
out = to_triton(np.zeros((1024,), dtype=np.int32), device=device)
h = kernel[(out.shape[0],)](out)
assert out.sort()[0].unique().shape[0] > 0
assert h.asm["ptx"].count("%smid") == 2
# -----------------------
# test layout conversions
# -----------------------
# TODO: backend should be tested separately
layouts = [
# MmaLayout(version=1, warps_per_cta=[1, 4]),
MmaLayout(version=(2, 0), warps_per_cta=[1, 4]),
# MmaLayout(version=1, warps_per_cta=[4, 1]),
MmaLayout(version=(2, 0), warps_per_cta=[4, 1]),
BlockedLayout([1, 8], [2, 16], [4, 1], [1, 0]),
BlockedLayout([1, 4], [4, 8], [2, 2], [1, 0]),
BlockedLayout([1, 1], [1, 32], [2, 2], [1, 0]),
BlockedLayout([8, 1], [16, 2], [1, 4], [0, 1]),
BlockedLayout([4, 1], [8, 4], [2, 2], [0, 1]),
BlockedLayout([1, 1], [32, 1], [2, 2], [0, 1]),
BlockedLayout([4, 4], [1, 32], [4, 1], [1, 0])
]
intermediate_layouts = [
None,
SharedLayout(1, 1, 1, [1, 0]),
SharedLayout(4, 2, 4, [1, 0]),
SharedLayout(2, 2, 4, [1, 0]),
]
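# test_convert2d hand-writes a TTGIR module that loads a 128x128 fp16 tile in
# the #src layout, converts it to #dst (optionally round-tripping through a
# shared-memory #interm layout), and stores it; the result must equal the input.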
@pytest.mark.parametrize("shape", [(128, 128)])
@pytest.mark.parametrize("dtype", ['float16'])
@pytest.mark.parametrize("src_layout", layouts)
@pytest.mark.parametrize("interm_layout", intermediate_layouts)
@pytest.mark.parametrize("dst_layout", layouts)
def test_convert2d(dtype, shape, src_layout, interm_layout, dst_layout, device):
if str(src_layout) == str(dst_layout):
pytest.skip()
if 'mma' in str(src_layout) and 'mma' in str(dst_layout):
pytest.skip()
layouts = f"""
#src = {src_layout}
#dst = {dst_layout}
""" if interm_layout is None else f"""
#src = {src_layout}
#interm = {interm_layout}
#dst = {dst_layout}
"""
conversion = f"""
%12 = triton_gpu.convert_layout %9 : (tensor<128x128xi32, #src>) -> tensor<128x128xi32, #dst>
%13 = triton_gpu.convert_layout %11 : (tensor<128x128xf16, #src>) -> tensor<128x128xf16, #dst>
""" if interm_layout is None else f"""
%15 = triton_gpu.convert_layout %9 : (tensor<128x128xi32, #src>) -> tensor<128x128xi32, #interm>
%16 = triton_gpu.convert_layout %15 : (tensor<128x128xi32, #interm>) -> tensor<128x128xi32, #src>
%17 = triton_gpu.convert_layout %11 : (tensor<128x128xf16, #src>) -> tensor<128x128xf16, #interm>
%18 = triton_gpu.convert_layout %17 : (tensor<128x128xf16, #interm>) -> tensor<128x128xf16, #src>
%12 = triton_gpu.convert_layout %16 : (tensor<128x128xi32, #src>) -> tensor<128x128xi32, #dst>
%13 = triton_gpu.convert_layout %18 : (tensor<128x128xf16, #src>) -> tensor<128x128xf16, #dst>
"""
ir = layouts + """
module attributes {"triton_gpu.num-warps" = 4 : i32} {
tt.func public @kernel_0d1d(%arg0: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg1: !tt.ptr<f16> {tt.divisibility = 16 : i32}) {
%cst = arith.constant dense<128> : tensor<128x1xi32, #src>
%0 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #src}>>
%1 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #src}>>
%2 = tt.splat %arg0 : (!tt.ptr<f16>) -> tensor<128x128x!tt.ptr<f16>, #src>
%4 = tt.expand_dims %0 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #src}>>) -> tensor<128x1xi32, #src>
%5 = arith.muli %4, %cst : tensor<128x1xi32, #src>
%6 = tt.expand_dims %1 {axis = 0 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #src}>>) -> tensor<1x128xi32, #src>
%7 = tt.broadcast %6 : (tensor<1x128xi32, #src>) -> tensor<128x128xi32, #src>
%8 = tt.broadcast %5 : (tensor<128x1xi32, #src>) -> tensor<128x128xi32, #src>
%9 = arith.addi %8, %7 : tensor<128x128xi32, #src>
%10 = tt.addptr %2, %9 : tensor<128x128x!tt.ptr<f16>, #src>, tensor<128x128xi32, #src>
%11 = tt.load %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x128xf16, #src>
%3 = tt.splat %arg1 : (!tt.ptr<f16>) -> tensor<128x128x!tt.ptr<f16>, #dst>
""" + conversion + """
%14 = tt.addptr %3, %12 : tensor<128x128x!tt.ptr<f16>, #dst>, tensor<128x128xi32, #dst>
tt.store %14, %13 : tensor<128x128xf16, #dst>
tt.return
}
}
"""
x = to_triton(numpy_random(shape, dtype_str=dtype), device=device)
z = torch.empty_like(x)
    # write the IR to a temporary file and compile it from there
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f:
f.write(ir)
f.flush()
kernel = triton.compile(f.name)
kernel[(1, 1, 1)](x.data_ptr(), z.data_ptr())
assert torch.equal(z, x)
def test_load_scalar_with_mask(device):
@triton.jit
def kernel(Input, Index, Out, N: int):
index = tl.load(Index)
scalar = tl.load(Input + index, mask=index < N, other=0)
tl.store(Out, scalar, mask=index < N)
Index = torch.tensor([0], dtype=torch.int32, device=device)
Input = torch.tensor([0], dtype=torch.int32, device=device)
Out = torch.empty_like(Index, device=device)
kernel[(1,)](Input, Index, Out, Index.numel())
assert Out.data[0] == 0
# Tests our own PTX codegen for float16 and int16 conversions;
# maybe delete it later once ptxas has been fixed.
@pytest.mark.parametrize("dtype_str", ['float16', 'int16'])
def test_ptx_cast(dtype_str, device):
@triton.jit
def kernel(in_ptr0, out_ptr2, xnumel, rnumel, dtype: tl.constexpr, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp4 = (tl.zeros([XBLOCK, RBLOCK], dtype) - 10000).to(dtype)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (197 * x0)), rmask & xmask).to(dtype)
tmp1 = 2
tmp2 = tmp0 * tmp1
tmp3 = tmp2.to(dtype)
tmp5 = _tmp4 < tmp3
_tmp4 = tl.where(rmask & xmask & tmp5, tmp3, _tmp4)
tl.store(out_ptr2 + (r1 + (197 * x0) + tl.zeros([XBLOCK, RBLOCK], tl.int32)), _tmp4, rmask & xmask)
torch.manual_seed(123)
if dtype_str == 'int16':
torch_dtype = torch.int16
triton_dtype = tl.int32
else:
torch_dtype = torch.float16
triton_dtype = tl.float32
s0 = 4
buf11 = -torch.ones((6 * s0, 197, 197), device=device, dtype=torch_dtype)
buf14 = -torch.ones((s0, 6, 197, 197), device=device, dtype=torch_dtype)
kernel[(4728,)](buf11, buf14, 1182 * s0, 197, triton_dtype, 1, 256, num_warps=2)
assert buf14.to(torch.float32).mean() == -2.0
| 125,776 | 37.033565 | 204 |
py
|
triton
|
triton-main/python/test/unit/language/conftest.py
|
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption(
"--device", action="store", default='cuda'
)
@pytest.fixture
def device(request):
return request.config.getoption("--device")
| 238 | 14.933333 | 50 |
py
|
triton
|
triton-main/python/test/unit/language/assert_helper.py
|
import sys
import torch
from torch.testing import assert_close
import triton
import triton.language as tl
@triton.jit
def kernel_device_assert(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.device_assert(x == 0, "x != 0")
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def kernel_device_assert_scalar(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
# Trivial assert
tl.device_assert(0 == 0, "x != 0")
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit(debug=False)
def kernel_device_assert_no_debug(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.device_assert(x == 0, "x != 0")
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def kernel_assert(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
assert x == 0, "x != 0"
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def kernel_static_assert(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.static_assert(BLOCK == 128, "BLOCK != 128")
tl.store(Y + tl.arange(0, BLOCK), x)
def test_assert(func: str):
shape = (128, )
x = torch.arange(0, shape[0], dtype=torch.int32, device='cuda')
y = torch.zeros(shape, dtype=x.dtype, device="cuda")
if func == "device_assert":
kernel_device_assert[(1,)](x, y, BLOCK=shape[0])
kernel_device_assert_scalar[(1,)](x, y, BLOCK=shape[0])
elif func == "no_debug":
# TRITON_DEBUG=1 can override the debug flag
kernel_device_assert_no_debug[(1,)](x, y, BLOCK=shape[0])
elif func == "assert":
kernel_assert[(1,)](x, y, BLOCK=shape[0])
elif func == "static_assert":
kernel_static_assert[(1,)](x, y, BLOCK=shape[0])
assert_close(y, x)
@triton.jit
def jit_device_assert_none(x):
tl.device_assert(x == 0, "x != 0")
@triton.jit(debug=True)
def jit_device_assert_true(x):
tl.device_assert(x == 0, "x != 0")
@triton.jit(debug=False)
def jit_device_assert_false(x):
tl.device_assert(x == 0, "x != 0")
@triton.jit
def kernel_device_assert_nested(X, Y, BLOCK: tl.constexpr, jit_debug: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
if jit_debug == "true":
jit_device_assert_true(x)
elif jit_debug == "false":
jit_device_assert_false(x)
else:
jit_device_assert_none(x)
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit(debug=True)
def kernel_device_assert_nested_true(X, Y, BLOCK: tl.constexpr, jit_debug: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
if jit_debug == "true":
jit_device_assert_true(x)
elif jit_debug == "false":
jit_device_assert_false(x)
else:
jit_device_assert_none(x)
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit(debug=False)
def kernel_device_assert_nested_false(X, Y, BLOCK: tl.constexpr, jit_debug: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
if jit_debug == "true":
jit_device_assert_true(x)
elif jit_debug == "false":
jit_device_assert_false(x)
else:
jit_device_assert_none(x)
tl.store(Y + tl.arange(0, BLOCK), x)
def test_assert_nested(caller: str, callee: str):
shape = (128, )
x = torch.arange(0, shape[0], dtype=torch.int32, device='cuda')
y = torch.zeros(shape, dtype=x.dtype, device="cuda")
if caller == "none":
kernel_device_assert_nested[(1,)](x, y, BLOCK=shape[0], jit_debug=callee)
elif caller == "true":
kernel_device_assert_nested_true[(1,)](x, y, BLOCK=shape[0], jit_debug=callee)
elif caller == "false":
kernel_device_assert_nested_false[(1,)](x, y, BLOCK=shape[0], jit_debug=callee)
assert_close(y, x)
if __name__ == "__main__":
if len(sys.argv) == 3:
test_assert_nested(sys.argv[1], sys.argv[2])
else:
test_assert(sys.argv[1])
| 3,841 | 28.106061 | 90 |
py
|
triton
|
triton-main/python/test/unit/language/test_random.py
|
import numpy as np
import pytest
import scipy.stats
import torch
import triton
import triton.language as tl
#####################################
# Reference Philox Implementation
#####################################
class PhiloxConfig:
def __init__(self, PHILOX_ROUND_A, PHILOX_ROUND_B, PHILOX_KEY_A, PHILOX_KEY_B, DTYPE):
self.PHILOX_ROUND_A = np.array(PHILOX_ROUND_A, dtype=DTYPE)
self.PHILOX_ROUND_B = np.array(PHILOX_ROUND_B, dtype=DTYPE)
self.PHILOX_KEY_A = np.array(PHILOX_KEY_A, dtype=DTYPE)
self.PHILOX_KEY_B = np.array(PHILOX_KEY_B, dtype=DTYPE)
self.DTYPE = DTYPE
# 32-bit Philox constants; this variant is better suited to GPUs
PHILOX_32 = PhiloxConfig(
PHILOX_KEY_A=0x9E3779B9,
PHILOX_KEY_B=0xBB67AE85,
PHILOX_ROUND_A=0xD2511F53,
PHILOX_ROUND_B=0xCD9E8D57,
DTYPE=np.uint32,
)
# This is what numpy implements
PHILOX_64 = PhiloxConfig(
PHILOX_KEY_A=0x9E3779B97F4A7C15,
PHILOX_KEY_B=0xBB67AE8584CAA73B,
PHILOX_ROUND_A=0xD2E7470EE14C6C93,
PHILOX_ROUND_B=0xCA5A826395121157,
DTYPE=np.uint64,
)
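# CustomPhilox4x is a plain-NumPy reference implementation of the Philox 4x
# counter-based generator; the tests below compare tl.randint against it.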
class CustomPhilox4x:
def __init__(self, seed, config):
self._config = config
seed = self._into_pieces(seed)
self._key = np.array(seed[:2], dtype=self._dtype)
self._counter = np.array((0, 0) + seed[2:], dtype=self._dtype)
@property
def _dtype(self):
return self._config.DTYPE
def _into_pieces(self, n, pad=4):
res = []
while len(res) < pad:
res.append(np.array(n, dtype=self._dtype))
n >>= (np.dtype(self._dtype).itemsize * 8)
assert n == 0
return tuple(res)
def _multiply_low_high(self, a, b):
low = a * b
high = int(a) * int(b)
high = np.array(high >> (np.dtype(self._dtype).itemsize * 8), dtype=self._dtype)
return low, high
def _single_round(self, counter, key):
lo0, hi0 = self._multiply_low_high(self._config.PHILOX_ROUND_A, counter[0])
lo1, hi1 = self._multiply_low_high(self._config.PHILOX_ROUND_B, counter[2])
ret0 = hi1 ^ counter[1] ^ key[0]
ret1 = lo1
ret2 = hi0 ^ counter[3] ^ key[1]
ret3 = lo0
return np.array([ret0, ret1, ret2, ret3], dtype=self._dtype)
def _raise_key(self, key):
pk = [self._config.PHILOX_KEY_A, self._config.PHILOX_KEY_B]
return key + np.array(pk, dtype=self._dtype)
def random_raw(self):
counter = self._counter
key = self._key
for _ in range(10):
counter = self._single_round(counter, key)
key = self._raise_key(key)
self.advance(1)
return counter
def advance(self, n_steps):
self._counter[0] += n_steps
assert self._counter[0] < 2**32, "FIXME: doesn't work for large offsets"
class CustomPhilox(CustomPhilox4x):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = []
def random_raw(self):
if len(self.buffer) == 0:
self.buffer = list(super().random_raw())[::-1]
return int(self.buffer.pop())
#####################################
# Unit Tests
#####################################
BLOCK = 1024
# test generation of random uint32
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in ['10', '4,53', '10000']
for seed in [0, 42, 124, 54, 0xffffffff, 0xdeadbeefcafeb0ba]]
)
def test_randint(size, seed, device):
size = list(map(int, size.split(',')))
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.randint(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.int32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
out_tri = x.cpu().numpy().astype(np.uint32).flatten().tolist()
# reference result
gen = CustomPhilox4x(seed, config=PHILOX_32)
out_ref = [gen.random_raw()[0] for _ in out_tri]
assert out_tri == out_ref
# test uniform PRNG
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in [1000000]
for seed in [0, 42, 124, 54]]
)
def test_rand(size, seed, device):
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.rand(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.float32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
assert all((x >= 0) & (x <= 1))
assert scipy.stats.kstest(x.tolist(), 'uniform', args=(0, 1)).statistic < 0.01
# test normal PRNG
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in [1000000]
for seed in [0, 42, 124, 54]]
)
def test_randn(size, seed, device):
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.randn(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.float32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
assert abs(x.mean()) < 1e-2
assert abs(x.std() - 1) < 1e-2
# tl.rand() should never produce >=1.0
def test_rand_limits(device):
@triton.jit
def kernel(input, output, n: tl.constexpr):
idx = tl.arange(0, n)
x = tl.load(input + idx)
y = tl.random.uint32_to_uniform_float(x)
tl.store(output + idx, y)
min_max_int32 = torch.tensor([
torch.iinfo(torch.int32).min,
torch.iinfo(torch.int32).max,
], dtype=torch.int32, device=device)
output = torch.empty(2, dtype=torch.float32, device=device)
kernel[(1,)](min_max_int32, output, 2)
assert output[0] == output[1]
assert 1.0 - torch.finfo(torch.float32).eps <= output[0].item() < 1.0
| 6,178 | 30.050251 | 90 |
py
|
triton
|
triton-main/python/test/unit/language/print_helper.py
|
import sys
import torch
from torch.testing import assert_close
import triton
import triton.language as tl
@triton.jit
def kernel_device_print(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.device_print("", x)
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def kernel_print(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
print("", x)
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def kernel_static_print(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.static_print(x)
tl.store(Y + tl.arange(0, BLOCK), x)
def test_print(func: str, data_type: str):
shape = (128, )
# limit the range of integers so that the sum does not overflow
x = torch.arange(0, shape[0], dtype=torch.int32, device='cuda').to(getattr(torch, data_type))
y = torch.zeros(shape, dtype=x.dtype, device="cuda")
if func == "device_print":
kernel_device_print[(1,)](x, y, BLOCK=shape[0])
elif func == "print":
kernel_print[(1,)](x, y, BLOCK=shape[0])
elif func == "static_print":
kernel_static_print[(1,)](x, y, BLOCK=shape[0])
assert_close(y, x)
if __name__ == "__main__":
test_print(sys.argv[1], sys.argv[2])
| 1,244 | 25.489362 | 97 |
py
|
triton
|
triton-main/python/test/unit/language/test_subprocess.py
|
import os
import subprocess
import sys
import pytest
dir_path = os.path.dirname(os.path.realpath(__file__))
print_path = os.path.join(dir_path, "print_helper.py")
assert_path = os.path.join(dir_path, "assert_helper.py")
# TODO: bfloat16 after LLVM-15
assert_types = ["device_assert", "assert", "static_assert", "no_debug"]
nested_types = [(caller, callee) for caller in ["true", "false", "none"] for callee in ["true", "false", "none"]]
torch_types = ["int8", "uint8", "int16", "int32", "long", "float16", "float32", "float64"]
@pytest.mark.parametrize("func_type, data_type",
[("device_print", data_type) for data_type in torch_types] + [("print", "int32"), ("static_print", "int32")])
def test_print(func_type: str, data_type: str):
proc = subprocess.Popen([sys.executable, print_path, func_type, data_type], stdout=subprocess.PIPE, shell=False)
outs, _ = proc.communicate()
outs = outs.split()
new_lines = set()
for line in outs:
try:
value = line
if func_type != "static_print":
value = int(float(line))
new_lines.add(value)
except Exception as e:
print(e)
if func_type != "static_print":
for i in range(128):
assert i in new_lines
assert len(new_lines) == 128
else:
assert len(new_lines) == 1
@pytest.mark.parametrize("func_type", assert_types)
def test_assert(func_type: str):
os.environ["TRITON_DEBUG"] = "1"
proc = subprocess.Popen([sys.executable, assert_path, func_type], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
_, errs = proc.communicate()
errs = errs.splitlines()
num_errs = 0
for err in errs:
if "x != 0" in err.decode("utf-8"):
num_errs += 1
os.environ["TRITON_DEBUG"] = "0"
if func_type != "static_assert":
assert num_errs == 127
else:
assert num_errs == 0
@pytest.mark.parametrize("caller_type, callee_type", nested_types)
def test_assert_nested(caller_type, callee_type):
proc = subprocess.Popen([sys.executable, assert_path, caller_type, callee_type], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
_, errs = proc.communicate()
errs = errs.splitlines()
num_errs = 0
for err in errs:
if "x != 0" in err.decode("utf-8"):
num_errs += 1
if caller_type == "none":
if callee_type == "true":
assert num_errs == 127
else:
assert num_errs == 0
elif caller_type == "true":
if callee_type == "false":
assert num_errs == 0
else:
assert num_errs == 127
elif caller_type == "false":
if callee_type == "true":
assert num_errs == 127
else:
assert num_errs == 0
| 2,820 | 33.82716 | 145 |
py
|
triton
|
triton-main/python/test/unit/language/test_block_pointer.py
|
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def block_copy_kernel(a_ptr, b_ptr, N, BLOCK_SIZE: tl.constexpr, padding_option: tl.constexpr):
pid = tl.program_id(0)
# We only copy half of the data to see if the padding works
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(N // 2, ), strides=(1, ), offsets=(pid * BLOCK_SIZE, ),
block_shape=(BLOCK_SIZE, ), order=(0, ))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(N, ), strides=(1, ), offsets=(pid * BLOCK_SIZE, ),
block_shape=(BLOCK_SIZE, ), order=(0, ))
a = tl.load(a_block_ptr, boundary_check=(0, ), padding_option=padding_option)
tl.store(b_block_ptr, a, boundary_check=(0, ))
@pytest.mark.parametrize("dtype_str, n, padding_option",
[(dtype_str, n, padding) for dtype_str in ("bool", "int16", "float16")
for n in (64, 128, 256, 512, 1024)
for padding in ("zero", "nan")])
def test_block_copy(dtype_str, n, padding_option):
capability = torch.cuda.get_device_capability()
if capability[0] >= 9:
pytest.skip("Hopper support is working in progress")
dtype = getattr(torch, dtype_str)
if dtype_str in ("bool", "int16"):
if padding_option == "nan":
pytest.skip("Padding with NaN is not supported for integer types")
a = torch.randint(0, 2, (n, ), device="cuda", dtype=dtype)
else:
a = torch.randn((n, ), device="cuda", dtype=dtype)
b = torch.zeros((n, ), device="cuda", dtype=dtype)
grid = lambda meta: (triton.cdiv(n, meta["BLOCK_SIZE"]),)
block_copy_kernel[grid](a_ptr=a, b_ptr=b, N=n, BLOCK_SIZE=64, padding_option=padding_option)
assert torch.all(a[0: n // 2] == b[0: n // 2])
if padding_option == "zero":
assert torch.all(b[n // 2: n] == 0)
else:
assert torch.all(torch.isnan(b[n // 2: n]))
@triton.jit
def matmul_no_scf_with_advance_kernel(
a_ptr, b_ptr, c_ptr,
M, N, K,
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr
):
offs_m = tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(stride_am, stride_ak),
offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_K), order=(1, 0))
b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(stride_bk, stride_bn),
offsets=(0, 0), block_shape=(BLOCK_K, BLOCK_N), order=(1, 0))
    # The two lines below only test negative offsets for the `advance` API and could be removed
a_block_ptr = tl.advance(a_block_ptr, (BLOCK_M, -BLOCK_K))
a_block_ptr = tl.advance(a_block_ptr, (-BLOCK_M, BLOCK_K))
a = tl.load(a_block_ptr, boundary_check=(1, ), padding_option="zero")
b = tl.load(b_block_ptr, boundary_check=(0, ), padding_option="zero")
c = tl.dot(a, b)
c_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
tl.store(c_ptrs, c)
@pytest.mark.parametrize("shape, num_warps", [
(shape, num_warps)
for shape in [
[64, 64, 16],
[64, 64, 32],
[64, 64, 64],
]
for num_warps in [4, 8]
])
def test_block_ptr_matmul_no_scf(shape, num_warps):
capability = torch.cuda.get_device_capability()
if capability[0] >= 9:
pytest.skip("Hopper support is working in progress")
m, n, k = shape
a = torch.randn((m, k), device="cuda", dtype=torch.float16)
b = torch.randn((k, n), device="cuda", dtype=torch.float16)
c = torch.empty((m, n), device="cuda", dtype=torch.float32)
grid = lambda META: (1, )
matmul_no_scf_with_advance_kernel[grid](a_ptr=a, b_ptr=b, c_ptr=c,
M=m, N=n, K=k,
stride_am=a.stride(0), stride_ak=a.stride(1),
stride_bk=b.stride(0), stride_bn=b.stride(1),
stride_cm=c.stride(0), stride_cn=c.stride(1),
BLOCK_M=m, BLOCK_N=n, BLOCK_K=k,
num_warps=num_warps)
golden = torch.matmul(a, b)
torch.testing.assert_allclose(c, golden)
| 4,453 | 42.242718 | 110 |
py
|
triton
|
triton-main/python/test/unit/language/test_line_info.py
|
import subprocess
import tempfile
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def kernel_single(X,
Y,
BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.store(Y + tl.arange(0, BLOCK), x)
@triton.jit
def device_inline(x):
return x + x
@triton.jit
def kernel_call(X,
Y,
BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = device_inline(x)
tl.store(Y + tl.arange(0, BLOCK), y)
@triton.jit(noinline=True)
def device_noinline(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = x + x
tl.store(Y + tl.arange(0, BLOCK), y)
@triton.jit
def kernel_call_noinline(X, Y, BLOCK: tl.constexpr):
device_noinline(X, Y, BLOCK)
@triton.jit
def kernel_multi_files(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = tl.softmax(x)
tl.store(Y + tl.arange(0, BLOCK), y)
def extract_file_lines(asm):
fd, path = tempfile.mkstemp()
with open(fd, 'wb') as cubin:
cubin.write(asm)
asm = subprocess.check_output(["nvdisasm", "-g", path]).decode("utf-8")
file_lines = []
lines = asm.splitlines()
for line in lines:
if "## File" in line:
entries = line[line.index("## File"):].split(",")
file_lines.append((entries[0].strip(), entries[1].strip()))
return file_lines
def check_file_lines(file_lines, file_name, lineno):
for file, line in file_lines:
# -1 means do not check line number
if lineno == -1:
if file_name in file:
return True
if file_name in file and str(lineno) in line:
return True
return False
func_types = ["single", "call", "call_noinline", "multi_files"]
@pytest.mark.parametrize("func", func_types)
def test_line_info(func: str):
try:
subprocess.check_output(["nvdisasm", "-h"])
except BaseException:
pytest.skip("nvdisasm is not available")
shape = (128, )
x = torch.arange(0, shape[0], dtype=torch.float32, device='cuda')
y = torch.zeros(shape, dtype=x.dtype, device="cuda")
kernel_info = {}
if func == "single":
kernel_info = kernel_single[(1,)](x, y, BLOCK=shape[0])
elif func == "call":
kernel_info = kernel_call[(1,)](x, y, BLOCK=shape[0])
elif func == "call_noinline":
kernel_info = kernel_call_noinline[(1,)](x, y, BLOCK=shape[0])
elif func == "multi_files":
kernel_info = kernel_multi_files[(1,)](x, y, BLOCK=shape[0])
file_lines = extract_file_lines(kernel_info.asm["cubin"])
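    # The expected line numbers below refer to where the kernels above (and
    # triton's standard.py helpers) are defined, so they presumably need
    # updating whenever those definitions move.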
if func == "single":
assert (check_file_lines(file_lines, "test_line_info.py", 15))
assert (check_file_lines(file_lines, "test_line_info.py", 16))
elif func == "call":
assert (check_file_lines(file_lines, "test_line_info.py", 28))
assert (check_file_lines(file_lines, "test_line_info.py", 21))
assert (check_file_lines(file_lines, "test_line_info.py", 30))
elif func == "call_noinline":
assert (check_file_lines(file_lines, "test_line_info.py", 42))
assert (check_file_lines(file_lines, "test_line_info.py", 35))
assert (check_file_lines(file_lines, "test_line_info.py", 36))
assert (check_file_lines(file_lines, "test_line_info.py", 37))
elif func == "multi_files":
assert (check_file_lines(file_lines, "test_line_info.py", 47))
assert (check_file_lines(file_lines, "test_line_info.py", 49))
assert (check_file_lines(file_lines, "standard.py", 33))
assert (check_file_lines(file_lines, "standard.py", 34))
assert (check_file_lines(file_lines, "standard.py", 36))
# core.py is changed frequently, so we only check if it exists
assert (check_file_lines(file_lines, "core.py", -1))
| 3,867 | 30.966942 | 75 |
py
|
triton
|
triton-main/python/test/unit/language/test_annotations.py
|
from __future__ import annotations
import torch
import triton
import triton.language as tl
def test_annotations(device):
@triton.jit
def _kernel(X: torch.Tensor, N: int, BLOCK_SIZE: tl.constexpr):
pass
x = torch.empty(1, device=device)
_kernel[(1,)](x, x.shape[0], 32)
try:
_kernel[(1,)](x.shape[0], x.shape[0], 32)
except AttributeError:
pass
| 399 | 17.181818 | 67 |
py
|
triton
|
triton-main/python/test/backend/test_device_backend.py
|
import functools
import hashlib
import importlib
import os
import shutil
import subprocess
import sysconfig
import tempfile
from pathlib import Path
import setuptools
import torch
import triton
import triton.language as tl
from triton.common.backend import BaseBackend, register_backend
from triton.common.build import quiet
from triton.compiler.make_launcher import make_so_cache_key
from triton.runtime.cache import get_cache_manager
from triton.runtime.driver import DriverBase
from triton.runtime.jit import version_key
def build_for_backend(name, src, srcdir):
suffix = sysconfig.get_config_var('EXT_SUFFIX')
so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix))
# try to avoid setuptools if possible
cc = os.environ.get("CC")
if cc is None:
# TODO: support more things here.
clang = shutil.which("clang")
gcc = shutil.which("gcc")
cc = gcc if gcc is not None else clang
if cc is None:
raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
# This function was renamed and made public in Python 3.10
if hasattr(sysconfig, 'get_default_scheme'):
scheme = sysconfig.get_default_scheme()
else:
scheme = sysconfig._get_default_scheme()
    # 'posix_local' is a custom scheme on Debian. However, starting with Python 3.10, the default
    # install path changes to include 'local'. This change is required to use Triton with a system-wide Python.
if scheme == 'posix_local':
scheme = 'posix_prefix'
py_include_dir = sysconfig.get_paths(scheme=scheme)["include"]
ret = subprocess.check_call([cc, src, f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-o", so])
if ret == 0:
return so
# fallback on setuptools
extra_compile_args = []
library_dirs = []
include_dirs = [srcdir]
libraries = []
# extra arguments
extra_link_args = []
# create extension module
ext = setuptools.Extension(
name=name,
language='c',
sources=[src],
include_dirs=include_dirs,
extra_compile_args=extra_compile_args + ['-O3'],
extra_link_args=extra_link_args,
library_dirs=library_dirs,
libraries=libraries,
)
# build extension module
args = ['build_ext']
args.append('--build-temp=' + srcdir)
args.append('--build-lib=' + srcdir)
args.append('-q')
args = dict(
name=name,
ext_modules=[ext],
script_args=args,
)
with quiet():
setuptools.setup(**args)
return so
class ExtensionUtils:
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(ExtensionUtils, cls).__new__(cls)
return cls.instance
def __init__(self):
dirname = os.path.dirname(os.path.realpath(__file__))
src = Path(os.path.join(dirname, "extension_backend.c")).read_text()
key = hashlib.md5(src.encode("utf-8")).hexdigest()
cache = get_cache_manager(key)
fname = "ext_utils.so"
cache_path = cache.get_file(fname)
if cache_path is None:
with tempfile.TemporaryDirectory() as tmpdir:
src_path = os.path.join(tmpdir, "main.c")
with open(src_path, "w") as f:
f.write(src)
so = build_for_backend("ext_utils", src_path, tmpdir)
with open(so, "rb") as f:
cache_path = cache.put(f.read(), fname, binary=True)
import importlib.util
spec = importlib.util.spec_from_file_location("ext_utils", cache_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
self.load_binary = mod.load_binary
self.get_device_properties = mod.get_device_properties
class ExtensionDriver(DriverBase):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(ExtensionDriver, cls).__new__(cls)
return cls.instance
def __init__(self):
self.utils = ExtensionUtils()
class ExtensionBackend(BaseBackend):
stub_so_path = ""
def __init__(self, device_type: str) -> None:
super(ExtensionBackend, self).__init__(device_type)
self.driver = ExtensionDriver()
def add_stages(self, arch, extern_libs, stages):
filter_in_stages = ["ast", "ttir", "ttgir"]
filter_out_stages = []
for key, _ in stages.items():
if key not in filter_in_stages:
filter_out_stages.append(key)
for filter_out_key in filter_out_stages:
stages.pop(filter_out_key)
def add_meta_info(self, ir, cur_module, next_module, metadata, asm):
metadata["name"] = "extension_backend_name"
def get_driver(self):
return self.driver
def get_stream(self):
return ""
@functools.lru_cache(None)
def get_device_properties(self, device):
return self.driver.utils.get_device_properties()
def get_current_device(self):
return torch.device("cpu")
def set_current_device(self, device):
pass
def get_load_binary_fn(self):
return self.driver.utils.load_binary
def get_kernel_bin(self):
return "ttgir"
def get_architecture_descriptor(self, **kwargs):
return ""
def make_launcher_stub(self, name, signature, constants):
# name of files that are cached
so_cache_key = make_so_cache_key(version_key(), signature, constants)
so_cache_manager = get_cache_manager(so_cache_key)
so_name = f"{name}.so"
# retrieve stub from cache if it exists
cache_path = so_cache_manager.get_file(so_name)
if cache_path is None:
with tempfile.TemporaryDirectory() as tmpdir:
src = self._generate_launcher(constants, signature)
src_path = os.path.join(tmpdir, "main.c")
with open(src_path, "w") as f:
f.write(src)
so = build_for_backend(name, src_path, tmpdir)
with open(so, "rb") as f:
so_path = so_cache_manager.put(f.read(), so_name, binary=True)
type(self).stub_so_path = so_path
return so_path
else:
type(self).stub_so_path = cache_path
return cache_path
def _generate_launcher(self, constants, signature):
# generate glue code
src = """
#define __EXTENSION_BACKEND__
#include <Python.h>
#include <stdio.h>
static PyObject* launch_counter(PyObject* self, PyObject* args) {
static int64_t launch_counter = 0;
launch_counter += 1;
return PyLong_FromLong(launch_counter);
}
static PyObject* launch(PyObject* self, PyObject* args) {
if (PyErr_Occurred()) {
return NULL;
}
launch_counter(self, args);
// return None
Py_INCREF(Py_None);
return Py_None;
}
static PyMethodDef ModuleMethods[] = {
{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"},
{"launch_counter", launch_counter, METH_VARARGS, "Entry point to get launch counter"},
{NULL, NULL, 0, NULL} // sentinel
};
static struct PyModuleDef ModuleDef = {
PyModuleDef_HEAD_INIT,
\"__triton_launcher\",
NULL, //documentation
-1, //size
ModuleMethods
};
PyMODINIT_FUNC PyInit___triton_launcher(void) {
PyObject *m = PyModule_Create(&ModuleDef);
if(m == NULL) {
return NULL;
}
PyModule_AddFunctions(m, ModuleMethods);
return m;
}
"""
return src
def test_dummy_backend():
register_backend("cpu", ExtensionBackend)
@triton.jit
def kernel(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 10
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0 + tl.zeros([XBLOCK], tl.int32)), tmp0, xmask)
inp = torch.randn(10)
out = torch.randn(10)
kernel[(10,)](inp, out, 10, XBLOCK=16)
spec = importlib.util.spec_from_file_location("__triton_launcher", ExtensionBackend.stub_so_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
launch_counter = getattr(mod, "launch_counter")
for _ in range(100):
kernel[(10,)](inp, out, 10, XBLOCK=16)
assert launch_counter() > 0
| 8,692 | 32.053232 | 110 |
py
|
triton
|
triton-main/python/test/backend/third_party_backends/conftest.py
|
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption(
"--backend", action="store", default="", help="Codegen backend"
)
@pytest.fixture
def cmdopt(request):
return request.config.getoption("--backend")
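# A minimal usage sketch (not part of the original conftest): any test in this
# directory can request the ``cmdopt`` fixture above to read the value passed via
# ``pytest . --backend <name>``; the helper below is illustrative only.
def _cmdopt_usage_sketch(cmdopt):
    # ``cmdopt`` is the raw --backend string ("" when the flag is absent)
    return cmdopt == "xpu"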
| 260 | 16.4 | 71 |
py
|
triton
|
triton-main/python/test/backend/third_party_backends/test_xpu_backend.py
|
import torch
import triton
import triton.language as tl
def test_xpu_backend(cmdopt):
if cmdopt == "xpu":
has_ipex = False
try:
# Import IPEX to provide Intel GPU runtime
import intel_extension_for_pytorch # type: ignore # noqa: F401
has_ipex = True if hasattr(torch, "xpu") else False
except Exception:
has_ipex = False
@triton.jit()
def kernel(x_ptr, y_ptr, out_ptr):
pid = tl.program_id(axis=0)
x = tl.load(x_ptr + pid)
y = tl.load(y_ptr + pid)
out = x + y
tl.store(out_ptr + pid, out)
if has_ipex:
for _ in range(1000):
x = torch.randn((65536,), device="xpu", dtype=torch.float32)
y = torch.randn((65536,), device="xpu", dtype=torch.float32)
z = torch.zeros((65536,), device="xpu", dtype=torch.float32)
kernel[(65536,)](x, y, z, num_warps=32)
assert torch.all(x + y == z)
else:
return
| 1,059 | 30.176471 | 76 |
py
|
triton
|
triton-main/python/test/regression/test_functional_regressions.py
|
import numpy as np
import pytest
import torch
from numpy.random import RandomState
import triton
import triton.language as tl
def test_chained_matmul():
# Regression test for issue #1601
def chained_matmul_reference(a, b, c):
intermediate = torch.einsum('MK,NK->MN', a, b)
return torch.einsum('MN,NK->MK', intermediate, c)
@triton.jit
def chained_matmul_kernel(
A, # shape: (m, k)
B, # shape: (n, k)
C, # shape: (n, k)
out, # shape: (m, k)
m, n, k: tl.constexpr,
block_m: tl.constexpr,
block_n: tl.constexpr,
block_k: tl.constexpr):
tl.static_assert(block_k == k,
f"expected block_k == k but got {block_k} != {k}")
block_ix = tl.program_id(0)
a_tile = (block_ix * block_m + tl.arange(0, block_m))[:, None] * block_k \
+ tl.arange(0, block_k)[None, :]
a = tl.load(A + a_tile, mask=a_tile < m * k, other=0.0)
acc = tl.zeros([block_m, block_k], dtype=tl.float32)
for loop_block_start in range(0, n, block_n):
bc_tile = (loop_block_start + tl.arange(0, block_n))[:, None] * block_k \
+ tl.arange(0, block_k)[None, :]
b = tl.load(B + bc_tile, mask=bc_tile < n * k, other=0.0)
intermediate = tl.dot(a, tl.trans(b))
intermediate_mask = ((loop_block_start + tl.arange(0, block_n)) < n)[None, :] \
* (tl.arange(0, block_m) < m)[:, None]
intermediate = tl.where(intermediate_mask, intermediate, 0.0)
c = tl.load(C + bc_tile, mask=bc_tile < n * k)
acc += tl.dot(intermediate.to(A.dtype.element_ty), c)
tl.store(out + a_tile, acc.to(A.dtype.element_ty), mask=a_tile < m * k)
m, n, k = 32, 64, 128
block_m, block_n, block_k = 16, 32, k
grid = (triton.cdiv(m, block_m),)
a = torch.randint(low=0, high=2, size=(m, k), dtype=torch.float16,
device='cuda')
b = torch.randint(low=0, high=2, size=(n, k), dtype=torch.float16,
device='cuda')
c = torch.randint_like(b, low=0, high=2)
triton_result = torch.zeros_like(a)
torch_result = chained_matmul_reference(a, b, c)
chained_matmul_kernel[grid](a, b, c, triton_result, m, n, k,
block_m=block_m, block_n=block_n,
block_k=block_k)
assert (torch_result == triton_result).all()
def test_vecmat():
@triton.jit
def batched_vecmat(
# inputs
A, # shape: [dim_m, dim_k]
B, # shape: [dim_m, dim_n, dim_k]
# dimensions
dim_m, dim_n, dim_k,
# outputs
output,
# block information
block_m: tl.constexpr, block_n: tl.constexpr, block_k: tl.constexpr
):
m_index = tl.program_id(0)
n_index = tl.program_id(1)
# Output tile
output_tile = (m_index * block_m + tl.arange(0, block_m))[:, None] * dim_n \
+ (n_index * block_n + tl.arange(0, block_n))[None, :]
vecmat = tl.zeros([block_m, block_n], dtype=A.dtype.element_ty)
k_blocks = dim_k // block_k
for k_index in range(k_blocks):
# Load A tile
a_tile = (m_index * block_m + tl.arange(0, block_m))[:, None] * dim_k \
+ (k_index * block_k + tl.arange(0, block_k))[None, :]
a = tl.load(A + a_tile)
# Load B tile, transposed to [n, m, k] in order to broadcast A on a
# leading dimension.
b_tile = (m_index * block_m + tl.arange(0, block_m))[None, :, None] * dim_n * dim_k \
+ (n_index * block_n + tl.arange(0, block_n))[:, None, None] * dim_k \
+ (k_index * block_k + tl.arange(0, block_k))[None, None, :]
b = tl.load(B + b_tile)
expanded_a, _ = tl.broadcast(a, b)
vecmat += tl.trans(tl.sum(expanded_a * b, axis=2))
tl.store(output + output_tile, vecmat)
M, N, K = 128, 128, 128
block_m, block_n, block_k = 16, 32, 64
rs = RandomState(17)
A_vec = rs.randint(0, 4, (M, K)).astype('float32')
B_vec = rs.randint(0, 4, (M, N, K)).astype('float32')
A = A_vec
B = B_vec
A_tri = torch.tensor(A, device='cuda')
B_tri = torch.tensor(B, device='cuda')
C_tri = torch.zeros((M, N), dtype=torch.float32, device='cuda')
grid = (M // block_m, N // block_n)
batched_vecmat[grid](A_tri, B_tri, M, N, K, C_tri,
block_m=block_m, block_n=block_n, block_k=block_k,
num_warps=4, num_stages=1)
A_expanded = A[:, np.newaxis, :]
A_broadcasted = np.broadcast_to(A_expanded, (M, N, K))
AB = A_broadcasted * B
C_ref = np.sum(AB, axis=2)
np.testing.assert_allclose(C_ref, C_tri.cpu().numpy(), rtol=0.01, atol=1e-3)
@pytest.mark.parametrize("type", ["pre_load", "post_load", "post_pre_mixed", "post_load_two_iters", "post_load_three_iters"])
def test_iv_dependent_matmul(type):
@triton.jit
def kernel(
a_ptr, b_ptr, c_ptr,
M, N, K,
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
type: tl.constexpr
):
pid = tl.program_id(axis=0)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
pid_m = pid // num_pid_n
pid_n = pid % num_pid_n
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptr = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptr = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
a_ptrs = a_ptr
b_ptrs = b_ptr
if type == "post_load_two_iters":
a_ptrs_next = a_ptr + BLOCK_SIZE_K * stride_ak
b_ptrs_next = b_ptr + BLOCK_SIZE_K * stride_bk
elif type == "post_load_three_iters":
a_ptrs_next = a_ptr + BLOCK_SIZE_K * stride_ak
b_ptrs_next = b_ptr + BLOCK_SIZE_K * stride_bk
a_ptrs_next_next = a_ptr + 2 * BLOCK_SIZE_K * stride_ak
b_ptrs_next_next = b_ptr + 2 * BLOCK_SIZE_K * stride_bk
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
if type == "pre_load":
a_ptrs = a_ptr + k * BLOCK_SIZE_K * stride_ak
b_ptrs = b_ptr + k * BLOCK_SIZE_K * stride_bk
elif type == "post_pre_mixed":
a_ptrs = a_ptr + k * BLOCK_SIZE_K * stride_ak
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
accumulator += tl.dot(a, b)
if type == "post_load":
a_ptrs = a_ptr + (k + 1) * BLOCK_SIZE_K * stride_ak
b_ptrs = b_ptr + (k + 1) * BLOCK_SIZE_K * stride_bk
elif type == "post_pre_mixed":
b_ptrs = b_ptr + (k + 1) * BLOCK_SIZE_K * stride_bk
elif type == "post_load_two_iters":
a_ptrs = a_ptrs_next
b_ptrs = b_ptrs_next
a_ptrs_next = a_ptr + (k + 2) * BLOCK_SIZE_K * stride_ak
b_ptrs_next = b_ptr + (k + 2) * BLOCK_SIZE_K * stride_bk
elif type == "post_load_three_iters":
a_ptrs = a_ptrs_next
b_ptrs = b_ptrs_next
a_ptrs_next = a_ptrs_next_next
b_ptrs_next = b_ptrs_next_next
a_ptrs_next_next = a_ptr + (k + 3) * BLOCK_SIZE_K * stride_ak
b_ptrs_next_next = b_ptr + (k + 3) * BLOCK_SIZE_K * stride_bk
c = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
M = 256
K = 256
N = 256
BLOCK_SIZE_K = 32
BLOCK_SIZE_N = 32
BLOCK_SIZE_M = 32
a = torch.rand((M, K), device='cuda')
b = torch.rand((K, N), device='cuda')
torch_output = torch.mm(a, b)
triton_output = torch.empty_like(
torch_output, device=torch_output.device)
def grid(META):
return (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),)
num_stages = 4 if type == "post_load_three_iters" else 3
kernel[grid](a, b, triton_output, M, N, K, a.stride(0), a.stride(1),
b.stride(0), b.stride(1), triton_output.stride(0), triton_output.stride(1),
BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
type=type, num_stages=num_stages)
torch.testing.assert_allclose(torch_output, triton_output, rtol=1e-2, atol=1e-2)
| 9,166 | 38.683983 | 125 |
py
|
triton
|
triton-main/python/test/regression/test_performance.py
|
import subprocess
import sys
import pytest
import torch
import triton
import triton.language as tl
import triton.ops
from triton.testing import get_dram_gbps, get_max_tensorcore_tflops
DEVICE_NAME = {7: 'v100', 8: 'a100'}[torch.cuda.get_device_capability()[0]]
#######################
# Utilities
#######################
def print_perf(cur_ms, cur_util, ref_util):
# print on the same line cur_ms, cur_util and ref_util with 3 decimal places
print(f'{cur_ms:.3f} ms \t cur: {cur_util:.3f} \t ref: {ref_util:.3f} \t dif={cur_util - ref_util:.3f}', end='\t')
def nvsmi(attrs):
attrs = ','.join(attrs)
cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits']
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(',')
ret = [int(x) for x in ret]
return ret
#######################
# Matrix Multiplication
#######################
sm_clocks = {'v100': 1350, 'a100': 1350}
mem_clocks = {'v100': 877, 'a100': 1215}
matmul_data = {
# NOTE:
'a100': {
# square
(512, 512, 512): {'float16': 0.061, 'float32': 0.097, 'int8': 0.05},
(1024, 1024, 1024): {'float16': 0.283, 'float32': 0.313, 'int8': 0.169},
(2048, 2048, 2048): {'float16': 0.618, 'float32': 0.532, 'int8': 0.34},
(8192, 8192, 8192): {'float16': 0.786, 'float32': 0.754, 'int8': 0.51},
# tall-skinny
(16, 1024, 1024): {'float16': 0.006, 'float32': 0.009, 'int8': 0.005},
(16, 4096, 4096): {'float16': 0.057, 'float32': 0.051, 'int8': 0.026},
(16, 8192, 8192): {'float16': 0.077, 'float32': 0.077, 'int8': 0.043},
(64, 1024, 1024): {'float16': 0.018, 'float32': 0.023, 'int8': 0.017},
(64, 4096, 4096): {'float16': 0.150, 'float32': 0.000, 'int8': 0.097},
(64, 8192, 8192): {'float16': 0.338, 'float32': 0.000, 'int8': 0.174},
(1024, 64, 1024): {'float16': 0.029, 'float32': 0.046, 'int8': 0.017},
(4096, 64, 4096): {'float16': 0.179, 'float32': 0.214, 'int8': 0.102},
(8192, 64, 8192): {'float16': 0.278, 'float32': 0.000, 'int8': 0.177},
# test EVEN_K==False
(8192, 8192, 8176): {'float16': 0.786, 'float32': 0.696, 'int8': 0.51},
}
}
@pytest.mark.parametrize('M, N, K, dtype_str',
[(M, N, K, dtype_str)
for M, N, K in matmul_data[DEVICE_NAME].keys()
for dtype_str in ['float16', 'float32']])
def test_matmul(M, N, K, dtype_str):
stream = torch.cuda.Stream()
torch.cuda.set_stream(stream)
if dtype_str in ['float32', 'int8'] and DEVICE_NAME != 'a100':
pytest.skip('Only test float32 & int8 on a100')
if (M, N, K) in [(64, 4096, 4096), (64, 8192, 8192), (8192, 64, 8192)] and dtype_str == 'float32':
pytest.skip('Out of shared memory in float32')
dtype = {'float16': torch.float16, 'float32': torch.float32, 'int8': torch.int8}[dtype_str]
torch.manual_seed(0)
ref_gpu_util = matmul_data[DEVICE_NAME][(M, N, K)][dtype_str]
cur_sm_clock = nvsmi(['clocks.current.sm'])[0]
max_gpu_perf = get_max_tensorcore_tflops(dtype, clock_rate=cur_sm_clock * 1e3)
if dtype == torch.int8:
a = torch.randint(-128, 127, (M, K), dtype=dtype, device='cuda')
b = torch.randint(-128, 127, (N, K), dtype=dtype, device='cuda')
b = b.t() # only test row-col layout
else:
a = torch.randn((M, K), dtype=dtype, device='cuda')
b = torch.randn((K, N), dtype=dtype, device='cuda')
fn = lambda: triton.ops.matmul(a, b)
ms = triton.testing.do_bench_cudagraph(fn)
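    # a matmul performs M*N*K multiply-accumulates, i.e. 2*M*N*K FLOPs;
    # dividing by the elapsed time in ms and scaling by 1e-9 yields TFLOP/s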
cur_gpu_perf = 2. * M * N * K / ms * 1e-9
cur_gpu_util = cur_gpu_perf / max_gpu_perf
print_perf(ms, cur_gpu_util, ref_gpu_util)
triton.testing.assert_close(cur_gpu_util, ref_gpu_util, atol=0.02, rtol=0.01)
#######################
# Element-Wise
#######################
@triton.jit
def _add(x_ptr, y_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
elementwise_data = {
'a100': {
1024 * 16: {'float16': 0.003, 'float32': 0.007},
1024 * 64: {'float16': 0.013, 'float32': 0.026},
1024 * 256: {'float16': 0.053, 'float32': 0.105},
1024 * 1024: {'float16': 0.212, 'float32': 0.420},
1024 * 16384: {'float16': 0.762, 'float32': 0.812},
1024 * 65536: {'float16': 0.846, 'float32': 0.869},
# Non pow 2
1020 * 100: {'float16': 0.020, 'float32': 0.041},
10003 * 7007: {'float16': 0.513, 'float32': 0.861},
}
}
@pytest.mark.parametrize('N', elementwise_data[DEVICE_NAME].keys())
@pytest.mark.parametrize("dtype_str", ['float16', 'bfloat16', 'float32'])
def test_elementwise(N, dtype_str):
stream = torch.cuda.Stream()
torch.cuda.set_stream(stream)
torch.manual_seed(0)
if dtype_str in ['bfloat16'] and DEVICE_NAME != 'a100':
pytest.skip('Only test bfloat16 on a100')
dtype = {'float16': torch.float16, 'bfloat16': torch.bfloat16, 'float32': torch.float32}[dtype_str]
ref_dtype_str = 'float16' if dtype_str == 'bfloat16' else dtype_str
ref_gpu_util = elementwise_data[DEVICE_NAME][N][ref_dtype_str]
max_gpu_perf = get_dram_gbps()
z = torch.empty((N, ), dtype=dtype, device='cuda')
x = torch.randn_like(z)
y = torch.randn_like(z)
grid = lambda args: (triton.cdiv(N, args['BLOCK_SIZE']), )
fn = lambda: _add[grid](x, y, z, N, BLOCK_SIZE=1024)
ms = triton.testing.do_bench_cudagraph(fn)
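    # the add reads x and y and writes z: 3*N elements of element_size() bytes each;
    # dividing by the elapsed time in ms and scaling by 1e-6 yields GB/s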
cur_gpu_perf = 3. * N * z.element_size() / ms * 1e-6
cur_gpu_util = cur_gpu_perf / max_gpu_perf
print_perf(ms, cur_gpu_util, ref_gpu_util)
triton.testing.assert_close(cur_gpu_util, ref_gpu_util, atol=0.02, rtol=0.01)
#######################
# Flash-Attention
#######################
flash_attention_data = {
"a100": {
(4, 48, 4096, 64, True, True, 'forward', 'float16'): 0.433,
(4, 48, 4096, 64, True, True, 'forward', 'bfloat16'): 0.392,
(4, 48, 1024, 16, True, True, 'forward', 'float32'): 0.106,
(4, 48, 4096, 64, True, True, 'backward', 'float16'): 0.204,
(4, 48, 4096, 64, True, True, 'backward', 'bfloat16'): 0.202,
(4, 48, 1024, 16, True, True, 'backward', 'float32'): 0.089,
(4, 48, 4096, 64, True, False, 'forward', 'float16'): 0.242,
(4, 48, 4096, 64, True, False, 'forward', 'bfloat16'): 0.220,
(4, 48, 1024, 16, True, False, 'forward', 'float32'): 0.069,
(4, 48, 4096, 64, True, False, 'backward', 'float16'): 0.136,
(4, 48, 4096, 64, True, False, 'backward', 'bfloat16'): 0.135,
(4, 48, 1024, 16, True, False, 'backward', 'float32'): 0.052,
(4, 48, 4096, 64, False, True, 'forward', 'float16'): 0.432,
(4, 48, 4096, 64, False, True, 'forward', 'bfloat16'): 0.392,
(4, 48, 1024, 16, False, True, 'forward', 'float32'): 0.107,
(4, 48, 4096, 64, False, True, 'backward', 'float16'): 0.265,
(4, 48, 4096, 64, False, True, 'backward', 'bfloat16'): 0.257,
(4, 48, 1024, 16, False, True, 'backward', 'float32'): 0.128,
(4, 48, 4096, 64, False, False, 'forward', 'float16'): 0.251,
(4, 48, 4096, 64, False, False, 'forward', 'bfloat16'): 0.220,
(4, 48, 1024, 16, False, False, 'forward', 'float32'): 0.069,
(4, 48, 4096, 64, False, False, 'backward', 'float16'): 0.159,
(4, 48, 4096, 64, False, False, 'backward', 'bfloat16'): 0.138,
(4, 48, 1024, 16, False, False, 'backward', 'float32'): 0.076,
}
}
@pytest.mark.parametrize("dtype_str", ['float16', 'bfloat16', 'float32'])
@pytest.mark.parametrize("mode", ['forward', 'backward'])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("seq_par", [True, False])
@pytest.mark.parametrize("Z, H, N_CTX, D_HEAD", [[4, 48, 4096, 64]])
def test_flash_attention(Z, H, N_CTX, D_HEAD, seq_par, causal, mode, dtype_str):
stream = torch.cuda.Stream()
torch.cuda.set_stream(stream)
is_backward = mode == 'backward'
capability = torch.cuda.get_device_capability()
if capability[0] < 8:
pytest.skip("Flash attention only supported for compute capability < 80")
torch.manual_seed(20)
dtype = {'float16': torch.float16, 'bfloat16': torch.bfloat16, 'float32': torch.float32}[dtype_str]
# init data
if dtype_str == 'float32':
N_CTX = 1024
D_HEAD = 16
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.1, std=0.2).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.4, std=0.2).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.3, std=0.2).requires_grad_()
sm_scale = 0.2
# benchmark
fn = lambda: triton.ops.attention(q, k, v, causal, sm_scale, seq_par)
if is_backward:
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench_cudagraph(fn)
# compute flops
flops_per_matmul = 2. * Z * H * N_CTX * N_CTX * D_HEAD * 0.5
total_flops = 2 * flops_per_matmul
if is_backward:
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
cur_gpu_perf = total_flops / ms * 1e-9
# maximum flops
cur_sm_clock = nvsmi(['clocks.current.sm'])[0]
max_gpu_perf = get_max_tensorcore_tflops(dtype, clock_rate=cur_sm_clock * 1e3)
cur_gpu_util = cur_gpu_perf / max_gpu_perf
ref_gpu_util = flash_attention_data[DEVICE_NAME][(Z, H, N_CTX, D_HEAD, seq_par, causal, mode, dtype_str)]
print_perf(ms, cur_gpu_util, ref_gpu_util)
triton.testing.assert_close(cur_gpu_util, ref_gpu_util, atol=0.02, rtol=0.01)
| 9,987 | 42.807018 | 118 |
py
|
triton
|
triton-main/python/triton/testing.py
|
import functools
import os
import subprocess
import sys
from contextlib import contextmanager
from ._C.libtriton.triton import runtime
def nvsmi(attrs):
attrs = ','.join(attrs)
cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits']
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(',')
ret = [int(x) for x in ret]
return ret
def do_bench_cudagraph(fn, rep=20, grad_to_none=None):
    """
    Benchmark the runtime of the provided function by capturing it into a CUDA graph and replaying the graph.
    :param fn: Function to benchmark
    :type fn: Callable
    :param rep: Repetition time (in ms)
    :type rep: int
    :param grad_to_none: Reset the gradient of the provided tensor to None
    :type grad_to_none: torch.tensor, optional
    """
    import torch
if torch.cuda.current_stream() == torch.cuda.default_stream():
raise RuntimeError("Cannot capture graph in default stream. Please use side stream in benchmark code.")
# record CUDAGraph
fn()
if grad_to_none is not None:
for x in grad_to_none:
x.detach_()
x.requires_grad_(True)
x.grad = None
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
fn()
torch.cuda.synchronize()
fn = lambda: g.replay()
# Estimate the runtime of the function
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
fn()
end_event.record()
torch.cuda.synchronize()
estimate_ms = start_event.elapsed_time(end_event)
    # compute the number of repetitions needed to fill `rep` ms
    n_repeat = max(1, int(rep / estimate_ms))
    # allocate start/end CUDA events, one pair per timed repetition
start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
ret = []
n_retries = 50
for _ in range(n_retries):
# Benchmark
torch.cuda.synchronize()
for i in range(n_repeat):
# we don't want `fn` to accumulate gradient values
# if it contains a backward pass. So we clear the
# provided gradients
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
# record time of `fn`
start_event[i].record()
fn()
end_event[i].record()
torch.cuda.synchronize()
times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)])
ret.append(torch.min(times))
return torch.mean(torch.tensor(ret)).item()
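# A minimal usage sketch for do_bench_cudagraph (illustrative sizes, not called
# anywhere in this module); graph capture requires running on a non-default stream.
def _do_bench_cudagraph_sketch():
    import torch
    stream = torch.cuda.Stream()
    torch.cuda.set_stream(stream)
    x = torch.randn(1 << 20, device='cuda')
    y = torch.randn_like(x)
    return do_bench_cudagraph(lambda: x + y)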
def do_bench(fn, warmup=25, rep=100, grad_to_none=None,
             quantiles=None,
             fast_flush=True,
             return_mode="mean"):
    """
    Benchmark the runtime of the provided function. By default the mean runtime of
    :code:`fn` is returned; pass :code:`quantiles` to get performance percentiles instead.
    :param fn: Function to benchmark
    :type fn: Callable
    :param warmup: Warmup time (in ms)
    :type warmup: int
    :param rep: Repetition time (in ms)
    :type rep: int
    :param grad_to_none: Reset the gradient of the provided tensor to None
    :type grad_to_none: torch.tensor, optional
    :param quantiles: Performance percentiles to return instead of the single aggregate.
    :type quantiles: list[float], optional
    :param fast_flush: Use faster kernel to flush L2 cache between measurements
    :type fast_flush: bool
    :param return_mode: Aggregate returned when :code:`quantiles` is None: "mean", "median", "min" or "max".
    :type return_mode: str
    """
    assert return_mode in ["min", "max", "mean", "median"]
    import torch
fn()
torch.cuda.synchronize()
# We maintain a buffer of 256 MB that we clear
# before each kernel call to make sure that the L2
# doesn't contain any input data before the run
if fast_flush:
cache = torch.empty(int(256e6 // 4), dtype=torch.int, device='cuda')
else:
cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda')
# Estimate the runtime of the function
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(5):
cache.zero_()
fn()
end_event.record()
torch.cuda.synchronize()
estimate_ms = start_event.elapsed_time(end_event) / 5
    # compute the number of warmup and repeat iterations
n_warmup = max(1, int(warmup / estimate_ms))
n_repeat = max(1, int(rep / estimate_ms))
start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
# Warm-up
for _ in range(n_warmup):
fn()
# Benchmark
for i in range(n_repeat):
# we don't want `fn` to accumulate gradient values
# if it contains a backward pass. So we clear the
# provided gradients
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
# we clear the L2 cache before each run
cache.zero_()
# record time of `fn`
start_event[i].record()
fn()
end_event[i].record()
    # wait for all timed kernels to finish before reading the events
torch.cuda.synchronize()
times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)], dtype=torch.float)
if quantiles is not None:
ret = torch.quantile(times, torch.tensor(quantiles, dtype=torch.float)).tolist()
if len(ret) == 1:
ret = ret[0]
return ret
return getattr(torch, return_mode)(times).item()
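# A minimal usage sketch for do_bench (illustrative sizes, not called anywhere in
# this module): the mean runtime in ms by default, or percentiles when requested.
def _do_bench_sketch():
    import torch
    x = torch.randn(1 << 20, device='cuda')
    y = torch.randn_like(x)
    mean_ms = do_bench(lambda: x + y)
    p20, p50, p80 = do_bench(lambda: x + y, quantiles=[0.2, 0.5, 0.8])
    return mean_ms, (p20, p50, p80)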
def assert_close(x, y, atol=None, rtol=None, err_msg=''):
import numpy as np
import torch
# canonicalize arguments to be tensors
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
if not isinstance(y, torch.Tensor):
y = torch.tensor(y)
# absolute tolerance
if atol is None:
atol = 1e-2
atol = atol(x.dtype) if callable(atol) else atol
# relative tolerance hook
if rtol is None:
rtol = 0.
rtol = rtol(x.dtype) if callable(rtol) else rtol
# we use numpy instead of pytorch
# as it seems more memory efficient
# pytorch tends to oom on large tensors
if isinstance(x, torch.Tensor):
if x.dtype == torch.bfloat16:
x = x.float()
x = x.cpu().detach().numpy()
if isinstance(y, torch.Tensor):
if y.dtype == torch.bfloat16:
y = y.float()
y = y.cpu().detach().numpy()
# we handle size==1 case separately as we can
# provide better error message there
if x.size > 1 or y.size > 1:
np.testing.assert_allclose(x, y, atol=atol, rtol=rtol, equal_nan=True)
return
if not np.allclose(x, y, atol=atol, rtol=rtol):
raise AssertionError(f'{err_msg} {x} is not close to {y} (atol={atol}, rtol={rtol})')
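# Tiny illustration of assert_close (arbitrary values, not called anywhere in this
# module); tolerances may also be passed as callables of the input dtype.
def _assert_close_sketch():
    import torch
    assert_close(torch.tensor([1.0, 2.0]), torch.tensor([1.001, 2.0]), atol=1e-2, rtol=0)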
class Benchmark:
"""
This class is used by the :code:`perf_report` function to generate line plots with a concise API.
"""
def __init__(
self,
x_names,
x_vals,
line_arg,
line_vals,
line_names,
plot_name,
args,
xlabel='',
ylabel='',
x_log=False,
y_log=False,
color=None,
styles=None,
):
"""
Constructor
:param x_names: Name of the arguments that should appear on the x axis of the plot. If the list contains more than one element, all the arguments are assumed to have the same value.
:type x_names: List[str]
:param x_vals: List of values to use for the arguments in :code:`x_names`.
:type x_vals: List[Any]
:param line_arg: Argument name for which different values correspond to different lines in the plot.
:type line_arg: str
:param line_vals: List of values to use for the arguments in :code:`line_arg`.
:type line_vals: List[str]
:param line_names: Label names for the different lines.
:type line_names: List[str]
:param plot_name: Name of the plot.
:type plot_name: str
:param args: List of arguments to remain fixed throughout the benchmark.
:type args: List[str]
:param xlabel: Label for the x axis of the plot.
:type xlabel: str, optional
:param ylabel: Label for the y axis of the plot.
:type ylabel: str, optional
:param x_log: Whether the x axis should be log scale.
:type x_log: bool, optional
:param y_log: Whether the y axis should be log scale.
:type y_log: bool, optional
"""
self.x_names = x_names
self.x_vals = x_vals
self.x_log = x_log
self.line_arg = line_arg
self.line_vals = line_vals
self.line_names = line_names
self.y_log = y_log
self.styles = styles
# plot info
self.xlabel = xlabel
self.ylabel = ylabel
self.plot_name = plot_name
self.args = args
class Mark:
def __init__(self, fn, benchmarks):
self.fn = fn
self.benchmarks = benchmarks
def _run(self, bench, save_path, show_plots, print_data):
import os
import matplotlib.pyplot as plt
import pandas as pd
y_mean = bench.line_names
y_min = [f'{x}-min' for x in bench.line_names]
y_max = [f'{x}-max' for x in bench.line_names]
df = pd.DataFrame(columns=[bench.x_names[0]] + y_mean + y_min + y_max)
for x in bench.x_vals:
x_args = {x_name: x for x_name in bench.x_names}
row_mean, row_min, row_max = [], [], []
for y in bench.line_vals:
ret = self.fn(**x_args, **{bench.line_arg: y}, **bench.args)
try:
y_mean, y_min, y_max = ret
except TypeError:
y_mean, y_min, y_max = ret, None, None
row_mean += [y_mean]
row_min += [y_min]
row_max += [y_max]
df.loc[len(df)] = [x] + row_mean + row_min + row_max
if bench.plot_name:
plt.figure()
ax = plt.subplot()
x = bench.x_names[0]
for i, y in enumerate(bench.line_names):
y_min, y_max = df[y + '-min'], df[y + '-max']
col = bench.styles[i][0] if bench.styles else None
sty = bench.styles[i][1] if bench.styles else None
ax.plot(df[x], df[y], label=y, color=col, ls=sty)
if y_min is not None and y_max is not None:
ax.fill_between(df[x], y_min, y_max, alpha=0.15, color=col)
ax.legend()
xlabel = bench.xlabel if bench.xlabel else " = ".join(bench.x_names)
ax.set_xlabel(xlabel)
ax.set_ylabel(bench.ylabel)
# ax.set_title(bench.plot_name)
ax.set_xscale("log" if bench.x_log else "linear")
ax.set_yscale("log" if bench.y_log else "linear")
if show_plots:
plt.show()
if save_path:
plt.savefig(os.path.join(save_path, f"{bench.plot_name}.png"))
df = df[[bench.x_names[0]] + bench.line_names]
if print_data:
print(bench.plot_name + ':')
print(df)
if save_path:
df.to_csv(os.path.join(save_path, f"{bench.plot_name}.csv"), float_format='%.1f', index=False)
def run(self, show_plots=False, print_data=False, save_path=''):
has_single_bench = isinstance(self.benchmarks, Benchmark)
benchmarks = [self.benchmarks] if has_single_bench else self.benchmarks
if save_path:
html = open(os.path.join(save_path, "results.html"), "w")
html.write("<html><body>\n")
for bench in benchmarks:
self._run(bench, save_path, show_plots, print_data)
if save_path:
html.write(f"<image src=\"{bench.plot_name}.png\"/>\n")
if save_path:
html.write("</body></html>\n")
def perf_report(benchmarks):
"""
Mark a function for benchmarking. The benchmark can then be executed by using the :code:`.run` method on the return value.
:param benchmarks: Benchmarking configurations.
:type benchmarks: List of :class:`Benchmark`
"""
wrapper = lambda fn: Mark(fn, benchmarks)
return wrapper
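# A minimal sketch of the Benchmark / perf_report API defined above; the vector-add
# benchmark, sizes and provider name are illustrative only and the function is not
# called anywhere in this module.
def _perf_report_sketch():
    import torch
    @perf_report(Benchmark(
        x_names=['N'], x_vals=[2**i for i in range(12, 20)],
        line_arg='provider', line_vals=['torch'], line_names=['torch'],
        plot_name='vector-add-sketch', args={}))
    def _bench(N, provider):
        x = torch.randn(N, device='cuda')
        y = torch.randn_like(x)
        ms, min_ms, max_ms = do_bench(lambda: x + y, quantiles=[0.5, 0.2, 0.8])
        return ms, min_ms, max_ms
    _bench.run(show_plots=False, print_data=True)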
def get_dram_gbps(backend=None, device=None):
''' return DRAM bandwidth in GB/s '''
import torch
from .runtime import driver
if not backend:
backend = runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
mem_clock_khz = driver.utils.get_device_properties(device)["mem_clock_rate"] # in kHz
bus_width = driver.utils.get_device_properties(device)["mem_bus_width"]
bw_gbps = mem_clock_khz * bus_width * 2 / 1e6 / 8 # In GB/s
return bw_gbps
def get_max_tensorcore_tflops(dtype, backend=None, device=None, clock_rate=None):
import torch
from .runtime import driver
if not backend:
backend = runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4
if not clock_rate:
clock_rate = driver.utils.get_device_properties(device)["sm_clock_rate"] # in kHz
capability = torch.cuda.get_device_capability(device)
if capability[0] < 8:
assert dtype == torch.float16
ops_per_sub_core = 256 # 2 4x4x4 Tensor Cores
else:
if dtype == torch.float32:
ops_per_sub_core = 256
elif dtype in [torch.float16, torch.bfloat16]:
ops_per_sub_core = 512
elif dtype == torch.int8:
ops_per_sub_core = 1024
else:
raise RuntimeError("dtype not supported")
tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
return tflops
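# Worked example of the formula above (assumed A100 figures: 108 multiprocessors,
# i.e. 432 sub-cores, and a 1410 MHz SM clock): for float16 this gives
# 432 * 1.41e6 * 512 * 1e-9 ~= 312 TFLOP/s.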
# create decorator that wraps test function into
# a cuda-memcheck system call
def cuda_memcheck(**target_kwargs):
def decorator(test_fn):
@functools.wraps(test_fn)
def wrapper(*args, **kwargs):
import psutil
ppid_name = psutil.Process(os.getppid()).name()
run_cuda_memcheck = target_kwargs.items() <= kwargs.items()
if run_cuda_memcheck and ppid_name != "cuda-memcheck":
path = os.path.realpath(test_fn.__globals__["__file__"])
# get path of current file
env = {"PATH": os.environ["PATH"], "PYTORCH_NO_CUDA_MEMORY_CACHING": "1"}
assert 'request' in kwargs, "memcheck'ed test must have a (possibly unused) `request` fixture"
test_id = kwargs['request'].node.callspec.id
cmd = f"{path}::{test_fn.__name__}[{test_id}]"
out = subprocess.run(["cuda-memcheck", "pytest", "-vs", cmd], capture_output=True, env=env)
assert out.returncode == 0, "cuda-memcheck returned an error: bounds checking failed"
assert "ERROR SUMMARY: 0 errors" in str(out.stdout)
else:
test_fn(*args, **kwargs)
return wrapper
return decorator
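# Sketch of how the decorator above is meant to be used (names are illustrative,
# nothing in this module calls it): the wrapped test re-executes itself under
# ``cuda-memcheck pytest ...`` whenever its parametrization includes memcheck=True,
# and it must accept the ``request`` fixture.
@cuda_memcheck(memcheck=True)
def _cuda_memcheck_sketch(request, memcheck):
    import torch
    assert torch.zeros(16, device='cuda').sum().item() == 0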
def nvsmi_attr(attrs):
attrs = ",".join(attrs)
cmd = [
"nvidia-smi",
"-i",
"0",
"--query-gpu=" + attrs,
"--format=csv,noheader,nounits",
]
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(",")
ret = [int(x) for x in ret]
return ret
@contextmanager
def set_gpu_clock(ref_sm_clock=1350, ref_mem_clock=1215):
try:
subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "1"])
subprocess.check_output(
[
"nvidia-smi",
"-i",
"0",
f"--lock-gpu-clocks={ref_sm_clock},{ref_sm_clock}",
]
)
subprocess.check_output(
[
"nvidia-smi",
"-i",
"0",
f"--lock-memory-clocks={ref_mem_clock},{ref_mem_clock}",
]
)
cur_sm_clock = nvsmi_attr(["clocks.current.sm"])[0]
cur_mem_clock = nvsmi_attr(["clocks.current.memory"])[0]
assert abs(cur_sm_clock - ref_sm_clock) < 10, f"GPU SMs must run at {ref_sm_clock} MHz"
        assert abs(cur_mem_clock - ref_mem_clock) < 10, f"GPU memory must run at {ref_mem_clock} MHz"
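        # hard-coded A100 peaks below: 108 SMs x 4 sub-cores x 256 fp16 FLOPs/cycle,
        # and a 5120-bit (640-byte) double-data-rate HBM2 interface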
tflops = 1e-6 * 2 * 108 * 4 * 256 * ref_sm_clock
gbps = 640 * 2 * ref_mem_clock * 1e-3
yield tflops, gbps
finally:
subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "0"])
subprocess.check_output(["nvidia-smi", "-i", "0", "-rgc"])
subprocess.check_output(["nvidia-smi", "-i", "0", "-rmc"])
def get_max_simd_tflops(dtype, backend=None, device=None):
import torch
from .runtime import driver
if not backend:
backend = runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4
clock_rate = driver.utils.get_device_properties(device)["sm_clock_rate"] # in kHz
capability = torch.cuda.get_device_capability()
if capability[0] < 8:
if dtype == torch.float32:
ops_per_sub_core = 32 # 2*16
elif dtype == torch.float16:
ops_per_sub_core = 64
else:
raise RuntimeError("dtype not supported")
else:
if dtype == torch.float32:
ops_per_sub_core = 32
elif dtype in [torch.float16, torch.bfloat16]:
ops_per_sub_core = 64
else:
raise RuntimeError("dtype not supported")
tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
return tflops
| 17,704 | 35.505155 | 189 |
py
|
triton
|
triton-main/python/triton/__init__.py
|
"""isort:skip_file"""
__version__ = '2.1.0'
# ---------------------------------------
# Note: import order is significant here.
# submodules
from .runtime import (
autotune,
Config,
heuristics,
JITFunction,
KernelInterface,
reinterpret,
TensorWrapper,
OutOfResources,
MockTensor,
)
from .runtime.jit import jit
from .compiler import compile, CompilationError
from . import language
from . import testing
__all__ = [
"autotune",
"cdiv",
"CompilationError",
"compile",
"Config",
"heuristics",
"impl",
"jit",
"JITFunction",
"KernelInterface",
"language",
"MockTensor",
"next_power_of_2",
"ops",
"OutOfResources",
"reinterpret",
"runtime",
"TensorWrapper",
"testing",
"tools",
]
# -------------------------------------
# misc. utilities that don't fit well
# into any specific module
# -------------------------------------
def cdiv(x: int, y: int):
return (x + y - 1) // y
def next_power_of_2(n: int):
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
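# Quick illustration of the two helpers above (arbitrary values, deliberately not
# exported): cdiv rounds the division up, next_power_of_2 rounds up to a power of two.
def _int_utils_sketch():
    assert cdiv(10, 3) == 4
    assert next_power_of_2(1000) == 1024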
| 1,230 | 16.84058 | 67 |
py
|
triton
|
triton-main/python/triton/tools/disasm.py
|
# MIT License
# Copyright (c) 2020 Da Yan @ HKUST
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import subprocess
FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*')
SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*')
FNAME_RE = re.compile(r'\s*Function : (\w+)\s*')
BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);')
def parseCtrl(sline):
enc = int(SLINE_RE.match(sline).group(1), 16)
stall = (enc >> 41) & 0xf
yld = (enc >> 45) & 0x1
wrtdb = (enc >> 46) & 0x7
readb = (enc >> 49) & 0x7
watdb = (enc >> 52) & 0x3f
yld_str = 'Y' if yld == 0 else '-'
wrtdb_str = '-' if wrtdb == 7 else str(wrtdb)
readb_str = '-' if readb == 7 else str(readb)
watdb_str = '--' if watdb == 0 else f'{watdb:02d}'
return f'{watdb_str}:{readb_str}:{wrtdb_str}:{yld_str}:{stall:x}'
def processSassLines(fline, sline, labels):
asm = FLINE_RE.match(fline).group(1)
    # Remove trailing space
if asm.endswith(" ;"):
asm = asm[:-2] + ";"
ctrl = parseCtrl(sline)
# BRA target address
if BRA_RE.match(asm) is not None:
target = int(BRA_RE.match(asm).group(2), 16)
        if target not in labels:
            labels[target] = len(labels)
return (f'{ctrl}', f'{asm}')
def extract(file_path, fun):
if fun is None:
sass_str = subprocess.check_output(["cuobjdump", "-sass", file_path])
else:
sass_str = subprocess.check_output(["cuobjdump", "-fun", fun, "-sass", file_path])
sass_lines = sass_str.splitlines()
line_idx = 0
while line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
# format:
# function : <function_name>
# .headerflags: ...
# /*0000*/ asmstr /*0x...*/
# /*0x...*/
# Looking for new function header (function: <name>)
while FNAME_RE.match(line) is None:
line_idx += 1
if line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
else:
return
fname = FNAME_RE.match(line).group(1)
ret = ''
ret += f'Function:{fname}\n'
line_idx += 2 # bypass .headerflags
line = sass_lines[line_idx].decode()
# Remapping address to label
labels = {} # address -> label_idx
        # store sass asm in a buffer and then print it (so labels can be emitted first)
# (ctrl, asm)
asm_buffer = []
while FLINE_RE.match(line) is not None:
# First line (Offset ASM Encoding)
fline = sass_lines[line_idx].decode()
line_idx += 1
# Second line (Encoding)
sline = sass_lines[line_idx].decode()
line_idx += 1
asm_buffer.append(processSassLines(fline, sline, labels))
# peek the next line
line = sass_lines[line_idx].decode()
# Print sass
# label naming convention: LBB#i
for idx, (ctrl, asm) in enumerate(asm_buffer):
# Print label if this is BRA target
offset = idx * 16
if offset in labels:
label_name = f'LBB{labels[offset]}'
ret += f'{label_name}:\n'
ret += ctrl + '\t'
# if this is BRA, remap offset to label
if BRA_RE.match(asm):
target = int(BRA_RE.match(asm).group(2), 16)
target_name = f'LBB{labels[target]}'
asm = BRA_RE.sub(rf'\1{target_name};', asm)
ret += asm + '\n'
ret += '\n'
return ret
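# Minimal usage sketch (file and kernel names are made up): dump the annotated SASS
# of every kernel in a cubin, or of a single mangled kernel only.
def _extract_sketch():
    print(extract("kernel.cubin", None))
    print(extract("kernel.cubin", "add_kernel_0d1d2d"))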
| 4,593 | 36.349593 | 90 |
py
|
triton
|
triton-main/python/triton/tools/link.py
|
from collections import defaultdict
from pathlib import Path
from typing import Sequence, Union
from dataclasses import dataclass
def _exists(x):
return x is not None
class LinkerError(Exception):
pass
@dataclass
class KernelLinkerMeta:
arg_names: Sequence[str]
arg_ctypes: Sequence[str]
sizes: Sequence[Union[int, None]]
sig_hash: str
suffix: str
num_specs: int
""" number of specialized arguments """
class HeaderParser:
def __init__(self) -> None:
import re
# [kernel_name, c signature]
self.linker_directives = re.compile("//[\\s]*tt-linker:[\\s]*([\\w]+):(.+)")
# [name, suffix]
self.kernel_name = re.compile("^([\\w]+)_([\\w]+)_([\\w]+)$")
# [(argnum, d|c)]
self.kernel_suffix = re.compile("([0-9]+)([c,d])")
# [(type, name)]
self.c_sig = re.compile("[\\s]*(\\w+)\\s(\\w+)[,]?")
self.kernels = defaultdict(list)
def extract_linker_meta(self, header: str):
for ln in header.splitlines():
if ln.startswith("//"):
m = self.linker_directives.match(ln)
if _exists(m):
ker_name, c_sig = m.group(1), m.group(2)
name, sig_hash, suffix = self._match_name(ker_name)
c_types, arg_names = self._match_c_sig(c_sig)
num_specs, sizes = self._match_suffix(suffix)
self._add_kernel(
name,
KernelLinkerMeta(
arg_names=arg_names,
arg_ctypes=c_types,
sizes=sizes,
sig_hash=sig_hash,
suffix=suffix,
num_specs=num_specs,
),
)
def _match_name(self, ker_name: str):
m = self.kernel_name.match(ker_name)
if _exists(m):
name, sig_hash, suffix = m.group(1), m.group(2), m.group(3)
return name, sig_hash, suffix
raise LinkerError(f"{ker_name} is not a valid kernel name")
def _match_c_sig(self, c_sig: str):
m = self.c_sig.findall(c_sig)
if len(m):
tys, args = [], []
for (ty, arg_name) in m:
tys.append(ty)
args.append(arg_name)
return tys, args
raise LinkerError(f"{c_sig} is not a valid argument signature")
def _match_suffix(self, suffix: str):
m = self.kernel_suffix.findall(suffix)
if not len(m):
raise LinkerError(f"{suffix} is not a valid kernel suffix")
sizes = []
num_specs = len(m)
s2i = {"c": 1, "d": 16}
for (argnum, arg_size_ann) in m:
while len(sizes) < int(argnum):
sizes.append(None)
sizes.append(s2i[arg_size_ann])
return num_specs, sizes
def _add_kernel(self, name: str, ker: KernelLinkerMeta):
if name in self.kernels:
last: KernelLinkerMeta = self.kernels[name][-1]
for (cur, new_) in zip(last.arg_ctypes, ker.arg_ctypes):
if cur != new_:
raise LinkerError(
f"Mismatched signature for kernel {name}: \n\texisting sig is: {','.join(last.arg_ctypes)}\n\tcurrent is: {','.join(ker.arg_ctypes)}"
)
self.kernels[name].append(ker)
def gen_signature(m):
return ", ".join([f"{ty} {arg}" for ty, arg in zip(m.arg_ctypes, m.arg_names)])
def make_decls(name: str, metas: Sequence[KernelLinkerMeta]) -> str:
return f"""
CUresult {name}(CUstream stream, unsigned int gX, unsigned int gY, unsigned int gZ, {gen_signature(metas[-1])});
void load_{name}();
void unload_{name}();
"""
def make_kernel_dispatcher(name: str, metas: Sequence[KernelLinkerMeta]) -> str:
src = f"// launcher for: {name}\n"
for meta in sorted(metas, key=lambda m: -m.num_specs):
src += f"CUresult {name}_{meta.sig_hash}_{meta.suffix}(CUstream stream, unsigned int gX, unsigned int gY, unsigned int gZ, {gen_signature(meta)});\n"
src += "\n"
src += f"CUresult {name}(CUstream stream, unsigned int gX, unsigned int gY, unsigned int gZ, {gen_signature(metas[-1])}){{"
src += "\n"
for meta in sorted(metas, key=lambda m: -m.num_specs):
cond_fn = lambda val, hint: f"({val} % {hint} == 0)" if hint == 16 else f"({val} == {hint})" if hint == 1 else None
conds = " && ".join([cond_fn(val, hint) for val, hint in zip(meta.arg_names, meta.sizes) if hint is not None])
src += f" if ({conds})\n"
src += f" return {name}_{meta.sig_hash}_{meta.suffix}(stream, gX, gY, gZ, {', '.join(meta.arg_names)});\n"
src += "}\n"
for mode in ["load", "unload"]:
src += f"\n// {mode} for: {name}\n"
for meta in sorted(metas, key=lambda m: -m.num_specs):
src += f"void {mode}_{name}_{meta.sig_hash}_{meta.suffix}();\n"
src += f"void {mode}_{name}() {{"
src += "\n"
for meta in sorted(metas, key=lambda m: -m.num_specs):
src += f" {mode}_{name}_{meta.sig_hash}_{meta.suffix}();\n"
src += "}\n"
return src
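# Worked illustration (hypothetical kernel and argument names): a header entry for
#   add_kernel_a1b2c3d4_0d1d2c
# is parsed as name="add_kernel", sig_hash="a1b2c3d4", suffix="0d1d2c"; the suffix
# means arguments 0 and 1 are hinted divisible by 16 ("d" -> 16) and argument 2 is
# specialised to the constant 1 ("c" -> 1), so the dispatcher generated above guards
# that entry point with `if ((arg0 % 16 == 0) && (arg1 % 16 == 0) && (arg2 == 1))`.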
desc = """
Triton ahead-of-time linker:
This program takes in header files generated by compile.py, and generates a
single entry-point responsible for dispatching the user's input to the right
kernel given the specializations that were compiled.
Example usage:
python link.py /path/to/headers/*.h -o kernel_name
"""
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description=desc)
parser.add_argument(
"headers",
nargs="+",
help="Paths to header files to link. Must include linker directive annotations (autogenerated by ttc)",
)
parser.add_argument("--out", "-o", type=Path, help="Out filename")
parser.add_argument("--prefix", type=str, default="", help="String to prefix kernel dispatcher names")
args = parser.parse_args()
# metadata
parser = HeaderParser()
includes = []
for header in args.headers:
h_path = Path(header)
h_str = h_path.read_text()
includes.append(h_path.name)
parser.extract_linker_meta(h_str)
# generate headers
decls = [make_decls(name, meta) for name, meta in parser.kernels.items()]
with args.out.with_suffix(".h").open("w") as fp:
fp.write("#include <cuda.h>\n" + "\n".join(decls))
# generate source
defs = [make_kernel_dispatcher(name, meta) for name, meta in parser.kernels.items()]
with args.out.with_suffix(".c").open("w") as fp:
out = ""
out += "#include <cuda.h>\n"
out += "#include <stdint.h>\n"
out += "\n"
out += "\n".join(defs)
fp.write(out)
| 6,857 | 34.169231 | 157 |
py
|
triton
|
triton-main/python/triton/tools/compile.py
|
import binascii
import hashlib
import importlib.util
import sys
from argparse import ArgumentParser
from pathlib import Path
from typing import List
import triton
from triton.compiler.code_generator import kernel_suffix
from triton.compiler.make_launcher import ty_to_cpp
desc = """
Triton ahead-of-time compiler:
This program compiles the kernel with name `kernel-name` in the file at the
provided `path` into self-contained C source-code that embeds the `cubin`
data along with utilities to load, unload and launch the kernel.
The signature is provided as a list of (optionally divisibility-hinted) types
or constexpr values, e.g.
`compile.py --kernel-name kernel --signature "*f32:16, i32:16, 1024, i32" --out-name kernel /path/to/kernel.py`
will compile triton.JITFunction of name `kernel` inside the file `/path/to/kernel.py`.
Said kernel will be specialized such that arguments 0 and 1 are assumed to be multiples of 16,
and argument 2 is assumed to be a compile-time constant of value 1024, i.e. it won't be part of the generated prototype.
The resulting entry point will have signature
CUresult kernel_{specialization_suffix}(CUstream stream, unsigned gX, unsigned gY, unsigned gZ, float* arg0, int32_t arg1, int32_t arg2)
Different such specialized entry points can be combined using the `linker.py` script.
NOTE: when resolving the scope of /path/to/kernel.py, the file will be executed from within its parent directory with the python interpreter
used to run this `compile.py` script
"""
if __name__ == "__main__":
# command-line arguments
parser = ArgumentParser(description=desc)
parser.add_argument("path", help="Path to Python source containing desired kernel in its scope. File will be executed.")
parser.add_argument("--kernel-name", "-n", type=str, default="", help="Name of the kernel to compile", required=True)
parser.add_argument("--num-warps", "-w", type=int, default=1, help="Number of warps to launch the kernel")
parser.add_argument("--out-name", "-on", type=str, default=None, help="Out name for the compiled kernel")
parser.add_argument("--out-path", "-o", type=Path, default=None, help="Out filename")
parser.add_argument("--signature", "-s", type=str, help="Signature of the kernel", required=True)
args = parser.parse_args()
out_name = args.out_name if args.out_name else args.kernel_name
out_path = args.out_path if args.out_path else out_name
# execute python sources and extract functions wrapped in JITFunction
arg_path = Path(args.path)
sys.path.insert(0, str(arg_path.parent))
spec = importlib.util.spec_from_file_location(arg_path.stem, arg_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
kernel = getattr(mod, args.kernel_name)
# validate and parse signature
signature = list(map(lambda s: s.strip(" "), args.signature.split(",")))
def hash_signature(signature: List[str]):
m = hashlib.sha256()
m.update(" ".join(signature).encode())
return m.hexdigest()[:8]
sig_hash = hash_signature(signature)
def constexpr(s):
try:
ret = int(s)
return ret
except ValueError:
pass
try:
ret = float(s)
return ret
except ValueError:
pass
return None
hints = {i: constexpr(s.split(":")[1]) for i, s in enumerate(signature) if ":" in s}
hints = {k: v for k, v in hints.items() if v is not None}
constexprs = {i: constexpr(s) for i, s in enumerate(signature)}
constexprs = {k: v for k, v in constexprs.items() if v is not None}
signature = {i: s.split(":")[0] for i, s in enumerate(signature) if i not in constexprs}
# compile ast into cubin
for h in hints.values():
assert h in [1, 16], f"Only 1 and 16 are valid hints, got {h}"
divisible_by_16 = [i for i, h in hints.items() if h == 16]
equal_to_1 = [i for i, h in hints.items() if h == 1]
config = triton.compiler.instance_descriptor(divisible_by_16=divisible_by_16, equal_to_1=equal_to_1)
ccinfo = triton.compile(kernel, signature=signature, constants=constexprs, configs=[config], num_warps=args.num_warps)
arg_names = [kernel.arg_names[i] for i in signature.keys()]
# dump C stub code
suffix = kernel_suffix(signature.values(), config)
func_name = '_'.join([out_name, sig_hash, suffix])
triton_kernel_name = '_'.join([args.kernel_name, suffix])
hex_ = str(binascii.hexlify(ccinfo.asm["cubin"]))[2:-1]
params = {
"kernel_name": func_name,
"triton_kernel_name": triton_kernel_name,
"bin_size": len(hex_),
"bin_data": ", ".join([f"0x{x}{y}" for x, y in zip(hex_[::2], hex_[1::2])]),
"signature": ", ".join([f"{ty_to_cpp(ty)} {name}" for name, ty in zip(arg_names, signature.values())]),
"arg_pointers": ", ".join([f"&{arg}" for arg in arg_names]),
"num_args": len(arg_names),
"kernel_docstring": "",
"shared": ccinfo.shared,
"num_warps": args.num_warps,
"_placeholder": "",
}
for ext in ['h', 'c']:
template_path = Path(__file__).parent / f"compile.{ext}"
with out_path.with_suffix(f".{sig_hash}_{suffix}.{ext}").open("w") as fp:
fp.write(Path(template_path).read_text().format(**params))
| 5,337 | 42.754098 | 140 |
py
|
triton
|
triton-main/python/triton/tools/build_extern.py
|
import argparse
import subprocess
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
class Symbol:
_name: str
_op_name: str
_ret_type: str
_arg_names: List[str]
_arg_types: List[str]
def __init__(
self,
name: str,
op_name: str,
ret_type: str,
arg_names: List[str],
arg_types: List[str],
) -> None:
'''
A symbol is a function declaration.
:param name: name of the symbol
:param op_name: name of the operation
:param ret_type: return type of the operation
:param arg_names: names of the arguments
:param arg_types: types of the arguments
'''
self._name = name
self._op_name = op_name
self._ret_type = ret_type
self._arg_names = list(arg_names)
self._arg_types = list(arg_types)
@property
def name(self) -> str:
return self._name
@property
def op_name(self) -> str:
return self._op_name
@property
def ret_type(self) -> str:
return self._ret_type
@property
def arg_names(self) -> List[str]:
return self._arg_names
@property
def arg_types(self) -> List[str]:
return self._arg_types
def convert_type(type_str) -> Optional[str]:
if type_str == "i32":
return "int32"
elif type_str == "u32":
return "uint32"
elif type_str == "i64":
return "int64"
elif type_str == "u64":
return "uint64"
elif type_str == "float":
return "fp32"
elif type_str == "double":
return "fp64"
else:
# ignore other types, such as pointer types
return None
def to_unsigned(type_str) -> str:
if type_str == "int32":
return "uint32"
elif type_str == "int64":
return "uint64"
else:
return type_str
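# Quick illustration of the two converters above (not used by the generator itself):
def _type_conversion_sketch():
    assert convert_type("float") == "fp32"
    assert convert_type("i8*") is None     # pointer and other unhandled types are skipped
    assert to_unsigned("int32") == "uint32"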
class ExternLibrary(ABC):
_name: str
_path: str
_symbols: Dict[str, Symbol]
_format: bool
_grouping: bool
def __init__(
self,
name: str,
path: str,
format: bool = True,
grouping: bool = True,
) -> None:
'''
Abstract class for extern library.
:param name: name of the library
:param path: path of the library
:param format: whether to format the generated stub file
        :param grouping: whether to group symbols that implement the same operation
        '''
self._name = name
self._path = path
self._symbols = {}
self._format = format
self._grouping = grouping
@property
def name(self) -> str:
return self._name
@property
def path(self) -> str:
return self._path
@property
def symbols(self) -> Dict[str, Symbol]:
return self._symbols
@property
def grouping(self) -> bool:
return self._grouping
@abstractmethod
def parse_symbols(self, input_file) -> None:
pass
@abstractmethod
def _output_stubs(self) -> str:
pass
def generate_stub_file(self, output_dir) -> None:
file_str = self._output_stubs()
if file_str is None or len(file_str) == 0:
raise Exception("file_str is empty")
output_file = f"{output_dir}/{self._name}.py"
with open(output_file, "w") as f:
f.write(file_str)
f.close()
if self._format:
subprocess.Popen(["autopep8", "-a", "-r", "-i", output_file],
stdout=subprocess.PIPE).communicate()
subprocess.Popen(["isort", output_file], stdout=subprocess.PIPE).communicate()
class Libdevice(ExternLibrary):
_symbol_groups: Dict[str, List[Symbol]]
def __init__(self, path) -> None:
'''
Constructor for Libdevice.
:param path: path of the libdevice library
'''
super().__init__("libdevice", path)
self._symbol_groups = {}
self.is_pure = True
@staticmethod
def _extract_symbol(line) -> Optional[Symbol]:
# Extract symbols from line in the following format:
# "define [internal] <ret_type> @<name>(<arg_types>,)"
entries = line.split("@")
ret_str = entries[0]
func_str = entries[1]
# Get ret_type, skip internal symbols
ret_strs = ret_str.split()
if ret_strs[1] == "internal":
return None
ret_type = convert_type(ret_strs[1])
if ret_type is None:
return None
# Get function name
func_strs = func_str.split("(")
func_name = func_strs[0].replace("@", "")
op_name = func_name.replace("__nv_", "")
if 'ieee' in op_name:
return None
# Get arg_types
arg_strs = func_strs[1].split(",")
arg_types = []
arg_names = []
for i, arg_str in enumerate(arg_strs):
arg_type = convert_type(arg_str.split()[0])
if arg_type is None:
return None
arg_name = 'arg' + str(i)
arg_types.append(arg_type)
arg_names.append(arg_name)
if op_name == "sad":
# Special case for sad, where the last argument is an unsigned int
arg_types[-1] = to_unsigned(arg_types[-1])
elif op_name.startswith("u"):
            # LLVM does not differentiate between signed and unsigned integer types,
            # so we have to convert the types to unsigned ourselves.
ret_type = to_unsigned(ret_type)
for i, arg_type in enumerate(arg_types):
arg_types[i] = to_unsigned(arg_type)
return Symbol(func_name, op_name, ret_type, arg_names, arg_types)
def _group_symbols(self) -> None:
symbol_set = {}
for symbol in self._symbols.values():
op_name = symbol.op_name
symbol_set[op_name] = symbol
# Group functions together by renaming.
renaming = {
'llabs': 'abs', 'acosf': 'acos', 'acoshf': 'acosh',
'dadd_rd': 'add_rd', 'fadd_rd': 'add_rd', 'dadd_rn': 'add_rn',
'fadd_rn': 'add_rn', 'dadd_ru': 'add_ru', 'fadd_ru': 'add_ru',
'dadd_rz': 'add_rz', 'fadd_rz': 'add_rz', 'asinf': 'asin',
'asinhf': 'asinh', 'atanf': 'atan', 'atan2f': 'atan2',
'atanhf': 'atanh', 'brevll': 'brev', 'cbrtf': 'cbrt',
'ceilf': 'ceil', 'clzll': 'clz', 'copysignf': 'copysign',
'cosf': 'cos', 'coshf': 'cosh', 'cospif': 'cospi',
'cyl_bessel_i0f': 'cyl_bessel_i0', 'cyl_bessel_i1f': 'cyl_bessel_i1',
'fdiv_rd': 'div_rd', 'ddiv_rd': 'div_rd', 'fdiv_rn': 'div_rn',
'ddiv_rn': 'div_rn', 'fdiv_ru': 'div_ru', 'ddiv_ru': 'div_ru',
'fdiv_rz': 'div_rz', 'ddiv_rz': 'div_rz', 'erff': 'erf',
'erfcf': 'erfc', 'erfcinvf': 'erfcinv', 'erfcxf': 'erfcx',
'erfinvf': 'erfinv', 'expf': 'exp', 'exp10f': 'exp10',
'exp2f': 'exp2', 'expm1f': 'expm1', 'fabsf': 'abs',
'fabs': 'abs', 'fast_fdividef': 'fast_dividef',
'fdimf': 'fdim', 'ffsll': 'ffs', 'floorf': 'floor',
'fmaf': 'fma', 'fmaf_rd': 'fma_rd', 'fmaf_rn': 'fma_rn',
'fmaf_ru': 'fma_ru', 'fmaf_rz': 'fma_rz', 'fmodf': 'fmod',
'uhadd': 'hadd', 'hypotf': 'hypot', 'ilogbf': 'ilogb',
'isinff': 'isinf', 'isinfd': 'isinf', 'isnanf': 'isnan',
'isnand': 'isnan', 'j0f': 'j0', 'j1f': 'j1', 'jnf': 'jn',
'ldexpf': 'ldexp', 'lgammaf': 'lgamma', 'llrintf': 'llrint',
'llroundf': 'llround', 'logf': 'log', 'log10f': 'log10',
'log1pf': 'log1p', 'log2f': 'log2', 'logbf': 'logb',
'umax': 'max', 'llmax': 'max', 'ullmax': 'max', 'fmaxf': 'max',
'fmax': 'max', 'umin': 'min', 'llmin': 'min', 'ullmin': 'min',
'fminf': 'min', 'fmin': 'min', 'dmul_rd': 'mul_rd', 'fmul_rd': 'mul_rd',
'dmul_rn': 'mul_rn', 'fmul_rn': 'mul_rn', 'dmul_ru': 'mul_ru',
'fmul_ru': 'mul_ru', 'dmul_rz': 'mul_rz', 'fmul_rz': 'mul_rz',
'umul24': 'mul24', 'umulhi': 'mulhi', 'mul64hi': 'mulhi',
'umul64hi': 'mulhi', 'nearbyintf': 'nearbyint', 'nextafterf': 'nextafter',
'norm3df': 'norm3d', 'norm4df': 'norm4d', 'normcdff': 'normcdf',
'normcdfinvf': 'normcdfinv', 'popcll': 'popc', 'powif': 'pow', 'powi': 'pow',
'powf': 'pow', 'rcbrtf': 'rcbrt', 'frcp_rd': 'rcp_rd', 'drcp_rd': 'rcp_rd',
'frcp_rn': 'rcp_rn', 'drcp_rn': 'rcp_rn', 'frcp_ru': 'rcp_ru',
'drcp_ru': 'rcp_ru', 'frcp_rz': 'rcp_rz', 'drcp_rz': 'rcp_rz',
'remainderf': 'remainder', 'urhadd': 'rhadd', 'rhypotf': 'rhypot',
'rintf': 'rint', 'rnorm3df': 'rnorm3d', 'rnorm4df': 'rnorm4d',
'roundf': 'round', 'rsqrtf': 'rsqrt', 'frsqrt_rn': 'rsqrt_rn',
'usad': 'sad', 'scalbnf': 'scalbn', 'signbitf': 'signbit',
'signbitd': 'signbit', 'sinf': 'sin', 'sinhf': 'sinh',
'sinpif': 'sinpi', 'sqrtf': 'sqrt', 'fsqrt_rd': 'sqrt_rd',
'dsqrt_rd': 'sqrt_rd', 'fsqrt_rn': 'sqrt_rn', 'dsqrt_rn': 'sqrt_rn',
'fsqrt_ru': 'sqrt_ru', 'dsqrt_ru': 'sqrt_ru', 'fsqrt_rz': 'sqrt_rz',
'dsqrt_rz': 'sqrt_rz', 'fsub_rd': 'sub_rd', 'dsub_rd': 'sub_rd',
'fsub_rn': 'sub_rn', 'dsub_rn': 'sub_rn', 'fsub_ru': 'sub_ru',
'dsub_ru': 'sub_ru', 'fsub_rz': 'sub_rz', 'dsub_rz': 'sub_rz',
'tanf': 'tan', 'tanhf': 'tanh', 'tgammaf': 'tgamma', 'truncf': 'trunc',
'y0f': 'y0', 'y1f': 'y1', 'ynf': 'yn'
}
for symbol in self._symbols.values():
op_name = symbol.op_name
if op_name in renaming:
op_name = renaming[op_name]
symbol._op_name = op_name
if op_name in self._symbol_groups:
self._symbol_groups[op_name].append(symbol)
else:
self._symbol_groups[op_name] = [symbol]
def parse_symbols(self, input_file) -> None:
if len(self.symbols) > 0:
return
output = subprocess.check_output(["grep", "define", input_file]).decode().splitlines()
for line in output:
symbol = self._extract_symbol(line)
if symbol is None:
continue
self._symbols[symbol.name] = symbol
self._group_symbols()
def _output_stubs(self) -> str:
# Generate python functions in the following format:
# @core.extern
# def <op_name>(<args>, _builder=None):
# arg_type_symbol_dict = {[arg_type]: {(symbol, ret_type)}}
# return core.extern_elementwise("libdevice", <path>, <args>, <arg_type_symbol_dict>, _builder)
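# For illustration only (symbol names depend on the parsed library): after the
# grouping done in _group_symbols above, the "cos" entry would emit roughly
#   @core.extern
#   def cos(arg0, _builder=None):
#       return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
#           {(core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")),
#            (core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")), },
#           is_pure=<self.is_pure>, _builder=_builder)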
import_str = "from . import core\n"
import_str += "import os\n"
import_str += "import functools\n"
header_str = ""
header_str += "@functools.lru_cache()\n"
header_str += "def libdevice_path():\n"
header_str += " import torch\n"
header_str += " third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"third_party\")\n"
header_str += " if torch.version.hip is None:\n"
header_str += " default = os.path.join(third_party_dir, \"cuda\", \"lib\", \"libdevice.10.bc\")\n"
header_str += " else:\n"
header_str += " default = ''\n"
header_str += " return os.getenv(\"TRITON_LIBDEVICE_PATH\", default)\n"
func_str = ""
for symbols in self._symbol_groups.values():
func_str += "@core.extern\n"
func_name_str = f"def {symbols[0].op_name}("
for arg_name in symbols[0].arg_names:
func_name_str += f"{arg_name}, "
func_name_str += "_builder=None):\n"
return_str = f"\treturn core.extern_elementwise(\"{self._name}\", libdevice_path(), ["
for arg_name in symbols[0].arg_names:
return_str += f"{arg_name}, "
return_str += "], \n"
arg_type_symbol_dict_str = "{"
for symbol in symbols:
arg_type_symbol_dict_str += "("
for arg_type in symbol.arg_types:
arg_type_symbol_dict_str += f'core.dtype("{arg_type}"),'
ret_type = f'core.dtype("{symbol.ret_type}")'
arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n"
arg_type_symbol_dict_str += "}"
return_str += arg_type_symbol_dict_str
return_str += f", is_pure={self.is_pure}"
return_str += ", _builder=_builder)\n"
func_str += func_name_str + return_str + "\n"
file_str = import_str + header_str + func_str
return file_str
class LLVMDisassembler:
_path: str
_ll_file: str
def __init__(self, path) -> None:
'''
Invoke llvm-dis to disassemble the given file.
:param path: path to llvm-dis
'''
self._path = path
self._ll_file = "/tmp/extern_lib.ll"
def disasm(self, lib_path: str) -> None:
subprocess.Popen([self._path, lib_path, "-o", self.ll_file],
stdout=subprocess.PIPE).communicate()
@property
def ll_file(self) -> str:
return self._ll_file
@property
def path(self) -> str:
return self._path
extern_libs = ["libdevice"]
def build(
llvm_dis_path: str,
lib_path: str,
lib_name: str,
output_dir: str,
) -> None:
'''
Interface function to build the library file.
:param llvm_dis_path: path to the llvm-dis binary
:param lib_path: path to the external library file
:param lib_name: name of the library
:param output_dir: path to the output directory
'''
if lib_name == "libdevice":
extern_lib = Libdevice(lib_path)
else:
raise Exception(f"Unknown extern library: {lib_name}")
llvm_disassembler = LLVMDisassembler(llvm_dis_path)
llvm_disassembler.disasm(lib_path)
extern_lib.parse_symbols(llvm_disassembler.ll_file)
extern_lib.generate_stub_file(output_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--llvm-dis", dest="llvm_dis_path", help="Path to llvm-dis", default="llvm-dis")
parser.add_argument("--lib-path", dest="lib_path", help="Path to the extern library")
parser.add_argument("--lib-name", dest="lib_name", help="Name of the extern library")
parser.add_argument("--output", dest="output_dir", help="Output file path", default="/tmp/")
args = parser.parse_args()
build(args.llvm_dis_path, args.lib_path, args.lib_name, args.output_dir)
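# Example invocation (illustrative paths; <this script> stands for this file):
#   python <this script> --llvm-dis llvm-dis \
#       --lib-path <cuda>/nvvm/libdevice/libdevice.10.bc --lib-name libdevice --output /tmp/
# It disassembles the bitcode, parses every `define` line into a Symbol, groups the
# per-type variants, and writes the generated stub module into the output directory.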
| 14,661 | 35.746867 | 130 |
py
|
triton
|
triton-main/python/triton/tools/__init__.py
| 0 | 0 | 0 |
py
|
|
triton
|
triton-main/python/triton/common/backend.py
|
import importlib
import importlib.util
from typing import Dict
from ..runtime.driver import DriverBase
class BaseBackend:
def __init__(self, device_type: str) -> None:
self.device_type = device_type
def add_stages(self, arch, extern_libs, stages):
"""
Customize the arch, extern_libs and stages per backend-specific requirements
"""
raise NotImplementedError
def add_meta_info(self, ir, cur_module, next_module, metadata, asm):
"""
Customize the ir, module, metadata and asm per backend-specific requirements
"""
raise NotImplementedError
def get_load_binary_fn(self):
"""
Return a callable to load binary
"""
raise NotImplementedError
def get_driver(self) -> DriverBase:
"""
Get the backend driver. Please refer to "DriverBase" for more details
"""
raise NotImplementedError
def get_stream(self):
"""
Get stream for current device
"""
raise NotImplementedError
def get_device_properties(self, device):
raise NotImplementedError
def get_current_device(self):
"""
Get current device
"""
raise NotImplementedError
def set_current_device(self, device):
"""
Set current device as the given device
"""
raise NotImplementedError
def get_kernel_bin(self):
raise NotImplementedError
def make_launcher_stub(self, name, signature, constants):
"""
Generate the launcher stub to launch the kernel
"""
raise NotImplementedError
def get_architecture_descriptor(self, **kwargs):
"""
Get the architecture descriptor of the backend
"""
raise NotImplementedError
@classmethod
def create_backend(cls, device_type: str):
return cls(device_type)
_backends: Dict[str, BaseBackend] = {}
def register_backend(device_type: str, backend_cls: type):
if device_type not in _backends:
_backends[device_type] = backend_cls.create_backend(device_type)
def get_backend(device_type: str):
if device_type not in _backends:
device_backend_package_name = f"...third_party.{device_type}"
if importlib.util.find_spec(device_backend_package_name, package=__spec__.name):
try:
importlib.import_module(device_backend_package_name, package=__spec__.name)
except Exception:
return None
else:
return None
return _backends[device_type] if device_type in _backends else None
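# A minimal sketch (not part of the original module) of how a third-party backend
# could hook in, assuming a hypothetical "my_device" device type:
#
#   class MyDeviceBackend(BaseBackend):
#       def get_current_device(self):
#           return 0
#       def set_current_device(self, device):
#           pass
#       # ... plus the remaining methods declared on BaseBackend ...
#
#   register_backend("my_device", MyDeviceBackend)
#
# get_backend("my_device") then returns the shared instance created by
# create_backend(); without a registration it falls back to importing
# `...third_party.my_device` relative to this package.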
| 2,628 | 26.103093 | 91 |
py
|
triton
|
triton-main/python/triton/common/__init__.py
|
from .build import _build, cuda_include_dir, libcuda_dirs
__all__ = ["_build", "libcuda_dirs", "cuda_include_dir"]
| 116 | 28.25 | 57 |
py
|
triton
|
triton-main/python/triton/common/build.py
|
import contextlib
import functools
import io
import os
import shutil
import subprocess
import sys
import sysconfig
import setuptools
# TODO: is_hip shouldn't be here
def is_hip():
import torch
return torch.version.hip is not None
@functools.lru_cache()
def libcuda_dirs():
locs = subprocess.check_output(["whereis", "libcuda.so"]).decode().strip().split()[1:]
return [os.path.dirname(loc) for loc in locs]
@functools.lru_cache()
def rocm_path_dir():
return os.getenv("ROCM_PATH", default="/opt/rocm")
@contextlib.contextmanager
def quiet():
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
try:
yield
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
@functools.lru_cache()
def cuda_include_dir():
base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir)
cuda_path = os.path.join(base_dir, "third_party", "cuda")
return os.path.join(cuda_path, "include")
def _build(name, src, srcdir):
if is_hip():
hip_lib_dir = os.path.join(rocm_path_dir(), "lib")
hip_include_dir = os.path.join(rocm_path_dir(), "include")
else:
cuda_lib_dirs = libcuda_dirs()
cu_include_dir = cuda_include_dir()
suffix = sysconfig.get_config_var('EXT_SUFFIX')
so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix))
# try to avoid setuptools if possible
cc = os.environ.get("CC")
if cc is None:
# TODO: support more things here.
clang = shutil.which("clang")
gcc = shutil.which("gcc")
cc = gcc if gcc is not None else clang
if cc is None:
raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
# This function was renamed and made public in Python 3.10
if hasattr(sysconfig, 'get_default_scheme'):
scheme = sysconfig.get_default_scheme()
else:
scheme = sysconfig._get_default_scheme()
# 'posix_local' is a custom scheme on Debian. However, starting with Python 3.10, the default install
# path changes to include 'local'. This change is required to use triton with system-wide python.
if scheme == 'posix_local':
scheme = 'posix_prefix'
py_include_dir = sysconfig.get_paths(scheme=scheme)["include"]
if is_hip():
ret = subprocess.check_call([cc, src, f"-I{hip_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", f"-L{hip_lib_dir}", "-lamdhip64", "-o", so])
else:
cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so]
cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
ret = subprocess.check_call(cc_cmd)
if ret == 0:
return so
# fallback on setuptools
extra_compile_args = []
library_dirs = cuda_lib_dirs
include_dirs = [srcdir, cu_include_dir]
libraries = ['cuda']
# extra arguments
extra_link_args = []
# create extension module
ext = setuptools.Extension(
name=name,
language='c',
sources=[src],
include_dirs=include_dirs,
extra_compile_args=extra_compile_args + ['-O3'],
extra_link_args=extra_link_args,
library_dirs=library_dirs,
libraries=libraries,
)
# build extension module
args = ['build_ext']
args.append('--build-temp=' + srcdir)
args.append('--build-lib=' + srcdir)
args.append('-q')
args = dict(
name=name,
ext_modules=[ext],
script_args=args,
)
with quiet():
setuptools.setup(**args)
return so
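# Usage sketch (illustrative paths): the runtime drivers compile their C helpers with
#   so_path = _build("cuda_utils", "/tmp/xyz/main.c", "/tmp/xyz")
# which returns the path of the built shared object, produced either by a direct
# cc/gcc/clang invocation or by the setuptools fallback above.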
| 3,661 | 30.568966 | 172 |
py
|
triton
|
triton-main/python/triton/runtime/jit.py
|
from __future__ import annotations, division
import ast
import functools
import hashlib
import inspect
import os
import subprocess
import textwrap
from collections import defaultdict, namedtuple
from typing import (Callable, Generic, Iterable, List, Optional, TypeVar, Union, cast,
overload)
from ..common.backend import get_backend
TRITON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TRITON_VERSION = "2.1.0"
def get_cuda_stream(idx=None):
if idx is None:
idx = get_current_device()
try:
from torch._C import _cuda_getCurrentRawStream
return _cuda_getCurrentRawStream(idx)
except ImportError:
import torch
return torch.cuda.current_stream(idx).cuda_stream
def get_current_device():
import torch
return torch.cuda.current_device()
def set_current_device(idx):
import torch
torch.cuda.set_device(idx)
def get_device_capability(idx):
import torch
return torch.cuda.get_device_capability(idx)
T = TypeVar('T')
# -----------------------------------------------------------------------------
# Dependencies Finder
# -----------------------------------------------------------------------------
class DependenciesFinder(ast.NodeVisitor):
"""
This AST visitor is used to find dependencies of a JITFunction. This can
be used to invalidate a JITFunction's hash when its source code -- or
that of its dependencies -- changes.
"""
def __init__(self, globals, src) -> None:
super().__init__()
self.ret = hashlib.md5(src.encode("utf-8")).hexdigest()
self.globals = globals
def visit_Name(self, node):
return self.globals.get(node.id, None)
def visit_Attribute(self, node):
lhs = self.visit(node.value)
while isinstance(lhs, ast.Attribute):
lhs = self.visit(lhs.value)
if lhs is None or (getattr(lhs, "__name__", "") == "triton" or getattr(lhs, "__name__", "").endswith(".triton")):
return None
return getattr(lhs, node.attr)
def visit_Call(self, node):
func = self.visit(node.func)
if func is None:
return
if inspect.isbuiltin(func):
return
if func.__module__ and (func.__module__.startswith('triton.') or '.triton.' in func.__module__):
return
assert isinstance(func, JITFunction), f"Function \"{func.__name__}\" is being called from a Triton function but is not a Triton function itself. Decorate it with @triton.jit to fix this"
if func.hash is None:
tree = ast.parse(func.src)
finder = DependenciesFinder(func.__globals__, func.src)
finder.visit(tree)
func.hash = finder.ret
noinline = str(getattr(func, 'noinline', False))
self.ret = (self.ret + func.hash + noinline).encode("utf-8")
self.ret = hashlib.md5(self.ret).hexdigest()
# -----------------------------------------------------------------------------
# JITFunction
# -----------------------------------------------------------------------------
@functools.lru_cache()
def version_key():
import pkgutil
contents = []
# frontend
with open(__file__, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# compiler
compiler_path = os.path.join(TRITON_PATH, 'compiler')
for lib in pkgutil.iter_modules([compiler_path]):
with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# backend
with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# language
language_path = os.path.join(TRITON_PATH, 'language')
for lib in pkgutil.iter_modules([language_path]):
with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# ptxas version
try:
ptxas_version = hashlib.md5(subprocess.check_output(["ptxas", "--version"])).hexdigest()
except Exception:
ptxas_version = ''
return '-'.join(TRITON_VERSION) + '-' + ptxas_version + '-' + '-'.join(contents)
class KernelInterface(Generic[T]):
run: T
def __getitem__(self, grid) -> T:
"""
A JIT function is launched with: fn[grid](*args, **kwargs).
Hence JITFunction.__getitem__ returns a callable proxy that
memorizes the grid.
"""
return cast(T, functools.partial(cast(Callable, self.run), grid=grid))
class JITFunction(KernelInterface[T]):
# Hook for inspecting compiled functions and modules
cache_hook = None
divisibility = 16
@staticmethod
def _key_of(arg):
if hasattr(arg, "dtype"):
return arg.dtype
elif isinstance(arg, bool):
return "i1"
elif isinstance(arg, int):
if -2**31 <= arg and arg <= 2**31 - 1:
return "i32"
elif 2**63 <= arg and arg <= 2**64 - 1:
return "u64"
else:
return "i64"
elif isinstance(arg, float):
return 'fp32'
elif arg is None:
return None
else:
raise TypeError(f'Unsupported type {type(arg)} for {arg}')
@staticmethod
def _device_of(arg):
if hasattr(arg, "device"):
if hasattr(arg.device, 'type'):
return arg.device.type
return ''
@staticmethod
def _pinned_memory_of(arg):
if hasattr(arg, "is_pinned"):
if isinstance(arg.is_pinned, Callable):
return arg.is_pinned()
return False
@staticmethod
def _spec_of(arg):
if hasattr(arg, "data_ptr"):
return (arg.data_ptr() % JITFunction.divisibility == 0)
elif isinstance(arg, int):
return (arg % 16 == 0, arg == 1)
return (arg is None, )
def _get_config(self, *args):
def is_divisible_by_16(x):
if hasattr(x, "data_ptr"):
return x.data_ptr() % JITFunction.divisibility == 0
elif isinstance(x, int):
return x % JITFunction.divisibility == 0
if x is None:
return True
return False
divisible_by_16 = {i for i, arg in enumerate(args) if is_divisible_by_16(arg) and i not in self.do_not_specialize}
equal_to_1 = {i for i, arg in enumerate(args) if not isinstance(arg, bool) and isinstance(arg, int) and arg == 1 and i not in self.do_not_specialize}
return namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"])(tuple(divisible_by_16), tuple(equal_to_1))
# return _triton.code_gen.instance_descriptor(divisible_by_16, equal_to_1)
@staticmethod
def _type_of(key):
# None are nullptr -- implicitly converted to *i8
if key is None:
return '*i8'
dtype_str = str(key).split(".")[-1]
tys = {
"bool": "i1",
"float8e4": "fp8e4",
"float8e5": "fp8e5",
"float8e4b15": "fp8e4b15",
"float16": "fp16",
"bfloat16": "bf16",
"float32": "fp32",
"float64": "fp64",
"int8": "i8",
"int16": "i16",
"int32": "i32",
"int64": "i64",
"uint8": "u8",
"uint16": "u16",
"uint32": "u32",
"uint64": "u64",
}
# reinterpret can create triton type
for v in list(tys.values()):
tys[v] = v
return key if isinstance(key, str) else f"*{tys[dtype_str]}"
def _make_signature(self, sig_key):
signature = ",".join([self._type_of(k) for i, k in enumerate(sig_key)])
return signature
def _make_constants(self, constexpr_key):
constants = dict(zip(self.constexprs, constexpr_key))
return constants
def _call_hook(self, key, signature, device, constants, num_warps, num_stages, extern_libs, configs):
if JITFunction.cache_hook is None:
return False
name = self.fn.__name__
module = self.fn.__module__
arg_reprs = ', '.join([f'{name}: {ty}' for name, ty in zip(self.arg_names, key[1])])
repr = f"{name}[num_warps={num_warps}, num_stages={num_stages}]({arg_reprs})"
key = str(key)
class LegacyCompiler:
def __init__(self, module, name):
self.module = module
self.name = name
pass
kwargs = dict(signature=signature, device=device, constants=constants,
num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs,
configs=configs)
return JITFunction.cache_hook(key=key, repr=repr, fn=LegacyCompiler(module, name), compile={"key": key, **kwargs}, is_manual_warmup=False, already_compiled=False)
def _get_arg_specialization_key(self, arg) -> str:
arg_annotation = self.__annotations__.get(arg, '')
if arg_annotation == '':
return f'({arg}.data_ptr() % {JITFunction.divisibility} == 0) if hasattr({arg}, "data_ptr") \
else ({arg} % {JITFunction.divisibility} == 0, {arg} == 1) if isinstance({arg}, int) \
else (False,)'
elif 'Tensor' in arg_annotation:
return f'({arg}.data_ptr() % {JITFunction.divisibility} == 0)'
elif arg_annotation == 'int':
return f'({arg} % {JITFunction.divisibility} == 0, {arg} == 1)'
else:
return '(False,)'
def _get_arg_sig_key(self, arg) -> str:
arg_annotation = self.__annotations__.get(arg, '')
if 'Tensor' in arg_annotation:
return f'{arg}.dtype'
elif arg_annotation == 'bool':
return "i1"
elif arg_annotation == 'float':
return 'fp32'
else:
return f'_key_of({arg})'
def _conclude_device_type(self, device_types: List[str], pinned_memory_flags: List[bool]) -> str:
device_types = [device_type for device_type in device_types if device_type != '']
# Return cuda if one of the input tensors is cuda
if 'cuda' in device_types:
import torch
return 'hip' if torch.version.hip else 'cuda'
is_cpu = all(device_type == 'cpu' for device_type in device_types)
is_pinned_memory = any(pinned_memory_flag for pinned_memory_flag in pinned_memory_flags)
# Return cuda if all the input tensors are cpu while the memory is pinned
if is_cpu and is_pinned_memory:
return 'cuda'
return device_types[0] if len(device_types) > 0 else 'cuda'
def _make_launcher(self):
regular_args = [f'{arg}' for i, arg in enumerate(self.arg_names) if i not in self.constexprs]
constexpr_args = [f'{arg}' for i, arg in enumerate(self.arg_names) if i in self.constexprs]
args = ', '.join(regular_args)
# cache key for regular argument type
sig_keys = ', '.join([self._get_arg_sig_key(arg) for arg in regular_args])
device_types = '[' + ', '.join([f'_device_of({arg})' for arg in regular_args]) + ']'
pinned_memory_flags = '[' + ', '.join([f'_pinned_memory_of({arg})' for arg in regular_args]) + ']'
# cache key for constexpr argument values
constexpr_keys = ', '.join(constexpr_args)
# cache key for argument specialization
specializations = []
for i, arg in enumerate(regular_args):
if i in self.do_not_specialize:
continue
specializations += [self._get_arg_specialization_key(arg)]
spec_keys = ', '.join(specializations)
grid_args = ','.join([f'"{arg}": {arg}' for arg in self.arg_names])
args_signature = ', '.join(name if dflt == inspect._empty else f'{name} = {dflt}' for name, dflt in zip(self.arg_names, self.arg_defaults))
src = f"""
def {self.fn.__name__}({args_signature}, grid=None, num_warps=4, num_stages=3, extern_libs=None, stream=None, warmup=False, device=None, device_type=None):
from ..compiler import compile, CompiledKernel
sig_key = {sig_keys},
constexpr_key = {f'{constexpr_keys},' if len(constexpr_keys) > 0 else ()}
spec_key = {f'{spec_keys},' if len(spec_keys) > 0 else ()}
key = (version_key, sig_key, constexpr_key, spec_key, num_warps, num_stages, self.debug)
if not extern_libs is None:
key = (key, tuple(extern_libs.items()))
assert num_warps > 0 and (num_warps & (num_warps - 1)) == 0, "num_warps must be a power of 2"
assert grid is not None
if callable(grid):
grid = grid({{{grid_args}}})
grid_size = len(grid)
grid_0 = grid[0]
grid_1 = grid[1] if grid_size > 1 else 1
grid_2 = grid[2] if grid_size > 2 else 1
if device_type is None:
device_types = [_device_type for _device_type in {device_types} if _device_type != '']
device_type = self._conclude_device_type(device_types, {pinned_memory_flags})
device_backend = None
if device_type not in ['cuda', 'hip']:
device_backend = get_backend(device_type)
if device_backend is None:
raise ValueError('Cannot find backend for ' + device_type)
if device is None:
if device_type in ['cuda', 'hip']:
device = get_current_device()
set_current_device(device)
else:
device = device_backend.get_current_device()
device_backend.set_current_device(device)
if stream is None and not warmup:
if device_type in ['cuda', 'hip']:
stream = get_cuda_stream(device)
else:
stream = device_backend.get_stream()
bin = cache[device].get(key, None)
if bin is not None:
if not warmup:
bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, bin, {args})
return bin
# kernel not cached -- compile
else:
# build dict of constant values
args = [{args}]
all_args = {', '.join([f'{arg}' for arg in self.arg_names])},
configs = self._get_config(*all_args),
constants = self._make_constants(constexpr_key)
constants.update({{i: None for i, arg in enumerate(all_args) if arg is None}})
constants.update({{i: 1 for i in configs[0].equal_to_1}})
# build kernel signature -- doesn't include specialized arguments
signature = {{ i: self._type_of(_key_of(arg)) for i, arg in enumerate(all_args) if i not in self.constexprs }}
# build stub signature -- includes arguments that are specialized
for i, arg in constants.items():
if callable(arg):
raise TypeError(f"Callable constexpr at index {{i}} is not supported")
if not self._call_hook(key, signature, device, constants, num_warps, num_stages, extern_libs, configs):
bin = compile(self, signature=signature, device=device, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs, configs=configs, debug=self.debug, device_type=device_type)
if not warmup:
bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, bin, *args)
self.cache[device][key] = bin
return bin
return None
"""
scope = {"version_key": version_key(),
"get_cuda_stream": get_cuda_stream,
"self": self,
"_spec_of": self._spec_of,
"_key_of": self._key_of,
"_device_of": self._device_of,
"_pinned_memory_of": self._pinned_memory_of,
"cache": self.cache,
"__spec__": __spec__,
"get_backend": get_backend,
"get_current_device": get_current_device,
"set_current_device": set_current_device}
exec(src, scope)
return scope[self.fn.__name__]
def __init__(self, fn, version=None, do_not_specialize=None, debug=None, noinline=None):
self.fn = fn
self.module = fn.__module__
self.version = version
# function signature information
signature = inspect.signature(fn)
self.arg_names = [v.name for v in signature.parameters.values()]
self.arg_defaults = [v.default for v in signature.parameters.values()]
self.has_defaults = any(v != inspect._empty for v in self.arg_defaults)
# specialization hints
self.do_not_specialize = [] if do_not_specialize is None else do_not_specialize
self.do_not_specialize = {self.arg_names.index(arg) if isinstance(arg, str) else arg for arg in self.do_not_specialize}
# function source code (without decorators)
self.src = textwrap.dedent(inspect.getsource(fn))
self.src = self.src[self.src.find("def"):]
# cache of just-in-time compiled kernels
self.cache = defaultdict(dict)
self.hash = None
# JITFunction can be instantiated as kernel
# when called with a grid using __getitem__
self.kernel_decorators = []
self.kernel = None
self.debug = True if os.environ.get("TRITON_DEBUG", "0") == "1" else debug
self.noinline = noinline
# annotations
normalize_ty = lambda ty: ty.__name__ if isinstance(ty, type) else ty
self.__annotations__ = {name: normalize_ty(ty) for name, ty in fn.__annotations__.items()}
# index of constexprs
self.constexprs = [self.arg_names.index(name) for name, ty in self.__annotations__.items() if 'constexpr' in ty]
# launcher
self.run = self._make_launcher()
# re-use docs of wrapped function
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
self.__globals__ = fn.__globals__
self.__module__ = fn.__module__
@property
def cache_key(self):
# TODO: hash should be attribute of `self`
if self.hash is None:
dependencies_finder = DependenciesFinder(globals=self.__globals__, src=self.src)
dependencies_finder.visit(self.parse())
self.hash = dependencies_finder.ret + version_key()
return self.hash
def warmup(self, *args, **kwargs):
return self.run(*map(MockTensor.wrap_dtype, args), **kwargs, warmup=True)
# we do not parse `src` in the constructor because
# the user might want to monkey-patch self.src dynamically.
# Our unit tests do this, for example.
def parse(self):
tree = ast.parse(self.src)
assert isinstance(tree, ast.Module)
assert len(tree.body) == 1
assert isinstance(tree.body[0], ast.FunctionDef)
return tree
def __call__(self, *args, **kwargs):
raise RuntimeError("Cannot call @triton.jit'd function outside of the scope of a kernel")
def __setattr__(self, name, value):
# - when kernel decorators change, cached kernel
# needs to be cleared
if name == 'kernel_decorators':
self.kernel = None
super(JITFunction, self).__setattr__(name, value)
# - when `.src` attribute is set, cache path needs
# to be reinitialized
if name == 'src':
self.hash = None
def __repr__(self):
return f"JITFunction({self.module}:{self.fn.__name__})"
# -----------------------------------------------------------------------------
# `jit` decorator
# -----------------------------------------------------------------------------
@overload
def jit(fn: T) -> JITFunction[T]:
...
@overload
def jit(
*,
version=None,
do_not_specialize: Optional[Iterable[int]] = None,
debug: Optional[bool] = None,
noinline: Optional[bool] = None,
) -> Callable[[T], JITFunction[T]]:
...
def jit(
fn: Optional[T] = None,
*,
version=None,
do_not_specialize: Optional[Iterable[int]] = None,
debug: Optional[bool] = None,
noinline: Optional[bool] = None,
interpret: Optional[bool] = None,
) -> Union[JITFunction[T], Callable[[T], JITFunction[T]]]:
"""
Decorator for JIT-compiling a function using the Triton compiler.
:note: When a jit'd function is called, arguments are
implicitly converted to pointers if they have a :code:`.data_ptr()` method
and a `.dtype` attribute.
:note: This function will be compiled and run on the GPU. It will only have access to:
* python primitives,
* builtins within the triton package,
* arguments to this function,
* other jit'd functions
:param fn: the function to be jit-compiled
:type fn: Callable
"""
def decorator(fn: T) -> JITFunction[T]:
assert callable(fn)
if interpret:
from ..interpreter.interpreter import GridSelector
return GridSelector(fn)
else:
return JITFunction(
fn,
version=version,
do_not_specialize=do_not_specialize,
debug=debug,
noinline=noinline,
)
if fn is not None:
return decorator(fn)
else:
return decorator
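# A minimal usage sketch (illustrative, not part of this module): a jit'd kernel is
# defined once and then launched through the grid proxy returned by KernelInterface:
#
#   @triton.jit
#   def copy_kernel(src_ptr, dst_ptr, n_elements, BLOCK: tl.constexpr):
#       pid = tl.program_id(0)
#       offs = pid * BLOCK + tl.arange(0, BLOCK)
#       mask = offs < n_elements
#       tl.store(dst_ptr + offs, tl.load(src_ptr + offs, mask=mask), mask=mask)
#
#   copy_kernel[(triton.cdiv(n, 1024),)](src, dst, n, BLOCK=1024)
#
# Tensor arguments are passed by their .data_ptr(), and BLOCK is treated as a
# compile-time constexpr because of its annotation.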
# -----------------------------------------------------------------------------
# Utilities for mocking tensors
# -----------------------------------------------------------------------------
class MockTensor:
"""
Can be used in place of real tensors when calling:
kernel.warmup(MockTensor(torch.float32), ...)
"""
@staticmethod
def wrap_dtype(arg):
if arg.__class__.__name__ == "dtype" and\
arg.__module__ == "torch":
return MockTensor(arg)
return arg
def __init__(self, dtype):
self.dtype = dtype
@staticmethod
def data_ptr():
return 0 # optimistically assumes multiple of 16
class TensorWrapper:
def __init__(self, base, dtype):
self.dtype = dtype
self.base = base
self.is_cuda = base.is_cuda
self.device = base.device
self.shape = self.base.shape
def data_ptr(self):
return self.base.data_ptr()
def stride(self, i):
return self.base.stride(i)
def __str__(self) -> str:
return f'TensorWrapper[{self.dtype}]({self.base})'
def reinterpret(tensor, dtype):
if isinstance(tensor, TensorWrapper):
if dtype == tensor.base.dtype:
# Reinterpreting to the original interpretation; return the base.
return tensor.base
else:
# Reinterpreting a wrapped tensor to a different type.
return TensorWrapper(tensor.base, dtype)
elif hasattr(tensor, "data_ptr"):
# A new wrapper is needed around an unwrapped tensor.
return TensorWrapper(tensor, dtype)
else:
raise TypeError(f'Cannot reinterpret a {type(tensor)}.')
| 23,031 | 37.069421 | 213 |
py
|
triton
|
triton-main/python/triton/runtime/autotuner.py
|
from __future__ import annotations
import builtins
import time
from typing import Dict
from ..testing import do_bench
from .jit import KernelInterface
class OutOfResources(Exception):
def __init__(self, required, limit, name):
self.message = f'out of resource: {name}, '\
f'Required: {required}, '\
f'Hardware limit: {limit}'
self.message += '. Reducing block sizes or `num_stages` may help.'
self.required = required
self.limit = limit
self.name = name
super().__init__(self.message)
def __reduce__(self):
# this is necessary to make OutOfResources picklable
return (type(self), (self.required, self.limit, self.name))
class Autotuner(KernelInterface):
def __init__(self, fn, arg_names, configs, key, reset_to_zero, prune_configs_by: Dict = None, warmup=25, rep=100):
'''
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
'perf_model': performance model used to predict running time with different configs, returns running time
'top_k': number of configs to bench
'early_config_prune' (optional): a function used to do early pruning (e.g. of num_stages). It takes configs:List[Config] as its input, and returns pruned configs.
'''
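# e.g. (illustrative names): prune_configs_by={'perf_model': my_perf_model, 'top_k': 8,
#                                              'early_config_prune': my_early_prune}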
if not configs:
self.configs = [Config({}, num_warps=4, num_stages=2)]
else:
self.configs = configs
self.key_idx = [arg_names.index(k) for k in key]
self.cache = {}
# hook to reset all required tensors to zero before relaunching a kernel
self.hook = lambda args: 0
if reset_to_zero is not None:
self.reset_idx = [arg_names.index(k) for k in reset_to_zero]
def _hook(args):
for i in self.reset_idx:
args[i].zero_()
self.hook = _hook
self.arg_names = arg_names
# prune configs
if prune_configs_by:
perf_model, top_k = prune_configs_by['perf_model'], prune_configs_by['top_k']
if 'early_config_prune' in prune_configs_by:
early_config_prune = prune_configs_by['early_config_prune']
else:
perf_model, top_k, early_config_prune = None, None, None
self.perf_model, self.configs_top_k = perf_model, top_k
self.early_config_prune = early_config_prune
self.fn = fn
self.warmup = warmup
self.rep = rep
def _bench(self, *args, config, **meta):
# check for conflicts, i.e. meta-parameters both provided
# as kwargs and by the autotuner
conflicts = meta.keys() & config.kwargs.keys()
if conflicts:
raise ValueError(
f"Conflicting meta-parameters: {', '.join(conflicts)}."
" Make sure that you don't re-define auto-tuned symbols."
)
# augment meta-parameters with tunable ones
current = dict(meta, **config.kwargs)
full_nargs = {**self.nargs, **current}
def kernel_call():
if config.pre_hook:
config.pre_hook(full_nargs)
self.hook(args)
self.fn.run(*args, num_warps=config.num_warps, num_stages=config.num_stages, **current)
try:
return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
except OutOfResources:
return [float('inf'), float('inf'), float('inf')]
def run(self, *args, **kwargs):
self.nargs = dict(zip(self.arg_names, args))
if len(self.configs) > 1:
all_args = {**self.nargs, **kwargs}
_args = []
for name in self.arg_names:
if name in all_args:
_args.append(all_args[name])
key = tuple(_args[i] for i in self.key_idx)
if key not in self.cache:
# prune configs
pruned_configs = self.prune_configs(kwargs)
bench_start = time.time()
timings = {config: self._bench(*args, config=config, **kwargs)
for config in pruned_configs}
bench_end = time.time()
self.bench_time = bench_end - bench_start
self.cache[key] = builtins.min(timings, key=timings.get)
self.hook(args)
self.configs_timings = timings
config = self.cache[key]
else:
config = self.configs[0]
self.best_config = config
if config.pre_hook is not None:
full_nargs = {**self.nargs, **kwargs, **self.best_config.kwargs}
config.pre_hook(full_nargs)
ret = self.fn.run(*args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs)
self.nargs = None
return ret
def prune_configs(self, kwargs):
pruned_configs = self.configs
if self.early_config_prune:
pruned_configs = self.early_config_prune(self.configs, self.nargs)
if self.perf_model:
top_k = self.configs_top_k
if isinstance(top_k, float) and top_k <= 1.0:
top_k = int(len(self.configs) * top_k)
if len(pruned_configs) > top_k:
est_timing = {
config: self.perf_model(**self.nargs, **kwargs, **config.kwargs, num_stages=config.num_stages,
num_warps=config.num_warps)
for config in pruned_configs
}
pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[:top_k]
return pruned_configs
def warmup(self, *args, **kwargs):
self.nargs = dict(zip(self.arg_names, args))
for config in self.prune_configs(kwargs):
self.fn.warmup(
*args,
num_warps=config.num_warps,
num_stages=config.num_stages,
**kwargs,
**config.kwargs,
)
self.nargs = None
class Config:
"""
An object that represents a possible kernel configuration for the auto-tuner to try.
:ivar meta: a dictionary of meta-parameters to pass to the kernel as keyword arguments.
:type meta: dict[str, Any]
:ivar num_warps: the number of warps to use for the kernel when compiled for GPUs. For example, if
`num_warps=8`, then each kernel instance will be automatically parallelized to
cooperatively execute using `8 * 32 = 256` threads.
:type num_warps: int
:ivar num_stages: the number of stages that the compiler should use when software-pipelining loops.
Mostly useful for matrix multiplication workloads on SM80+ GPUs.
:type num_stages: int
:ivar pre_hook: a function that will be called before the kernel is called. Parameters of this
function are args.
"""
def __init__(self, kwargs, num_warps=4, num_stages=2, pre_hook=None):
self.kwargs = kwargs
self.num_warps = num_warps
self.num_stages = num_stages
self.pre_hook = pre_hook
def __str__(self):
res = []
for k, v in self.kwargs.items():
res.append(f'{k}: {v}')
res.append(f'num_warps: {self.num_warps}')
res.append(f'num_stages: {self.num_stages}')
return ', '.join(res)
def autotune(configs, key, prune_configs_by=None, reset_to_zero=None, warmup=25, rep=100):
"""
Decorator for auto-tuning a :code:`triton.jit`'d function.
.. highlight:: python
.. code-block:: python
@triton.autotune(configs=[
triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4),
triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8),
],
key=['x_size'] # the two above configs will be evaluated anytime
# the value of x_size changes
)
@triton.jit
def kernel(x_ptr, x_size, **META):
BLOCK_SIZE = META['BLOCK_SIZE']
:note: When all the configurations are evaluated, the kernel will run multiple times.
This means that whatever value the kernel updates will be updated multiple times.
To avoid this undesired behavior, you can use the `reset_to_zero` argument, which
resets the value of the provided tensor to `zero` before running any configuration.
:param configs: a list of :code:`triton.Config` objects
:type configs: list[triton.Config]
:param key: a list of argument names whose change in value will trigger the evaluation of all provided configs.
:type key: list[str]
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
'perf_model': performance model used to predict running time with different configs, returns running time
'top_k': number of configs to bench
'early_config_prune'(optional): a function used to do early prune (eg, num_stages). It takes configs:List[Config] as its input, and returns pruned configs.
:param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs.
:type reset_to_zero: list[str]
:param warmup: Warmup time (in ms) to pass to benchmarking, defaults to 25.
:type warmup: int
:param rep: Repetition time (in ms) to pass to benchmarking, defaults to 100.
:type rep: int
"""
def decorator(fn):
return Autotuner(fn, fn.arg_names, configs, key, reset_to_zero, prune_configs_by, warmup, rep)
return decorator
class Heuristics(KernelInterface):
def __init__(self, fn, arg_names, values) -> None:
self.fn = fn
self.values = values
self.arg_names = arg_names
def run(self, *args, **kwargs):
for v, heur in self.values.items():
kwargs[v] = heur({**dict(zip(self.arg_names, args)), **kwargs})
return self.fn.run(*args, **kwargs)
def heuristics(values):
"""
Decorator for specifying how the values of certain meta-parameters may be computed.
This is useful for cases where auto-tuning is prohibitively expensive, or just not applicable.
.. highlight:: python
.. code-block:: python
@triton.heuristics(values={'BLOCK_SIZE': lambda args: 2 ** int(math.ceil(math.log2(args[1])))})
@triton.jit
def kernel(x_ptr, x_size, **META):
BLOCK_SIZE = META['BLOCK_SIZE'] # smallest power-of-two >= x_size
:param values: a dictionary of meta-parameter names and functions that compute the value of the meta-parameter.
each such function takes a list of positional arguments as input.
:type values: dict[str, Callable[[list[Any]], Any]]
"""
def decorator(fn):
return Heuristics(fn, fn.arg_names, values)
return decorator
| 10,907 | 41.776471 | 163 |
py
|
triton
|
triton-main/python/triton/runtime/driver.py
|
import abc
import hashlib
import os
import tempfile
from pathlib import Path
from ..common.build import _build
from .cache import get_cache_manager
class DriverBase(metaclass=abc.ABCMeta):
CUDA = 0
HIP = 1
@staticmethod
def third_party_dir():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "third_party")
def __init__(self) -> None:
pass
# -----------------------------
# CUDA
# -----------------------------
class CudaUtils(object):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(CudaUtils, cls).__new__(cls)
return cls.instance
def __init__(self):
dirname = os.path.dirname(os.path.realpath(__file__))
src = Path(os.path.join(dirname, "backends", "cuda.c")).read_text()
key = hashlib.md5(src.encode("utf-8")).hexdigest()
cache = get_cache_manager(key)
fname = "cuda_utils.so"
cache_path = cache.get_file(fname)
if cache_path is None:
with tempfile.TemporaryDirectory() as tmpdir:
src_path = os.path.join(tmpdir, "main.c")
with open(src_path, "w") as f:
f.write(src)
so = _build("cuda_utils", src_path, tmpdir)
with open(so, "rb") as f:
cache_path = cache.put(f.read(), fname, binary=True)
import importlib.util
spec = importlib.util.spec_from_file_location("cuda_utils", cache_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
self.load_binary = mod.load_binary
self.get_device_properties = mod.get_device_properties
class CudaDriver(DriverBase):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(CudaDriver, cls).__new__(cls)
return cls.instance
def __init__(self):
self.utils = CudaUtils()
self.backend = self.CUDA
# -----------------------------
# HIP
# -----------------------------
class HIPUtils(object):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(HIPUtils, cls).__new__(cls)
return cls.instance
def __init__(self):
dirname = os.path.dirname(os.path.realpath(__file__))
src = Path(os.path.join(dirname, "backends", "hip.c")).read_text()
key = hashlib.md5(src.encode("utf-8")).hexdigest()
cache = get_cache_manager(key)
fname = "hip_utils.so"
cache_path = cache.get_file(fname)
if cache_path is None:
with tempfile.TemporaryDirectory() as tmpdir:
src_path = os.path.join(tmpdir, "main.c")
with open(src_path, "w") as f:
f.write(src)
so = _build("hip_utils", src_path, tmpdir)
with open(so, "rb") as f:
cache_path = cache.put(f.read(), fname, binary=True)
import importlib.util
spec = importlib.util.spec_from_file_location("hip_utils", cache_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
self.load_binary = mod.load_binary
self.get_device_properties = mod.get_device_properties
class HIPDriver(DriverBase):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(HIPDriver, cls).__new__(cls)
return cls.instance
def __init__(self):
self.utils = HIPUtils()
self.backend = self.HIP
class UnsupportedDriver(DriverBase):
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(UnsupportedDriver, cls).__new__(cls)
return cls.instance
def __init__(self):
self.utils = None
self.backend = None
# -----------------------------
# Driver
# -----------------------------
class LazyProxy:
def __init__(self, init_fn):
self._init_fn = init_fn
self._obj = None
def _initialize_obj(self):
if self._obj is None:
self._obj = self._init_fn()
def __getattr__(self, name):
self._initialize_obj()
return getattr(self._obj, name)
def __setattr__(self, name, value):
if name in ['_init_fn', '_obj']:
super().__setattr__(name, value)
else:
self._initialize_obj()
setattr(self._obj, name, value)
def __delattr__(self, name):
self._initialize_obj()
delattr(self._obj, name)
def __repr__(self):
if self._obj is None:
return f"<{self.__class__.__name__} for {self._init_fn} not yet initialized>"
return repr(self._obj)
def __str__(self):
self._initialize_obj()
return str(self._obj)
def initialize_driver():
import torch
if torch.version.hip is not None:
return HIPDriver()
elif torch.cuda.is_available():
return CudaDriver()
else:
return UnsupportedDriver()
driver = LazyProxy(initialize_driver)
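# `driver` is initialized lazily: the first attribute access, e.g. `driver.backend`
# or `driver.utils.load_binary(...)`, runs initialize_driver() and then forwards to
# the concrete CudaDriver / HIPDriver / UnsupportedDriver instance.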
| 5,045 | 27.834286 | 92 |
py
|
triton
|
triton-main/python/triton/runtime/errors.py
|
class OutOfResources(Exception):
def __init__(self, required, limit, name):
self.message = f'out of resource: {name}, '\
f'Required: {required}, '\
f'Hardware limit: {limit}'
self.message += '. Reducing block sizes or `num_stages` may help.'
self.required = required
self.limit = limit
self.name = name
super().__init__(self.message)
def __reduce__(self):
# this is necessary to make OutOfResources picklable
return (type(self), (self.required, self.limit, self.name))
| 591 | 36 | 74 |
py
|
triton
|
triton-main/python/triton/runtime/cache.py
|
import json
import os
import random
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Optional
def default_cache_dir():
return os.path.join(Path.home(), ".triton", "cache")
class CacheManager(ABC):
def __init__(self, key):
pass
@abstractmethod
def get_file(self, filename) -> Optional[str]:
pass
@abstractmethod
def has_file(self, filename) -> bool:
pass
@abstractmethod
def put(self, data, filename, binary=True) -> str:
pass
@abstractmethod
def get_group(self, filename: str) -> Optional[Dict[str, str]]:
pass
@abstractmethod
def put_group(self, filename: str, group: Dict[str, str]):
pass
class FileCacheManager(CacheManager):
def __init__(self, key):
self.key = key
self.lock_path = None
# create cache directory if it doesn't exist
self.cache_dir = os.environ.get('TRITON_CACHE_DIR', default_cache_dir())
if self.cache_dir:
self.cache_dir = os.path.join(self.cache_dir, self.key)
self.lock_path = os.path.join(self.cache_dir, "lock")
os.makedirs(self.cache_dir, exist_ok=True)
def _make_path(self, filename) -> str:
return os.path.join(self.cache_dir, filename)
def has_file(self, filename):
if not self.cache_dir:
return False
return os.path.exists(self._make_path(filename))
def get_file(self, filename) -> Optional[str]:
if self.has_file(filename):
return self._make_path(filename)
else:
return None
def get_group(self, filename: str) -> Optional[Dict[str, str]]:
grp_filename = f"__grp__{filename}"
if not self.has_file(grp_filename):
return None
grp_filepath = self._make_path(grp_filename)
with open(grp_filepath) as f:
grp_data = json.load(f)
child_paths = grp_data.get("child_paths", None)
# Invalid group data.
if child_paths is None:
return None
result = {}
for c in child_paths:
p = self._make_path(c)
if not os.path.exists(p):
raise Exception(f"Group file {p} does not exist from group {grp_filename} ")
result[c] = p
return result
# Record the files previously written with put() as being members of a single group
def put_group(self, filename: str, group: Dict[str, str]):
if not self.cache_dir:
return
grp_contents = json.dumps({"child_paths": sorted(list(group.keys()))})
grp_filename = f"__grp__{filename}"
return self.put(grp_contents, grp_filename, binary=False)
def put(self, data, filename, binary=True) -> str:
if not self.cache_dir:
return
binary = isinstance(data, bytes)
if not binary:
data = str(data)
assert self.lock_path is not None
filepath = self._make_path(filename)
# Random ID to avoid any collisions
rnd_id = random.randint(0, 1000000)
# we use the PID in case there are a bunch of these around, so we can see which PID made it
pid = os.getpid()
# use tempfile to be robust against program interruptions
temp_path = f"{filepath}.tmp.pid_{pid}_{rnd_id}"
mode = "wb" if binary else "w"
with open(temp_path, mode) as f:
f.write(data)
# Replace is guaranteed to be atomic on POSIX systems if it succeeds
# so filepath cannot see a partial write
os.replace(temp_path, filepath)
return filepath
__cache_cls = FileCacheManager
__cache_cls_nme = "DEFAULT"
def get_cache_manager(key) -> CacheManager:
import os
user_cache_manager = os.environ.get("TRITON_CACHE_MANAGER", None)
global __cache_cls
global __cache_cls_nme
if user_cache_manager is not None and user_cache_manager != __cache_cls_nme:
import importlib
module_path, clz_nme = user_cache_manager.split(":")
module = importlib.import_module(module_path)
__cache_cls = getattr(module, clz_nme)
__cache_cls_nme = user_cache_manager
return __cache_cls(key)
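# Configuration sketch (values illustrative): a custom manager is selected with
#   TRITON_CACHE_MANAGER="my_pkg.my_cache:MyCacheManager"
# where the class implements the CacheManager interface above and is instantiated
# with the compilation key instead of FileCacheManager. TRITON_CACHE_DIR only moves
# the default FileCacheManager location away from ~/.triton/cache.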
| 4,190 | 30.75 | 92 |
py
|
triton
|
triton-main/python/triton/runtime/__init__.py
|
from .autotuner import (Autotuner, Config, Heuristics, OutOfResources, autotune,
heuristics)
from .driver import driver
from .jit import (JITFunction, KernelInterface, MockTensor, TensorWrapper, reinterpret,
version_key)
__all__ = [
"driver",
"Config",
"Heuristics",
"autotune",
"heuristics",
"JITFunction",
"KernelInterface",
"version_key",
"reinterpret",
"TensorWrapper",
"OutOfResources",
"MockTensor",
"Autotuner",
]
| 516 | 22.5 | 87 |
py
|
triton
|
triton-main/python/triton/interpreter/torch_wrapper.py
|
try:
import torch as _torch
except ImportError:
_torch = None
class TorchWrapper:
"""
Helps in making torch an optional dependency
"""
def __getattr__(self, name):
if _torch is None:
raise ImportError("Triton requires PyTorch to be installed")
return getattr(_torch, name)
torch = TorchWrapper()
| 353 | 17.631579 | 72 |
py
|
triton
|
triton-main/python/triton/interpreter/core.py
|
from typing import Tuple
import dataclasses
@dataclasses.dataclass
class ExecutionContext:
program_id: Tuple[int]
program_size: Tuple[int]
| 150 | 14.1 | 28 |
py
|
triton
|
triton-main/python/triton/interpreter/tl_lang.py
|
from __future__ import annotations
from ..language import core as lcore
from . import torch_wrapper
from .core import ExecutionContext
from .memory_map import MemoryMap
torch = torch_wrapper.torch
def _primitive_to_tensor(x):
"""
Converts various Python primitive data types to PyTorch tensor.
"""
tensor_args = {"device": "cuda"}
if isinstance(x, bool):
return torch.tensor([x], dtype=torch.bool, **tensor_args)
elif isinstance(x, int):
if -(2**31) <= x < 2**31:
return torch.tensor([x], dtype=torch.int32, **tensor_args)
elif -(2**63) <= x < 2**63:
return torch.tensor([x], dtype=torch.int64, **tensor_args)
else:
raise RuntimeError(f"Nonrepresentable integer {x}.")
elif isinstance(x, float):
return torch.tensor([x], dtype=torch.float32, **tensor_args)
elif torch.is_tensor(x):
return x
elif isinstance(x, WrappedTensor):
return x
elif isinstance(x, debugger_constexpr):
if x.value is None:
return None
return _primitive_to_tensor(x.value)
elif x is None:
return None
assert False, f"cannot convert {x} of type {type(x)} to tensor"
def _infer_tensor(func):
"""
A decorator function to harmonize function args:
- converts primitives to PyTorch tensors
- wraps PyTorch tensors with WrappedTensors
"""
def wrapper(*args):
new_args = tuple(map(lambda v: _primitive_to_tensor(v), args))
new_args = tuple(map(lambda v: WrappedTensor(v) if torch.is_tensor(v) else v, new_args))
return func(*new_args)
return wrapper
def _tensor_operation(func):
"""
A decorator function to unwrap WrappedTensors and debugger_constexpr before calling the function.
Can be combined with _infer_tensor decorator to harmonize args (everything to torch tensor).
"""
def wrapper(*args, **kwargs):
for arg in args:
assert not torch.is_tensor(arg), "unexpected tensor argument"
def unwrap_tensor(v):
if isinstance(v, WrappedTensor):
return v.tensor
if isinstance(v, debugger_constexpr):
return v.value
return v
new_args = tuple(map(unwrap_tensor, args))
new_kwargs = {k: unwrap_tensor(v) for k, v in kwargs.items()}
result = func(args[0], *new_args[1:], **new_kwargs)
return WrappedTensor(result) if torch.is_tensor(result) else result
return wrapper
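# The two decorators are typically stacked, as on the WrappedTensor operators below:
# the outer `@_infer_tensor` lifts primitives/plain tensors into WrappedTensor, and
# `@_tensor_operation` unwraps them again so the body can call plain torch ops, e.g.
#
#   @_infer_tensor
#   @_tensor_operation
#   def __add__(self, other):
#       return torch.add(self.tensor, other)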
class debugger_constexpr:
def __init__(self, value):
if isinstance(value, debugger_constexpr):
self.value = value.value
else:
self.value = value
def __str__(self) -> str:
return "debugger_constexpr(" + str(self.value) + ")"
def __index__(self) -> int:
return self.value
def __bool__(self):
return bool(self.value)
def __ge__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value >= other
def __gt__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value > other
def __le__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value <= other
def __lt__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value < other
def __eq__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value == other
def __or__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value | other
def __ror__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value | other
def __and__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value & other
def __rand__(self, other):
other = other.value if isinstance(other, debugger_constexpr) else other
return self.value & other
def to(self, dtype, bitcast=False, _builder=None):
if dtype in [torch.int64]:
ret_ty = int
elif dtype == torch.bool:
ret_ty = bool
elif dtype in [torch.float64]:
ret_ty = float
else:
raise ValueError("dtype not supported in debugger")
return debugger_constexpr(ret_ty(self.value))
class WrappedTensor:
def __init__(self, tensor):
self.tensor = tensor
def __index__(self) -> int:
return self.tensor.item()
def __str__(self) -> str:
return "wrapped_" + str(self.tensor)
def __bool__(self) -> bool:
return torch.all(self.tensor == True).item() # noqa: E712
@property
def dtype(self):
return self.tensor.dtype
@_infer_tensor
@_tensor_operation
def __add__(self, other):
return torch.add(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __radd__(self, other):
return self.__add__(other)
@_infer_tensor
@_tensor_operation
def __sub__(self, other):
return torch.sub(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rsub__(self, other):
return torch.sub(other, self.tensor)
@_infer_tensor
@_tensor_operation
def __mul__(self, other):
return torch.mul(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rmul__(self, other):
return self.__mul__(other)
@_infer_tensor
@_tensor_operation
def __truediv__(self, other):
return torch.div(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rtruediv__(self, other):
return torch.div(other, self.tensor)
@_infer_tensor
@_tensor_operation
def __floordiv__(self, other):
return torch.floor_divide(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rfloordiv__(self, other):
return torch.floor_divide(other, self.tensor)
@_infer_tensor
@_tensor_operation
def __mod__(self, other):
return torch.remainder(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rmod__(self, other):
return torch.remainder(other, self.tensor)
@_infer_tensor
@_tensor_operation
def __neg__(self):
return -self.tensor
@_infer_tensor
@_tensor_operation
def __invert__(self):
return ~self.tensor
@_infer_tensor
@_tensor_operation
def __and__(self, other):
return torch.bitwise_and(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __or__(self, other):
return torch.bitwise_or(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __xor__(self, other):
return torch.bitwise_xor(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __lshift__(self, other):
return torch.bitwise_left_shift(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __rshift__(self, other):
return torch.bitwise_right_shift(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __gt__(self, other):
return self.tensor > other
@_infer_tensor
@_tensor_operation
def __rgt__(self, other):
return other > self.tensor
@_infer_tensor
@_tensor_operation
def __ge__(self, other):
return self.tensor >= other
@_infer_tensor
@_tensor_operation
def __rge__(self, other):
return other >= self.tensor
@_infer_tensor
@_tensor_operation
def __lt__(self, other):
return self.tensor < other
@_infer_tensor
@_tensor_operation
def __rlt__(self, other):
return other < self.tensor
@_infer_tensor
@_tensor_operation
def __le__(self, other):
return self.tensor <= other
@_infer_tensor
@_tensor_operation
def __rle__(self, other):
return other <= self.tensor
@_infer_tensor
@_tensor_operation
def __eq__(self, other):
return torch.equal(self.tensor, other)
@_infer_tensor
@_tensor_operation
def __ne__(self, other):
return not torch.equal(self.tensor, other)
@_tensor_operation
def __getitem__(self, slices):
return self.tensor.__getitem__(slices)
# if isinstance(slices, slice):
# slices = [slices]
# src_shape = self.shape
# dst_shape = []
# curr = 0
# for sl in slices:
# if isinstance(sl, constexpr) and sl.value is None:
# dst_shape.append(1)
# elif sl == slice(None, None, None):
# dst_shape.append(src_shape[curr].value)
# curr += 1
# ret = torch.reshape(self.tensor, dst_shape, )
# return ret
@_tensor_operation
def to(self, dtype, bitcast=False):
return self.tensor.to(dtype)
# if isinstance(bitcast, constexpr):
# bitcast = bitcast.value
# if bitcast:
# return semantic.bitcast(self, dtype, )
# return semantic.cast(self, dtype, )
def _constexpr_to_value(v):
if isinstance(v, debugger_constexpr):
return v.value
return v
class TritonLangProxy:
_memory_map: MemoryMap
_context: ExecutionContext
def __init__(self, memory_map: MemoryMap, context: ExecutionContext):
self._memory_map = memory_map
self._context = context
# Types
# Removed void, int1, float8, uint16, uint32, uint64, pi32_t
# constexpr = debugger_constexpr
# Program functions
@_tensor_operation
def load(
self,
pointer: torch.Tensor,
mask: torch.Tensor = None,
other=0.0,
cache_modifier="",
eviction_policy="",
volatile=False,
):
return self._memory_map.load(pointer, mask, other)
@_tensor_operation
def store(self, pointer: torch.Tensor, value: torch.Tensor, mask=None):
return self._memory_map.store(pointer, value, mask)
@_tensor_operation
def program_id(self, axis):
assert axis < len(self._context.program_id)
return torch.tensor([self._context.program_id[axis]], dtype=torch.int32, device="cuda")
@_tensor_operation
def num_programs(self, axis):
assert axis < len(self._context.program_size)
return torch.tensor([self._context.program_size[axis]], dtype=torch.int32, device="cuda")
@_tensor_operation
def arange(self, start, end):
return torch.arange(start=start, end=end, dtype=torch.int32, device="cuda")
@_tensor_operation
def zeros(self, shape, dtype):
for i, d in enumerate(shape):
if not isinstance(d, debugger_constexpr):
raise TypeError(f"Shape element {i} must have type `constexpr`")
if not isinstance(d.value, int):
raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]")
shape = [x.value for x in shape]
if isinstance(dtype, lcore.dtype):
if dtype.is_fp32():
dtype = torch.float32
elif dtype.is_fp16():
dtype = torch.float16
elif dtype.is_bf16():
dtype = torch.bfloat16
elif dtype.is_int32():
dtype = torch.int32
elif dtype.is_int16():
dtype = torch.int16
elif dtype.is_int8():
dtype = torch.int8
else:
raise TypeError(f"Unsupported dtype {dtype}")
return torch.zeros(size=shape, dtype=dtype, device="cuda")
@_tensor_operation
def dequantize(self, input, scale, shift, nbit, dst_ty=None):
if dst_ty is None:
dst_ty = torch.float16
raise NotImplementedError()
@_tensor_operation
def broadcast(self, input, other):
raise NotImplementedError()
@_tensor_operation
def broadcast_to(self, input, shape):
raise NotImplementedError()
@_tensor_operation
def cat(self, input, shape):
raise NotImplementedError()
@_tensor_operation
def reshape(self, input, shape):
raise NotImplementedError()
@_tensor_operation
def dot(self, input, other, trans_a=False, trans_b=False, allow_tf32=True):
assert input.dtype == other.dtype
if trans_a:
input = input.T
if trans_b:
other = other.T
return torch.matmul(input=input, other=other)
@_tensor_operation
def atomic_cas(self, pointer, cmp, val):
stored = self._memory_map.load(pointer, None, 0.0)
if not isinstance(cmp, torch.Tensor):
cmp = torch.tensor([cmp], dtype=stored.dtype, device="cuda")
if not isinstance(val, torch.Tensor):
val = torch.tensor([val], dtype=stored.dtype, device="cuda")
if stored == cmp:
self._memory_map.store(pointer, val, None)
return stored
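# Note: program instances run one at a time in this emulation, so the
# compare-and-swap above is trivially atomic: it reads the stored value,
# compares it with `cmp`, conditionally writes `val`, and returns the old value.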
@_tensor_operation
def atomic_xchg(self, pointer, val, mask=None):
if isinstance(val, int):
val = torch.tensor([val], dtype=torch.int32, device="cuda")
stored = self._memory_map.load(pointer, mask, 0.0)
self._memory_map.store(pointer, val, mask)
return stored
@_tensor_operation
def atomic_add(self, pointer, val, mask=None):
# arbitrary `other` value, as it will be masked again when storing
stored = self._memory_map.load(pointer, mask, 0.0)
result = stored + val
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def atomic_max(self, pointer, val, mask=None):
stored = self._memory_map.load(pointer, mask, 0.0)
result = torch.maximum(stored, val)
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def atomic_min(self, pointer, val, mask=None):
stored = self._memory_map.load(pointer, mask, 0.0)
result = torch.minimum(stored, val)
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def atomic_and(self, pointer, val, mask=None):
stored = self._memory_map.load(pointer, mask, 0)
result = torch.bitwise_and(stored, val)
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def atomic_or(self, pointer, val, mask=None):
stored = self._memory_map.load(pointer, mask, 0)
result = torch.bitwise_or(stored, val)
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def atomic_xor(self, pointer, val, mask=None):
stored = self._memory_map.load(pointer, mask, 0)
result = torch.bitwise_xor(stored, val)
self._memory_map.store(pointer, result, mask)
return stored
@_tensor_operation
def where(self, condition, x, y):
condition = _primitive_to_tensor(condition)
x = _primitive_to_tensor(x)
y = _primitive_to_tensor(y)
return torch.where(condition, x, y)
@_tensor_operation
def umulhi(self, x, y):
raise NotImplementedError()
@_tensor_operation
def fdiv(self, x, y, ieee_rounding=False):
raise NotImplementedError()
@_tensor_operation
def exp(self, x):
return torch.exp(x)
@_tensor_operation
def log(self, x):
return torch.log(x)
@_tensor_operation
def cos(self, x):
return torch.cos(x)
@_tensor_operation
def sin(self, x):
return torch.sin(x)
@_tensor_operation
def sqrt(self, x):
return torch.sqrt(x)
@_tensor_operation
def globaltimer(self):
raise NotImplementedError()
@_tensor_operation
def clock(self):
raise NotImplementedError()
@_tensor_operation
def debug_barrier(self):
raise NotImplementedError()
@_tensor_operation
def multiple_of(self, input, values):
return input
@_tensor_operation
def max_contiguous(self, input, values):
return input
@_tensor_operation
def max_constancy(self, input, values):
return input
@_tensor_operation
def abs(self, x):
return torch.abs(x)
@_tensor_operation
def cdiv(self, x, div):
return (x + div - 1) // div
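# e.g. cdiv(10, 3) == 4 -- ceiling division, typically used for grid-size computations.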
@_tensor_operation
def minimum(self, x, y):
if isinstance(x, int):
x = torch.tensor(x, device="cuda")
if isinstance(y, int):
y = torch.tensor(y, device="cuda")
return torch.minimum(x, y)
@_tensor_operation
def maximum(self, x, y):
# mirror minimum(): accept python ints by promoting them to tensors
if isinstance(x, int):
x = torch.tensor(x, device="cuda")
if isinstance(y, int):
y = torch.tensor(y, device="cuda")
return torch.maximum(x, y)
@_tensor_operation
def sigmoid(self, x):
raise NotImplementedError()
@_tensor_operation
def softmax(self, x, ieee_rounding=False):
raise NotImplementedError()
@_tensor_operation
def ravel(self, x):
raise NotImplementedError()
@_tensor_operation
def swizzle2d(self, i, j, size_i, size_j, size_g):
raise NotImplementedError()
@_tensor_operation
def zeros_like(self, input):
raise NotImplementedError()
@_tensor_operation
def max(self, input, axis=None):
if axis is None:
return torch.max(input)
return torch.max(input, dim=axis).values
@_tensor_operation
def argmax(self, input, axis):
raise NotImplementedError()
@_tensor_operation
def min(self, input, axis=None):
if axis is None:
return torch.min(input)
return torch.min(input, dim=axis).values
@_tensor_operation
def argmin(self, input, axis):
raise NotImplementedError()
@_tensor_operation
def sum(self, input, axis=None):
if axis is None:
return torch.sum(input)
return torch.sum(input, dim=axis)
@_tensor_operation
def xor_sum(self, input, axis):
raise NotImplementedError()
@_tensor_operation
def cumsum(self, input, axis=None):
if axis is None:
# torch.cumsum requires an explicit dim; flatten to take a full cumulative sum
return torch.cumsum(input.flatten(), dim=0)
return torch.cumsum(input, dim=axis)
@_tensor_operation
def cumprod(self, input, axis=None):
if axis is None:
# torch.cumprod requires an explicit dim; flatten to take a full cumulative product
return torch.cumprod(input.flatten(), dim=0)
return torch.cumprod(input, dim=axis)
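# Illustrative sketch (hypothetical, assuming a CUDA device and that the
# @_tensor_operation wrappers accept plain tensors): the proxy emulates
# tl.load/tl.store by translating int64 pointer tensors through a MemoryMap,
# using the same MemoryMap/ExecutionContext types named in the annotations above.
#
#   memory = MemoryMap()
#   proxy = TritonLangProxy(memory, ExecutionContext((0,), (1,)))
#   x = torch.arange(4, dtype=torch.float32, device="cuda")
#   base = torch.tensor([memory.add_tensor(x)], dtype=torch.int64, device="cuda")
#   offs = torch.arange(4, dtype=torch.int64, device="cuda")
#   vals = proxy.load(base + offs)            # reads back the four elements of x
#   proxy.store(base + offs, vals + 1.0)      # writes the incremented values through x's registered storage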
| 18,501 | 27.819315 | 118 |
py
|
triton
|
triton-main/python/triton/interpreter/interpreter.py
|
import itertools
import random
from typing import Iterator, Tuple
from .. import language as tl
# import .language.core as lcore
from ..language import core as lcore
from . import torch_wrapper
from .core import ExecutionContext
from .memory_map import MemoryMap
from .tl_lang import (TritonLangProxy, WrappedTensor, _primitive_to_tensor,
debugger_constexpr)
torch = torch_wrapper.torch
tl_method_backup = {}
def get_proxy_method(proxy, name):
method = getattr(proxy, name)
def fun(*args, **kwargs):
return method(*args, **kwargs)
return fun
def attach_triton(module, proxy):
method_list = [func for func in dir(TritonLangProxy) if func[0] != "_"]
for name in method_list:
if hasattr(module, name):
attr = getattr(module, name)
tl_method_backup[name] = attr
if callable(attr):
setattr(module, name, get_proxy_method(proxy, name))
else:
setattr(module, name, getattr(proxy, name))
def detach_triton(module):
for name, method in tl_method_backup.items():
setattr(module, name, method)
def program_ids_from_grid(grid: Tuple[int, ...]) -> Iterator[Tuple[int, ...]]:
# reverse the grid dimensions and generate the range for each dimension
reversed_grid = reversed(grid)
ranges_for_each_dimension = [range(dim) for dim in reversed_grid]
# generate all program-id combinations and iterate over them in a random order
index_combinations = list(itertools.product(*ranges_for_each_dimension))
random.shuffle(index_combinations)
for index_combination in index_combinations:
yield index_combination
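# Example (one possible shuffle): for grid=(2, 3) the dimensions are reversed,
# so the generator yields the six pairs (i, j) with i in range(3) and j in range(2):
#
#   list(program_ids_from_grid((2, 3)))
#   # -> [(2, 1), (0, 0), (1, 1), (2, 0), (0, 1), (1, 0)]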
class DebuggerFunction:
def __init__(self, func, grid=(1,)):
self.func = func
self.grid = grid
def _is_constexpr(self, name):
return name in self.func.__annotations__ and self.func.__annotations__[name] is lcore.constexpr
def _get_constexpr(self):
result = []
for name, annotation in self.func.__annotations__.items():
if annotation is lcore.constexpr:
result.append(name)
return result
def _assert_constexpr(self, **kwargs):
constexp = self._get_constexpr()
missing = [i for i in constexp if i not in kwargs.keys()]
assert len(missing) == 0, f"You must specify constexpr {missing}"
def _get_grid(self, **kwargs):
if callable(self.grid):
return self.grid(kwargs)
else:
return self.grid
def __call__(self, *args, **kwargs):
self._assert_constexpr(**kwargs)
memory = MemoryMap()
def convert_arg(v):
name, arg = v
if torch.is_tensor(arg):
ptr = memory.add_tensor(arg)
return WrappedTensor(torch.tensor([ptr], dtype=torch.int64, device="cuda"))
if self._is_constexpr(name):
return debugger_constexpr(arg)
return WrappedTensor(_primitive_to_tensor(arg))
new_args = tuple(map(convert_arg, zip(self.func.__code__.co_varnames, args)))
new_kwargs = {k: convert_arg((k, v)) for (k, v) in kwargs.items() if k not in ["num_warps", "num_stages"]}
grid = self._get_grid(**kwargs)
for program_id in program_ids_from_grid(grid):
proxy = TritonLangProxy(memory, ExecutionContext(program_id, grid))
attach_triton(tl, proxy)
self.func(*new_args, **new_kwargs)
detach_triton(tl)
class GridSelector:
"""
Entry point of the debugger
"""
def __init__(self, func):
version = torch.__version__
assert version[0] == "2", f"Triton Debugger only supports torch >= 2.0, using {version}"
self.func = func
def __getitem__(self, grid):
return DebuggerFunction(self.func, grid)
def __call__(self, *args, **kwargs):
return DebuggerFunction(self.func)(*args, **kwargs)
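# Minimal usage sketch (hypothetical kernel and CUDA tensors): decorating a
# kernel with GridSelector lets it run eagerly; while the body executes,
# attach_triton() swaps the tl.* functions for TritonLangProxy methods backed
# by a MemoryMap, and detach_triton() restores them afterwards.
#
#   @GridSelector
#   def copy_kernel(src_ptr, dst_ptr, n, BLOCK: tl.constexpr):
#       offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
#       mask = offs < n
#       tl.store(dst_ptr + offs, tl.load(src_ptr + offs, mask), mask)
#
#   copy_kernel[(4,)](src, dst, n=1024, BLOCK=256)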
class AutotuneGridSelector:
def __init__(self, func, autotune_params):
self.func = func
self.autotune_params = autotune_params
def __getitem__(self, grid):
return AutotuneRunner(self.func, self.autotune_params, grid)
def __call__(self, *args, **kwargs):
return AutotuneRunner(self.func, self.autotune_params)(*args, **kwargs)
class AutotuneRunner:
def __init__(self, func, autotune_params, grid=None):
self.func = func
self.autotune_params = autotune_params
self.grid = grid
def __call__(self, *args, **kwargs):
assert len(self.autotune_params["configs"]) >= 1
for config in self.autotune_params["configs"][1:]:
def convert_arg(v):
if torch.is_tensor(v):
return torch.clone(v)
return v
new_args = tuple(map(convert_arg, args))
new_kwargs = {k: convert_arg(v) for k, v in kwargs.items()}
if self.grid:
self.func[self.grid](*new_args, **new_kwargs, **config.kwargs)
else:
self.func(*new_args, **new_kwargs, **config.kwargs)
main_config = self.autotune_params["configs"][0]
if self.grid:
self.func[self.grid](*args, **kwargs, **main_config.kwargs)
else:
self.func(*args, **kwargs, **main_config.kwargs)
def triton_debug_autotune(**kwargs):
def wrapper(func):
return AutotuneGridSelector(func, kwargs)
return wrapper
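# Drop-in sketch mirroring triton.autotune (hypothetical kernel and configs;
# anything exposing a .kwargs dict works where triton.Config is shown, and
# GridSelector stands in for the usual jit decorator):
#
#   @triton_debug_autotune(configs=[triton.Config({"BLOCK": 64}),
#                                   triton.Config({"BLOCK": 128})],
#                          key=["n"])
#   @GridSelector
#   def my_kernel(x_ptr, out_ptr, n, BLOCK: tl.constexpr):
#       ...
#
#   my_kernel[(grid_size,)](x, out, n=n)
#
# Each config after the first is executed once on cloned tensor arguments;
# the first config then runs against the real arguments (see AutotuneRunner).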
| 5,412 | 30.47093 | 114 |
py
|
triton
|
triton-main/python/triton/interpreter/memory_map.py
|
from __future__ import annotations
import dataclasses
from . import torch_wrapper
torch = torch_wrapper.torch
@dataclasses.dataclass
class RegisteredStorage:
storage: torch.Storage
dtype: torch.dtype
size: int
ptr: int
@property
def end_ptr(self) -> int:
return self.ptr + self.size
@property
def access_tensor(self) -> torch.Tensor:
return torch.tensor(self.storage, dtype=self.dtype, device=self.storage.device)
def ensure_immutable(self):
assert self.storage.data_ptr() == self.ptr and self.storage.size() == self.size
class MemoryMap:
storages: list[RegisteredStorage]
def __init__(self):
self.storages = []
def _get_registered_storage(self, pointer: torch.Tensor):
max_pointer = torch.max(pointer).item()
min_pointer = torch.min(pointer).item()
registered_storage = next(
filter(
lambda registered: min_pointer >= registered.ptr and max_pointer < registered.end_ptr, self.storages
),
None,
)
if registered_storage is None:
raise Exception("Storage not found or pointers spanning multiple tensors")
registered_storage.ensure_immutable()
return registered_storage
def add_tensor(self, t: torch.Tensor):
storage = t.untyped_storage()
self.storages.append(RegisteredStorage(storage, t.dtype, storage.size(), storage.data_ptr()))
return t.data_ptr()
def load(
self,
pointer: torch.Tensor,
mask: torch.Tensor = None,
other=0.0,
):
assert pointer.is_cuda
assert 0 < pointer.dim() < 3
assert pointer.dtype == torch.int64
if mask is None:
mask = torch.ones_like(pointer).bool()
assert mask.is_cuda
assert 0 < mask.dim() < 3
assert mask.dtype == torch.bool
mask = mask.expand(pointer.size())
if torch.all(~mask):
# TODO: the dtype is a guess here; it cannot be inferred when every lane is masked off
return torch.full_like(pointer, fill_value=other, dtype=torch.float16, device="cuda")
registered_storage = self._get_registered_storage(pointer[mask])
access_tensor = registered_storage.access_tensor
index_tensor = pointer - registered_storage.ptr
block = torch.full_like(pointer, fill_value=other, dtype=access_tensor.dtype, device="cuda")
block[mask] = access_tensor[index_tensor[mask]]
return block
def store(self, pointer: torch.Tensor, value: torch.Tensor, mask=None):
assert 0 < pointer.dim() < 3
assert pointer.dtype == torch.int64
if mask is None:
mask = torch.ones_like(pointer).bool()
assert 0 < mask.dim() < 3
assert mask.dtype == torch.bool
mask = mask.expand(pointer.size())
if torch.all(~mask):
return
registered_storage = self._get_registered_storage(pointer[mask])
access_tensor = registered_storage.access_tensor
index_tensor = pointer - registered_storage.ptr
access_tensor[index_tensor[mask]] = value[mask].to(access_tensor.dtype)
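# Sketch of the pointer model (hypothetical tensors, assuming a CUDA device):
# add_tensor() registers a tensor's untyped storage and returns its data_ptr();
# load()/store() then interpret int64 pointer tensors as element offsets
# relative to that base address.
#
#   mm = MemoryMap()
#   t = torch.zeros(8, dtype=torch.float32, device="cuda")
#   base = torch.tensor([mm.add_tensor(t)], dtype=torch.int64, device="cuda")
#   offs = torch.arange(8, dtype=torch.int64, device="cuda")
#   mask = offs < 4
#   mm.store(base + offs, torch.ones(8, device="cuda"), mask)   # writes only the first 4 lanes
#   mm.load(base + offs, mask, other=-1.0)                      # masked-off lanes read as -1.0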
| 3,187 | 29.951456 | 116 |
py
|
triton
|
triton-main/python/triton/interpreter/__init__.py
| 0 | 0 | 0 |
py
|
|
triton
|
triton-main/python/triton/compiler/errors.py
|
import ast
from typing import Optional, Union
class CompilationError(Exception):
source_line_count_max_in_message = 12
def _format_message(self) -> str:
node = self.node
if self.src is None:
source_excerpt = " <source unavailable>"
else:
source_excerpt = self.src.split('\n')[:node.lineno][-self.source_line_count_max_in_message:]
if source_excerpt:
source_excerpt.append(' ' * node.col_offset + '^')
source_excerpt = '\n'.join(source_excerpt)
else:
source_excerpt = " <source empty>"
message = "at {}:{}:{}".format(node.lineno, node.col_offset, source_excerpt)
if self.error_message:
message += '\n' + self.error_message
return message
def __init__(self, src: Optional[str], node: ast.AST, error_message: Union[str, None]):
self.src = src
self.node = node
self.error_message = error_message
self.message = self._format_message()
def set_source_code(self, src: Optional[str]):
self.src = src
self.message = self._format_message()
def __str__(self):
return self.message
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self.message)
def __reduce__(self):
# this is necessary to make CompilationError picklable
return type(self), (self.src, self.node, self.error_message)
class CompileTimeAssertionFailure(CompilationError):
"""Specific exception for failed tests in `static_assert` invocations"""
pass
class UnsupportedLanguageConstruct(CompilationError):
pass
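# Illustrative sketch (hypothetical source text) of how the message is built:
#
#   src = "def f():\n    return undefined_name\n"
#   node = ast.parse(src).body[0].body[0]      # the `return` statement
#   err = CompilationError(src, node, "name 'undefined_name' is not defined")
#   print(err)
#   # at 2:4:def f():
#   #     return undefined_name
#   #     ^
#   # name 'undefined_name' is not defined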
| 1,666 | 30.45283 | 104 |
py
|
triton
|
triton-main/python/triton/compiler/make_launcher.py
|
import hashlib
import os
import tempfile
from ..common import _build
from ..runtime.cache import get_cache_manager
from ..runtime.jit import version_key
def is_hip():
import torch
return torch.version.hip is not None
# ----- stub --------
def make_so_cache_key(version_hash, signature, constants):
# Get unique key for the compiled code
signature = {k: 'ptr' if v[0] == '*' else v for k, v in signature.items()}
key = f"{version_hash}-{''.join(signature.values())}{constants}"
key = hashlib.md5(key.encode("utf-8")).hexdigest()
return key
def make_stub(name, signature, constants):
# name of files that are cached
so_cache_key = make_so_cache_key(version_key(), signature, constants)
so_cache_manager = get_cache_manager(so_cache_key)
so_name = f"{name}.so"
# retrieve stub from cache if it exists
cache_path = so_cache_manager.get_file(so_name)
if cache_path is None:
with tempfile.TemporaryDirectory() as tmpdir:
src = generate_launcher(constants, signature)
src_path = os.path.join(tmpdir, "main.c")
with open(src_path, "w") as f:
f.write(src)
so = _build(name, src_path, tmpdir)
with open(so, "rb") as f:
return so_cache_manager.put(f.read(), so_name, binary=True)
else:
return cache_path
# ----- source code generation --------
def ty_to_cpp(ty):
if ty[0] == '*':
return "hipDeviceptr_t" if is_hip() else "CUdeviceptr"
return {
"i1": "int32_t",
"i8": "int8_t",
"i16": "int16_t",
"i32": "int32_t",
"i64": "int64_t",
"u32": "uint32_t",
"u64": "uint64_t",
"fp16": "float",
"bf16": "float",
"fp32": "float",
"f32": "float",
"fp64": "double",
}[ty]
def generate_launcher(constants, signature):
arg_decls = ', '.join(f"{ty_to_cpp(ty)} arg{i}" for i, ty in signature.items())
def _extracted_type(ty):
if ty[0] == '*':
return "PyObject*"
return {
'i1': 'int32_t',
'i32': 'int32_t',
'i64': 'int64_t',
'u32': 'uint32_t',
'u64': 'uint64_t',
'fp16': 'float',
'bf16': 'float',
'fp32': 'float',
'f32': 'float',
'fp64': 'double',
}[ty]
def format_of(ty):
return {
"PyObject*": "O",
"float": "f",
"double": "d",
"long": "l",
"uint32_t": "I",
"int32_t": "i",
"uint64_t": "K",
"int64_t": "L",
}[ty]
format = "iiiiiKKOOO" + ''.join([format_of(_extracted_type(ty)) for ty in signature.values()])
# generate glue code
if is_hip():
src = f"""
#define __HIP_PLATFORM_AMD__
#include <hip/hip_runtime.h>
#include <Python.h>
#include <stdio.h>
static inline void gpuAssert(hipError_t code, const char *file, int line)
{{
if (code != HIP_SUCCESS)
{{
const char* prefix = "Triton Error [HIP]: ";
const char* str = hipGetErrorString(code);
char err[1024] = {{0}};
snprintf(err, 1024, "%s Code: %d, Message: %s", prefix, code, str);
PyErr_SetString(PyExc_RuntimeError, err);
}}
}}
#define HIP_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }}
static void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, hipStream_t stream, hipFunction_t function, {arg_decls}) {{
void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }};
if (gridX*gridY*gridZ > 0) {{
HIP_CHECK(hipModuleLaunchKernel(function, gridX, gridY, gridZ, 64*num_warps, 1, 1, shared_memory, stream, params, 0));
}}
}}
typedef struct _DevicePtrInfo {{
hipDeviceptr_t dev_ptr;
bool valid;
}} DevicePtrInfo;
static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
DevicePtrInfo ptr_info;
ptr_info.dev_ptr = 0;
ptr_info.valid = true;
if (PyLong_Check(obj)) {{
ptr_info.dev_ptr = (hipDeviceptr_t)PyLong_AsUnsignedLongLong(obj);
return ptr_info;
}}
if (obj == Py_None) {{
// valid nullptr
return ptr_info;
}}
PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
if (ptr) {{
PyObject *empty_tuple = PyTuple_New(0);
PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
Py_DECREF(empty_tuple);
Py_DECREF(ptr);
if (!PyLong_Check(ret)) {{
PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
ptr_info.valid = false;
return ptr_info;
}}
ptr_info.dev_ptr = (hipDeviceptr_t)PyLong_AsUnsignedLongLong(ret);
if (!ptr_info.dev_ptr)
return ptr_info;
uint64_t dev_ptr;
hipError_t status = hipPointerGetAttribute(&dev_ptr, HIP_POINTER_ATTRIBUTE_DEVICE_POINTER, ptr_info.dev_ptr);
if (status == hipErrorInvalidValue) {{
PyErr_Format(PyExc_ValueError,
"Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
ptr_info.valid = false;
}}
ptr_info.dev_ptr = (hipDeviceptr_t)dev_ptr;
return ptr_info;
}}
PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
return ptr_info;
}}
static PyObject* launch(PyObject* self, PyObject* args) {{
int gridX, gridY, gridZ;
uint64_t _stream;
uint64_t _function;
int num_warps;
int shared_memory;
PyObject *launch_enter_hook = NULL;
PyObject *launch_exit_hook = NULL;
PyObject *compiled_kernel = NULL;
{' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
if (!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel, {', '.join(f"&_arg{i}" for i, ty in signature.items())})) {{
return NULL;
}}
if (launch_enter_hook != Py_None) {{
PyObject_CallObject(launch_enter_hook, args);
}}
// raise exception asap
{"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
_launch(gridX, gridY, gridZ, num_warps, shared_memory, (hipStream_t)_stream, (hipFunction_t)_function, {', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}" for i, ty in signature.items())});
if (launch_exit_hook != Py_None) {{
PyObject_CallObject(launch_exit_hook, args);
}}
if (PyErr_Occurred()) {{
return NULL;
}}
// return None
Py_INCREF(Py_None);
return Py_None;
}}
static PyMethodDef ModuleMethods[] = {{
{{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}},
{{NULL, NULL, 0, NULL}} // sentinel
}};
static struct PyModuleDef ModuleDef = {{
PyModuleDef_HEAD_INIT,
\"__triton_launcher\",
NULL, //documentation
-1, //size
ModuleMethods
}};
PyMODINIT_FUNC PyInit___triton_launcher(void) {{
PyObject *m = PyModule_Create(&ModuleDef);
if(m == NULL) {{
return NULL;
}}
PyModule_AddFunctions(m, ModuleMethods);
return m;
}}
"""
else:
src = f"""
#include \"cuda.h\"
#include <stdbool.h>
#include <Python.h>
static inline void gpuAssert(CUresult code, const char *file, int line)
{{
if (code != CUDA_SUCCESS)
{{
const char* prefix = "Triton Error [CUDA]: ";
const char* str;
cuGetErrorString(code, &str);
char err[1024] = {{0}};
strcat(err, prefix);
strcat(err, str);
PyErr_SetString(PyExc_RuntimeError, err);
}}
}}
#define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }}
static void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, CUstream stream, CUfunction function, {arg_decls}) {{
void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }};
if(gridX*gridY*gridZ > 0){{
CUDA_CHECK(cuLaunchKernel(function, gridX, gridY, gridZ, 32*num_warps, 1, 1, shared_memory, stream, params, 0));
}}
}}
typedef struct _DevicePtrInfo {{
CUdeviceptr dev_ptr;
bool valid;
}} DevicePtrInfo;
static inline DevicePtrInfo getPointer(PyObject *obj, int idx) {{
DevicePtrInfo ptr_info;
ptr_info.dev_ptr = 0;
ptr_info.valid = true;
if (PyLong_Check(obj)) {{
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(obj);
return ptr_info;
}}
if (obj == Py_None) {{
// valid nullptr
return ptr_info;
}}
PyObject *ptr = PyObject_GetAttrString(obj, "data_ptr");
if(ptr){{
PyObject *empty_tuple = PyTuple_New(0);
PyObject *ret = PyObject_Call(ptr, empty_tuple, NULL);
Py_DECREF(empty_tuple);
Py_DECREF(ptr);
if (!PyLong_Check(ret)) {{
PyErr_SetString(PyExc_TypeError, "data_ptr method of Pointer object must return 64-bit int");
ptr_info.valid = false;
return ptr_info;
}}
ptr_info.dev_ptr = PyLong_AsUnsignedLongLong(ret);
if(!ptr_info.dev_ptr)
return ptr_info;
uint64_t dev_ptr;
int status = cuPointerGetAttribute(&dev_ptr, CU_POINTER_ATTRIBUTE_DEVICE_POINTER, ptr_info.dev_ptr);
if (status == CUDA_ERROR_INVALID_VALUE) {{
PyErr_Format(PyExc_ValueError,
"Pointer argument (at %d) cannot be accessed from Triton (cpu tensor?)", idx);
ptr_info.valid = false;
}}
ptr_info.dev_ptr = dev_ptr;
Py_DECREF(ret); // release the temporary reference returned by data_ptr()
return ptr_info;
}}
PyErr_SetString(PyExc_TypeError, "Pointer argument must be either uint64 or have data_ptr method");
return ptr_info;
}}
static PyObject* launch(PyObject* self, PyObject* args) {{
int gridX, gridY, gridZ;
uint64_t _stream;
uint64_t _function;
int num_warps;
int shared_memory;
PyObject *launch_enter_hook = NULL;
PyObject *launch_exit_hook = NULL;
PyObject *compiled_kernel = NULL;
{' '.join([f"{_extracted_type(ty)} _arg{i}; " for i, ty in signature.items()])}
if(!PyArg_ParseTuple(args, \"{format}\", &gridX, &gridY, &gridZ, &num_warps, &shared_memory, &_stream, &_function, &launch_enter_hook, &launch_exit_hook, &compiled_kernel, {', '.join(f"&_arg{i}" for i, ty in signature.items())})) {{
return NULL;
}}
if (launch_enter_hook != Py_None) {{
PyObject_CallObject(launch_enter_hook, args);
}}
// raise exception asap
{"; ".join([f"DevicePtrInfo ptr_info{i} = getPointer(_arg{i}, {i}); if (!ptr_info{i}.valid) return NULL;" if ty[0] == "*" else "" for i, ty in signature.items()])};
_launch(gridX, gridY, gridZ, num_warps, shared_memory, (CUstream)_stream, (CUfunction)_function, {', '.join(f"ptr_info{i}.dev_ptr" if ty[0]=="*" else f"_arg{i}"for i, ty in signature.items())});
if (launch_exit_hook != Py_None) {{
PyObject_CallObject(launch_exit_hook, args);
}}
if(PyErr_Occurred()) {{
return NULL;
}}
// return None
Py_INCREF(Py_None);
return Py_None;
}}
static PyMethodDef ModuleMethods[] = {{
{{"launch", launch, METH_VARARGS, "Entry point for all kernels with this signature"}},
{{NULL, NULL, 0, NULL}} // sentinel
}};
static struct PyModuleDef ModuleDef = {{
PyModuleDef_HEAD_INIT,
\"__triton_launcher\",
NULL, //documentation
-1, //size
ModuleMethods
}};
PyMODINIT_FUNC PyInit___triton_launcher(void) {{
PyObject *m = PyModule_Create(&ModuleDef);
if(m == NULL) {{
return NULL;
}}
PyModule_AddFunctions(m, ModuleMethods);
return m;
}}
"""
return src
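# Input sketch (hypothetical values): `signature` maps argument positions to Triton
# type strings and `constants` holds positions that were specialized to constants,
# e.g.
#
#   src = generate_launcher(constants={}, signature={0: "*fp32", 1: "*fp32", 2: "i32"})
#
# The returned C source defines a Python extension module `__triton_launcher` whose
# `launch()` entry point forwards the arguments to cuLaunchKernel (or to
# hipModuleLaunchKernel when built for ROCm).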
| 11,854 | 30.697861 | 239 |
py
|