# RPC-Chat / rpc.py
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, LayerNormalization
import keras_nlp
import math
import json
from transformers import AutoTokenizer
from tokenizers import AddedToken
# Config
input_size = 512
embed_dim = 128
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained('google/t5-v1_1-base')
tokenizer.add_tokens(AddedToken("\n", normalized=False))
tokenizer.add_tokens(AddedToken("<s>", normalized=False))
vocab_size = len(tokenizer.get_vocab().keys())
print("vocab_size:", vocab_size)
print("pad token id:", tokenizer.pad_token)
# Masked Accuracy Metric
def masked_accuracy(y_true, y_pred, padding_token=tokenizer.pad_token_id):
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.cast(tf.argmax(y_pred, axis=-1), tf.int32)
    mask = tf.cast(tf.not_equal(y_true, padding_token), tf.float32)
    matches = tf.cast(tf.equal(y_true, y_pred), tf.float32)
    accuracy = tf.reduce_sum(matches * mask) / tf.reduce_sum(mask)
    return accuracy
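# Minimal sanity check for masked_accuracy on toy tensors (values are
# arbitrary): one of the two non-pad positions matches, so this prints 0.5.
_yt = tf.constant([[5, 6, tokenizer.pad_token_id]])
_yp = tf.one_hot([[5, 7, 5]], depth=vocab_size)
print("masked_accuracy demo:", masked_accuracy(_yt, _yp).numpy())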
# Embedding Layer
class SharedEmbedding(tf.keras.layers.Layer):
    """One weight matrix shared between the input embedding and the output head."""

    def __init__(self, vocab_size, embed_dim, **kwargs):
        super(SharedEmbedding, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

    def build(self, input_shape):
        self.shared_weights = self.add_weight(
            shape=(self.vocab_size, self.embed_dim),
            initializer='random_normal',
            trainable=True,
            name='shared_weights'
        )
        super(SharedEmbedding, self).build(input_shape)

    def call(self, inputs, mode='embedding', temp=0.1):
        if mode == 'embedding':
            return tf.nn.embedding_lookup(self.shared_weights, inputs)
        elif mode == 'classify':
            # Cosine-similarity logits against the shared matrix, sharpened by temp.
            sw = tf.nn.l2_normalize(self.shared_weights, axis=-1)
            return tf.nn.softmax(tf.matmul(inputs, sw, transpose_b=True) / temp, axis=-1)
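# Illustrative round trip through the shared weights: the same layer embeds
# ids in 'embedding' mode and produces vocabulary distributions in 'classify'
# mode (this demo instance is separate from the model built below).
_demo_emb = SharedEmbedding(vocab_size, embed_dim)
_demo_vecs = _demo_emb(tf.constant([[1, 2, 3]]), mode='embedding')  # (1, 3, embed_dim)
_demo_probs = _demo_emb(_demo_vecs, mode='classify')                # (1, 3, vocab_size)
print("classify output shape:", _demo_probs.shape)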
# Attention Layer
class Attention(keras.layers.Layer):
    """Causal self-attention with rolled relative-position score terms."""

    def __init__(self, **kwargs):
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        self.embed_dim = input_shape[-1]
        # Causal mask: 0 on and below the diagonal, -inf above it.
        self.mask = tf.where(tf.linalg.band_part(tf.ones((input_shape[-2], input_shape[-2])), -1, 0) == 1.0, 0.0, float("-inf"))
        # Per-timestep shifts that make position scores relative (and undo it).
        self.range_do = -tf.range(input_shape[-2]) - 1
        self.range_undo = tf.range(input_shape[-2]) + 1
        self.Q = self.add_weight(name='kernelQ',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        self.K = self.add_weight(name='kernelK',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        self.V = self.add_weight(name='kernelV',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        super(Attention, self).build(input_shape)

    def roll_embeddings(self, tensor, shift_values):
        # Rolls the last axis of each timestep by its own shift (a vectorised tf.roll).
        batch_size, time_size, embed_dim = tensor.shape
        if batch_size is None: return tensor
        shift_matrix = tf.reshape(shift_values, (1, -1, 1))
        shift_matrix = tf.tile(shift_matrix, [batch_size, 1, embed_dim])
        indices = tf.range(embed_dim)
        indices_matrix = tf.tile(indices, [batch_size * time_size])
        indices_matrix = tf.reshape(indices_matrix, (batch_size, time_size, embed_dim))
        new_indices = (indices_matrix + shift_matrix) % embed_dim
        rolled_tensor = tf.gather(tensor, new_indices, batch_dims=2)
        return rolled_tensor

    def call(self, x, pos):
        q = x @ self.Q
        k = x @ self.K
        v = x @ self.V
        atti = tf.matmul(q, k, transpose_b=True)           # content-content scores
        attp = tf.matmul(q, pos, transpose_b=True)         # content-position scores
        attp = self.roll_embeddings(attp, self.range_do)   # align position scores relatively
        att = atti + attp
        att = tf.nn.softmax((att / math.sqrt(self.embed_dim)) + self.mask, axis=-1)
        outi = att @ v
        attp = self.roll_embeddings(att, self.range_undo)  # re-align weights to absolute positions
        outp = attp @ pos
        out = outi + outp
        return out
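# Illustrative shape check on random data: the output shape matches the input,
# and the causal mask restricts each position to itself and earlier positions.
_demo_att = Attention()
_demo_x = tf.random.normal((2, 8, embed_dim))
_demo_pos = tf.random.normal((8, embed_dim))
print("attention output shape:", _demo_att(_demo_x, _demo_pos).shape)  # (2, 8, embed_dim)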
# Encoder
inputs = Input(shape=(input_size, ), dtype=tf.int32)
emb_layer = SharedEmbedding(vocab_size, embed_dim)
pos_layer = keras_nlp.layers.PositionEmbedding(input_size)
x = LayerNormalization()(emb_layer(inputs, mode="embedding"))
pos = pos_layer(x)
b = 6
for _ in range(b):
    x += (2*b)**-0.5 * LayerNormalization()(Attention()(x, pos))
    x += (2*b)**-0.5 * LayerNormalization()(Dense(embed_dim, activation="gelu")(x))
x = tf.nn.l2_normalize(x, axis=-1)
for _ in range(b):
    x1 = Dense(embed_dim, activation="gelu")(x)
    x1 = Dense(embed_dim, activation="gelu")(x1)
    x += b**-0.5 * LayerNormalization()(x1)
x = tf.nn.l2_normalize(x, axis=-1)
x = emb_layer(x, mode="classify", temp=0.1)
model = keras.Model(inputs=inputs, outputs=x)
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(ignore_class=tokenizer.pad_token_id),
    optimizer=keras.optimizers.AdamW(learning_rate=0.001),
    metrics=[masked_accuracy, keras_nlp.metrics.Perplexity(mask_token_id=tokenizer.pad_token_id)],
)
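# Training sketch (assumption — no training pipeline is included in this
# file): targets would be the inputs shifted left by one position, matching
# the next-token prediction used in generate() below.
# model.fit(train_ds, epochs=1)  # train_ds: hypothetical tf.data.Dataset yielding (tokens, next_tokens)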
# Load Model Weights
model.load_weights("rpc.keras")
# The encoder outputs the l2-normalised hidden states after the attention
# blocks (layer index 52 in this graph); these are also the vectors stored in
# the retrieval index.
encoder = keras.Model(inputs=model.layers[0].input, outputs=model.layers[52].output)
encoder.summary()
# Vectorize Function
def vectorize_texts(all_texts):
    # all_texts: list of already-tokenized sequences (lists of token ids),
    # each at most input_size tokens long.
    batch_size = 128
    vects = []
    for i in range(0, len(all_texts), batch_size):
        texts = all_texts[i:i+batch_size]
        # Right-pad every sequence to input_size.
        toks = [text + ([tokenizer.pad_token_id] * (input_size - len(text))) for text in texts]
        if len(toks) > 0:
            toks = tf.constant(toks, shape=(len(toks), input_size))
            vect = encoder.predict(toks, verbose=0)
            # Keep one vector per real (non-padding) token.
            for v, t in zip(vect, texts):
                vects.append(v[:len(t), :])
    return tf.concat(vects, axis=0).numpy()
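# Example usage (illustrative): one row per non-padding token of each input.
# demo_toks = tokenizer.encode("hello world", add_special_tokens=False)
# demo_vecs = vectorize_texts([demo_toks])  # shape: (len(demo_toks), embed_dim)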
# Import Database and All Toks
index = None
all_toks = None
def load_index(index_path="/dev/shm/rpc-vecdb/index"):
    global index
    global all_toks
    #import ngtpy
    #index = ngtpy.Index(index_path, read_only=True)
    import faiss
    index = faiss.read_index(index_path + "/index.faiss")
    with open(index_path + "/all_toks.json", "r") as f:
        all_toks = json.loads(f.read())
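# Example usage (illustrative; assumes the index directory contains the
# index.faiss file and the all_toks.json token list expected above):
# load_index("/dev/shm/rpc-vecdb/index")
# print(index.ntotal, len(all_toks))  # one all_toks entry per indexed vector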
# Generate Function
carry_toks = set()  # assumption: the original definition is not shown in this file; an empty set disables the carry branch below
def generate(text, use_rpc=True, max_tokens=128):
    enc_text = tokenizer.encode(text, add_special_tokens=False)
    tok = None  # last generated token id (None before the first step)
    i = 0
    # Stop after max_tokens, or once the added "\n" token (id vocab_size - 2) is generated.
    while i < max_tokens and tok != vocab_size - 2:
        enc_text = enc_text[-input_size:]  # keep at most input_size tokens of context
        if use_rpc:
            # Embed the current context and fetch its nearest neighbour from the index.
            xq = vectorize_texts([enc_text])[-1]
            #_id, _ = index.search(xq, size=1, epsilon=2)[0]
            D, I = index.search(xq.reshape((1, -1)), 1)
            _id = I[0][0]
            if all_toks[_id] in carry_toks:
                # For carry tokens, fall back to the tied-embedding prediction
                # when that predicted id already occurs in the context.
                tmp = tf.argmax(tf.matmul(xq.reshape((1, -1)), encoder.layers[1].shared_weights, transpose_b=True), axis=-1).numpy()[0]
                if tmp in enc_text: tok = tmp
                else: tok = all_toks[_id]
            else: tok = all_toks[_id]
        else:
            # Plain autoregressive decoding with the full model.
            ins = enc_text + [tokenizer.pad_token_id] * (input_size - len(enc_text))
            ins = tf.constant(ins, shape=(1, input_size))
            res = model.predict(ins, verbose=0)[0][len(enc_text)-1]
            tok = tf.argmax(res, axis=-1).numpy().tolist()
        enc_text += [tok]
        i += 1
        response = tokenizer.decode(enc_text)
        yield response
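# Example usage (illustrative): generate() is a generator that yields the
# decoded text after every new token; load_index() must be called first when
# use_rpc=True.
# load_index()
# for partial in generate("Hello, how are you?", max_tokens=32):
#     print(partial)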