Dataset schema (one record per source file; each record below lists these fields in this order):

  repo             string, lengths 2 to 99
  file             string, lengths 13 to 225
  code             string, lengths 0 to 18.3M
  file_length      int64, 0 to 18.3M
  avg_line_length  float64, 0 to 1.36M
  max_line_length  int64, 0 to 4.26M
  extension_type   string, 1 distinct value ("py" throughout this excerpt)
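The numeric columns are not defined anywhere in this excerpt, but their names and the per-file values below suggest simple character statistics computed over the code field. A minimal sketch of one plausible derivation follows; the helper name and the exact definitions (length in characters, average as characters per line) are assumptions, not something stated by the dataset.

def file_stats(code: str) -> dict:
    """Per-file statistics mirroring the dataset columns above (assumed definitions)."""
    lines = code.split("\n")
    return {
        "file_length": len(code),                       # total characters in the file
        "avg_line_length": len(code) / len(lines),      # characters per line, on average
        "max_line_length": max(len(l) for l in lines),  # longest single line
    }

# Example: an 11-character, two-line snippet
print(file_stats("import os\nx"))
# -> {'file_length': 11, 'avg_line_length': 5.5, 'max_line_length': 9}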
fat-albert
fat-albert-master/abmn/src/movieqa/data/data/story_loader.py
"""MovieQA - Story Understanding Benchmark. Story loaders for reading plots, subtitles, DVS, etc. http://movieqa.cs.toronto.edu/ Release: v1.0 Date: 18 Nov 2015 """ import os import re import pysrt import config PKG = config.PACKAGE_DIRECTORY dvs_rep = re.compile('^\d+-\d+') dvs_cur = re.compile('\{.+?\}') quote_matches = re.compile('<.+?>') class StoryLoader(object): """Data loader class.""" def __init__(self): self.available_types = ['plot', 'split_plot', 'subtitle', 'dvs', 'script'] def _check_exists(self, filename): """Check that the story file exists and is OK to load. """ if os.path.exists(filename): # git repo OK! return True return False def _read_plot(self, plot_filename): """Read a plot synopsis file. Also used to read split plots, where each line contains one sentence of the plot. """ with open(plot_filename) as f: plot = f.readlines() plot = [p.strip() for p in plot] plot = [p for p in plot if p] return plot def _read_subtitle(self, subtitle_filename): """Read the subtitle file and output dialogs. """ subtitle_text = pysrt.open(subtitle_filename, encoding='iso-8859-1') subtitle_text = [l.strip() for l in subtitle_text.text.split('\n')] subtitle_text = [quote_matches.sub('', l).strip() for l in subtitle_text] # Prepare dialogs dialogs = [] create_new_dialog = True for l in subtitle_text: if not l: # Get rid of newlines continue if create_new_dialog: dialogs.append([l]) # Start new dialog else: dialogs[-1].append(l) # Append to last dialog # Decide what to do with next line based on current line ending create_new_dialog = False if l[-1] in ['.', '!', '?', ':', ')']: create_new_dialog = True # Join the lists to form single dialogs for d in range(len(dialogs)): dialogs[d] = ' '.join(dialogs[d]) return dialogs def _read_dvs(self, dvs_filename): """Read a DVS file. """ dvs_text = pysrt.open(dvs_filename, encoding='iso-8859-1') dvs_text = [l.strip() for l in dvs_text.text.split('\n')] dvs_text = [quote_matches.sub('', l).strip() for l in dvs_text] # Cleanup DVS (remove the DVS index and stuff in {}) for k in range(len(dvs_text)): dvs_text[k] = dvs_rep.sub('', dvs_text[k]).strip() dvs_text[k] = dvs_cur.sub('', dvs_text[k]).strip() return dvs_text def load_story(self, movies_map, story_type='plot'): """Load story files for given set of movies. Args: movies_map: Dictionary of movie named tuples. story_type: 'plot', 'split_plot', 'subtitle', 'dvs', 'script'. Returns: story: Story for each movie indexed by imdb_key. Raises: ValueError: If input story type is not supported. 
""" story = {} for imdb_key, movie in movies_map.items(): if story_type == 'plot': if not movie.text.plot: continue plot_filename = os.path.join(PKG, movie.text.plot) if not self._check_exists(plot_filename): continue this_story = self._read_plot(plot_filename) elif story_type == 'split_plot': fname = 'story/split_plot/' + imdb_key + '.split.wiki' split_plot_filename = os.path.join(PKG, fname) if not self._check_exists(split_plot_filename): continue this_story = self._read_plot(split_plot_filename) elif story_type == 'subtitle': if not movie.text.subtitle: continue subtitle_filename = os.path.join(PKG, movie.text.subtitle) if not self._check_exists(subtitle_filename): continue this_story = self._read_subtitle(subtitle_filename) elif story_type == 'dvs': if not movie.text.dvs: continue dvs_filename = os.path.join(PKG, movie.text.dvs) if not self._check_exists(dvs_filename): continue this_story = self._read_subtitle(dvs_filename) elif story_type == 'script': if not movie.text.script: continue script_filename = os.path.join(PKG, movie.text.script) if not self._check_exists(script_filename): continue this_story = self._read_plot(script_filename) else: raise ValueError('Unsupported story type!') story[imdb_key] = this_story if not story: raise ValueError('Story returned empty!') return story
5,064
32.322368
89
py
fat-albert
fat-albert-master/abmn/src/movieqa/data/data/config.py
"""Define file paths.""" import os PACKAGE_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) MOVIES_JSON = os.path.join(PACKAGE_DIRECTORY, 'data/movies.json') QA_JSON = os.path.join(PACKAGE_DIRECTORY, 'data/qa.json') SPLIT_JSON = os.path.join(PACKAGE_DIRECTORY, 'data/splits.json')
292
23.416667
65
py
fat-albert
fat-albert-master/abmn/src/movieqa/data/data/__init__.py
"""MovieQA - Story Understanding Benchmark. http://movieqa.cs.toronto.edu/ Release: v1.0 Date: 18 Nov 2015 """ from data_loader import DataLoader
151
11.666667
43
py
fat-albert
fat-albert-master/abmn/src/movieqa/adversarial_addAny/english_words.py
"""Get common English words from Brown Corpus.""" from nltk import FreqDist from nltk.corpus import brown import string punctuation = set(string.punctuation) | set(['``', "''", "--"]) def get_common_words(outfile, num): freq_dist = FreqDist(w.lower() for w in brown.words() if w not in punctuation) vocab = [x[0] for x in freq_dist.most_common()[:num]] with open(outfile, "w") as of: for w in vocab: of.write(w + "\n") if __name__ == '__main__': get_common_words('common_english.txt', 1000)
533
25.7
82
py
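For orientation, the word list written by get_common_words above is read back by the addAny scripts further down in this excerpt, which map each common word to its index in the embedding vocabulary and abort if a word is missing (pointing at add_common_words_to_vocab.py). A small sketch of that read-back step; the rev_vocab dict here is a placeholder standing in for the vocabulary those scripts load with util.load_embeddings.

rev_vocab = {'the': 0, 'of': 1, 'and': 2}  # placeholder word -> index vocabulary
common_words = {}
with open('common_english.txt', encoding='utf8') as fin:
    for line in fin:
        word = line.strip()
        if word in rev_vocab:
            common_words[word] = rev_vocab[word]
        else:
            # in the attack scripts below this is a hard error: the word must
            # first be added to the vocabulary by add_common_words_to_vocab.py
            print('word "%s" not in vocab' % word)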
fat-albert
fat-albert-master/abmn/src/movieqa/adversarial_addAny/create_addAny_examples.py
import sys import os sys.path.append('data') # os.chdir("..") import movieqa.data.data_loader as movie import core.util as util from random import randrange from random import shuffle import random import glob import tensorflow as tf import core.model as model import numpy as np import _pickle as pickle # implementation of AddAny/AddCommon from Jia and Liang 2017 def add_plot_sentence(plot, sentence): new_sent = np.zeros(shape=[1, 1, plot.shape[2]], dtype=np.int64) for i, index in enumerate(sentence): new_sent[0][0][i] = index m_plot = np.append(plot, new_sent, 1) return m_plot def run_creation(model_type, attack, model_folder, examples_folder, instances_to_attack): print("store created examples in %s" % examples_folder) if model_type == "lstm": import movieqa.run_lstm as runner else: import movieqa.run_cnn as runner runner.data_conf.TRAIN_DIR = model_folder load = False check_sents = [] check_found = [] check_num = 0 corr_probs = [] if not tf.io.gfile.exists(examples_folder): tf.io.gfile.makedirs(examples_folder) else: checkpoints = glob.glob(examples_folder + "/[!accuracies]*") checkpoints = sorted(checkpoints, reverse=True) latest = checkpoints[0] splitted = latest.split(".txt")[0] check_num = int(splitted[len(splitted) - 1]) + 1 check = open(latest, encoding="utf8") for line in check: parts = line.replace('\n', '').split("\t") check_words = parts[0].split(" ") check_sents.append(check_words) last_prob = float(parts[1]) found = parts[2] if found == 'True': b_found = True else: b_found = False corr_probs.append(last_prob) check_found.append(b_found) load = True emb_dir = runner.data_conf.EMBEDDING_DIR vectors, vocab = util.load_embeddings(emb_dir) rev_vocab = dict(zip(vocab.values(), vocab.keys())) # print(rev_vocab) filename = "adversarial_addAny/common_english.txt" # length of the distractor sentence d = 10 # pool size of common words to sample from for each word in the distractor sentence poolsize = 10 common_words = {} fin = open(filename, encoding="utf8") for line in fin: word = line.replace('\n', '') # print(word) if word in rev_vocab: common_words[word] = rev_vocab[word] else: print('ERROR: word "%s" not in vocab. Run add_common_words_to_vocab.py first.' 
% word) exit(1) with open(instances_to_attack + '/val.pickle', 'rb') as handle: qa = pickle.load(handle) w_s = [] w_choices = [] w_found = [] q_inds = [] pools = [] with open(examples_folder + "/" + str(0 + check_num) + ".txt", "a") as file: for k, question in enumerate(qa): # load question indices q_words = util.normalize_text(question.question) q_ind = [] for word in q_words: q_ind.append(rev_vocab[word]) a_words = [] for i, answer in enumerate(question.answers): if not i == int(question.correct_index): words = util.normalize_text(answer) a_words.extend(words) w = [] w_choice = [] rand_sent = "" for i in range(0, d): if load: c_word = check_sents[k][i] w_index = rev_vocab[c_word] rand_sent += (c_word + " ") else: w_index = random.choice(list(common_words.values())) rand_sent += (vocab[w_index] + " ") w_found.append(False) w.append(w_index) w_choice.append(i) if load: found = check_found[k] w_found.append(found) # file.write(rand_sent+"\t"+str(corr_probs[k])+"\t"+str(found)+"\n") else: found = False w_found.append(found) file.write(rand_sent + "\t" + "1.0" + "\t" + str(found) + "\n") shuffle(w_choice) w_choices.append(w_choice) w_s.append(w) d_pools = [] for j, dj in enumerate(w): pool = [] random_common_words = np.random.choice(list(common_words.values()), poolsize, replace=False) print("Adding common words") pool.extend(random_common_words) if attack == 'addQ' or attack == "addQA": print("Adding question words") for word in q_words: pool.append(rev_vocab[word]) if attack == "addA" or attack == "addQA": print("Adding answer words") for word in a_words: pool.append(rev_vocab[word]) shuffle(pool) d_pools.append(pool) pools.append(d_pools) filepath = instances_to_attack + "/*.tfrecords" filenames = glob.glob(filepath) global_step = tf.contrib.framework.get_or_create_global_step() dataset = tf.contrib.data.TFRecordDataset(filenames) dataset = dataset.map(runner.get_single_sample) dataset = dataset.repeat(poolsize * d) batch_size = 1 dataset = dataset.padded_batch(batch_size, padded_shapes=( [None], [5, None], [None], (), [None, None], ())) iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next() add_sent = tf.compat.v1.placeholder(tf.int64, shape=[None]) # sent_exp = tf.expand_dims(add_sent,0) m_p = tf.compat.v1.py_func(add_plot_sentence, [next_plots, add_sent], [tf.int64])[0] # m_p = next_plots # m_p = tf.concat([next_plots,sent_exp],axis=0) logits, atts, sent_atts, _ = runner.predict_batch([next_q, next_a, m_p], training=False) probabs = model.compute_probabilities(logits=logits) accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1)) to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"]) saver = tf.compat.v1.train.Saver(to_restore) p_counts = 0 last_p = '' p_id = 0 f_counter = 0 with tf.compat.v1.Session() as sess: init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer()) sess.run(init_op) ckpt = tf.train.get_checkpoint_state(runner.data_conf.TRAIN_DIR) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) else: print('No checkpoint file found') _ = sess.run(runner.set_embeddings_op, feed_dict={runner.place: vectors}) coord = tf.train.Coordinator() threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord) if not load: accs = np.ones(shape=(len(qa))) else: accs = corr_probs for w_counter in range(0, d): words = 
np.zeros(shape=(len(qa)), dtype=np.int64) # select next word to optimize greedily next_inds = [] for k, question in enumerate(qa): next_word = w_choices[k].pop() next_inds.append(next_word) words[k] = w_s[k][next_word] # go through whole pool for every question next_ind = 0 for pool_counter in range(0, poolsize): total_acc = 0.0 info = "" for k, question in enumerate(qa): w_copy = [x for x in w_s[k]] print("==============") next_ind = next_inds[k] pool = pools[k][next_ind] pool_ind = pool.pop() print("setting " + str(w_s[k][next_ind]) + " to " + str(pool_ind)) w_copy[next_ind] = pool_ind info = "wordcounter: " + str(w_counter) + " - poolcounter: " + str( pool_counter) + " - question: " + str(k) print(info) acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, labels_val, p_val, a_val, p_id_val = sess.run( [accuracy_example, probabs, global_step, next_q_types, next_q, atts, sent_atts, next_l, m_p, next_a, next_plot_ids], feed_dict={add_sent: w_copy}) sent = "" for word in w_copy: sent += (" " + vocab[word]) print(sent + " - acc: " + str(acc_val)) corr = np.argmax(labels_val[0]) pred_val = probs_val[0][corr] if (pred_val < accs[k]): word_s = vocab[words[k]] pool_s = vocab[pool_ind] print(pool_s + " (" + str(pred_val) + ") < " + word_s + " (" + str(accs[k]) + ")") words[k] = pool_ind accs[k] = pred_val if acc_val == 0: print( "setting" + str(k) + " to true with acc" + str(acc_val) + " and pred " + str(pred_val)) w_found[k] = True f_counter += 1 filename = '' q_s = '' for index in q_val[0]: word = (vocab[index]) q_s += (word + ' ') filename += (word + '_') predicted_probabilities = probs_val[0] labels = labels_val[0] p_id = 'test' path = runner.data_conf.EVAL_DIR + "/plots/" + p_id + "/" + filename if (p_counts < 20): for i, a_att in enumerate(atts_val[0]): # a_att = np.max(a_att, 1) qa_s = q_s + "? (acc: " + str(acc_val) + ")\n " for index in a_val[0][i]: qa_s += (vocab[index] + ' ') lv = " (label: " + str(int(labels[i])) + " - prediction: " + ( str("%.2f" % (predicted_probabilities[i] * 100))) + "%)" qa_s += lv a_sents = [] y_labels = [] for j, att in enumerate(a_att): a_s = [] y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%") for index in p_val[0][j]: a_s.append(vocab[index]) a_sents.append(a_s) util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path, filename) last_p = p_id p_counts += 1 total_acc += acc_val print(total_acc / (k + 1)) with open(examples_folder + "/accuracies.txt", "a") as file: file.write(info + " - " + str(total_acc / (len(qa))) + "\n") with open(examples_folder + "/" + str(w_counter + check_num + 1) + ".txt", "a") as file: for k, question in enumerate(qa): w_s[k][next_ind] = words[k] sent = "" for word in w_s[k]: sent += (vocab[word] + " ") file.write(sent + "\t" + str(accs[k]) + "\t" + str(w_found[k]) + "\n")
12,013
38.781457
138
py
fat-albert
fat-albert-master/abmn/src/movieqa/adversarial_addAny/eval_addAny.py
import sys import os sys.path.append('data') # os.chdir("..") import movieqa.data.data_loader as movie import core.util as util from random import randrange from random import shuffle import random import glob import tensorflow as tf import core.model as model import numpy as np import _pickle as pickle # implementation of AddAny/AddCommon from Jia and Liang 2017 def add_plot_sentence(plot, sentence): new_sent = np.zeros(shape=[1, 1, plot.shape[2]], dtype=np.int64) for i, index in enumerate(sentence): new_sent[0][0][i] = index m_plot = np.append(plot, new_sent, 1) return m_plot def run_evaluation(model_type, attack_type, model_folder, examples_folder, instances_to_attack): if model_type == "lstm": import movieqa.run_lstm as runner else: import movieqa.run_cnn as runner runner.data_conf.TRAIN_DIR = model_folder mode = attack_type sent_out_dir = examples_folder load = False check_sents = [] check_found = [] check_num = 0 corr_probs = [] if not tf.io.gfile.exists(sent_out_dir): tf.io.gfile.makedirs(sent_out_dir) else: checkpoints = glob.glob(sent_out_dir + "/[!accuracies]*") checkpoints = sorted(checkpoints, reverse=True) print(checkpoints) latest = checkpoints[0] splitted = latest.split(".txt")[0] check_num = int(splitted[len(splitted) - 1]) + 1 check = open(latest, encoding="utf8") for line in check: parts = line.replace('\n', '').split("\t") check_words = parts[0].split(" ") print(check_words) check_sents.append(check_words) last_prob = float(parts[1]) found = parts[2] if found == 'True': b_found = True else: b_found = False corr_probs.append(last_prob) check_found.append(b_found) load = True emb_dir = runner.data_conf.EMBEDDING_DIR vectors, vocab = util.load_embeddings(emb_dir) rev_vocab = dict(zip(vocab.values(), vocab.keys())) # print(rev_vocab) filename = "adversarial_addAny/common_english.txt" # length of the distractor sentence d = 10 # pool size of common words to sample from for each word in the distractor sentence poolsize = 10 common_words = {} fin = open(filename, encoding="utf8") for line in fin: word = line.replace('\n', '') # print(word) if word in rev_vocab: common_words[word] = rev_vocab[word] else: print('ERROR: word "%s" not in vocab. Run add_common_words_to_vocab.py first.' 
% word) exit(1) with open(instances_to_attack + '/val.pickle', 'rb') as handle: qa = pickle.load(handle) with open("question_references.txt", "w") as file: for q in qa: file.write(str(q.qid) + "\t" + q.question + "\n") w_s = [] w_choices = [] w_found = [] q_inds = [] pools = [] for k, question in enumerate(qa): # load question indices q_words = util.normalize_text(question.question) q_ind = [] for word in q_words: q_ind.append(rev_vocab[word]) a_words = [] for i, answer in enumerate(question.answers): if not i == int(question.correct_index): words = util.normalize_text(answer) a_words.extend(words) w = [] w_choice = [] rand_sent = "" for i in range(0, d): c_word = check_sents[k][i] w_index = rev_vocab[c_word] rand_sent += (c_word + " ") w.append(w_index) w_choice.append(i) found = check_found[k] w_found.append(found) w_s.append(w) print(w_s) filepath = instances_to_attack + '/*.tfrecords' filenames = glob.glob(filepath) global_step = tf.contrib.framework.get_or_create_global_step() dataset = tf.contrib.data.TFRecordDataset(filenames) dataset = dataset.map(runner.get_single_sample) dataset = dataset.repeat(poolsize * d) batch_size = 1 dataset = dataset.padded_batch(batch_size, padded_shapes=( [None], [5, None], [None], (), [None, None], ())) iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) next_q, next_a, next_l, next_plot_ids, next_plots, next_q_types = iterator.get_next() add_sent = tf.compat.v1.placeholder(tf.int64, shape=[None]) # sent_exp = tf.expand_dims(add_sent,0) m_p = tf.compat.v1.py_func(add_plot_sentence, [next_plots, add_sent], [tf.int64])[0] # m_p = next_plots # m_p = tf.concat([next_plots,sent_exp],axis=0) logits, atts, sent_atts, _ = runner.predict_batch([next_q, next_a, m_p], training=False) probabs = model.compute_probabilities(logits=logits) accuracy_example = tf.reduce_mean(input_tensor=model.compute_accuracies(logits=logits, labels=next_l, dim=1)) to_restore = tf.contrib.slim.get_variables_to_restore(exclude=["embeddings"]) saver = tf.compat.v1.train.Saver(to_restore) p_counts = 0 last_p = '' p_id = 0 f_counter = 0 with tf.compat.v1.Session() as sess: init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer()) sess.run(init_op) ckpt = tf.train.get_checkpoint_state(runner.data_conf.TRAIN_DIR) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) else: print('No checkpoint file found') _ = sess.run(runner.set_embeddings_op, feed_dict={runner.place: vectors}) coord = tf.train.Coordinator() threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord) accs = corr_probs total_acc = 0.0 for k, question in enumerate(qa): w_copy = [x for x in w_s[k]] print("==============") acc_val, probs_val, gs_val, q_type_val, q_val, atts_val, sent_atts_val, labels_val, p_val, a_val, p_id_val = sess.run( [accuracy_example, probabs, global_step, next_q_types, next_q, atts, sent_atts, next_l, m_p, next_a, next_plot_ids], feed_dict={add_sent: w_copy}) sent = "" for word in w_copy: sent += (" " + vocab[word]) print(sent + " - acc: " + str(acc_val)) corr = np.argmax(labels_val[0]) pred_val = probs_val[0][corr] filename = '' q_s = '' for index in q_val[0]: word = (vocab[index]) q_s += (word + ' ') filename += (word + '_') predicted_probabilities = probs_val[0] labels = labels_val[0] p_id = 'test' path = sent_out_dir + "/plots/" + p_id + "/" + filename if (p_counts < 20): for i, a_att in enumerate(atts_val[0]): # a_att = np.max(a_att, 1) qa_s = q_s + "? 
(acc: " + str(acc_val) + ")\n " for index in a_val[0][i]: qa_s += (vocab[index] + ' ') lv = " (label: " + str(int(labels[i])) + " - prediction: " + ( str("%.2f" % (predicted_probabilities[i] * 100))) + "%)" qa_s += lv a_sents = [] y_labels = [] for j, att in enumerate(a_att): a_s = [] y_labels.append(str("%.2f" % (sent_atts_val[0][i][j] * 100)) + "%") for index in p_val[0][j]: a_s.append(vocab[index]) a_sents.append(a_s) # util.plot_attention(np.array(a_att), np.array(a_sents), qa_s, y_labels, path,filename) last_p = p_id p_counts += 1 total_acc += acc_val print(total_acc / (k + 1)) with open(sent_out_dir + "/accuracies_eval.txt", "a") as file: file.write(str(total_acc / (len(qa))) + "\n") return total_acc / (len(qa))
8,214
33.516807
130
py
fat-albert
fat-albert-master/abmn/src/movieqa/adversarial_addAny/add_common_words_to_vocab.py
import os
import sys

present_path = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(present_path, '../../'))

import core.util as util
import movieqa.data_conf as data_conf

glove = util.loadGloveModel(data_conf.PRETRAINED_EMBEDDINGS_PATH)

#vectors, vocab = util.load_embeddings(data_conf.EMBEDDING_DIR)
util.restore_vocab(data_conf.EMBEDDING_DIR)
print("Restored vocab")
#rev_vocab = dict(zip(vocab.values(), vocab.keys()))
#print("Current vocabulary %s with %d entries" % (str(rev_vocab), len(rev_vocab)))

filename = "adversarial_addAny/common_english.txt"
fin = open(filename, encoding="utf8")
for line in fin:
    word = line.replace('\n', '')
    print("get word vector for %s" % word)
    vec = util.get_word_vector(glove, word, data_conf.EMBEDDING_SIZE)

vsize = util.save_embeddings(data_conf.EMBEDDING_DIR, data_conf.EMBEDDING_SIZE)
print("New vocabulary size %d" % vsize)
916
29.566667
82
py
fat-albert
fat-albert-master/abmn/src/movieqa/adversarial_addAny/__init__.py
0
0
0
py
fat-albert
fat-albert-master/abmn/src/ensemble_folder_test/refine.py
import numpy as np

predictions = np.loadtxt('preds.txt')
f = open("plot_results.txt", "a")
counter = 0
for i in predictions:
    f.write("test:" + str(counter) + " " + str(int(i)) + "\n")
    counter = counter + 1
215
23
62
py
fat-albert
fat-albert-master/bert/setup.py
""" Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory. (this will build a wheel for the python version you use to build it - make sure you use python 3.x). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi transformers 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. """ from io import open from setuptools import find_packages, setup setup( name="transformers", version="2.2.0", author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="[email protected]", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", long_description=open("README.md", "r", encoding='utf-8').read(), long_description_content_type="text/markdown", keywords='NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU', license='Apache', url="https://github.com/huggingface/transformers", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['numpy', 'boto3', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses'], entry_points={ 'console_scripts': [ "transformers=transformers.__main__:main", ] }, # python_requires='>=3.5.0', tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
2,923
39.054795
183
py
fat-albert
fat-albert-master/bert/hubconf.py
from transformers import ( AutoTokenizer, AutoConfig, AutoModel, AutoModelWithLMHead, AutoModelForSequenceClassification, AutoModelForQuestionAnswering ) from transformers.file_utils import add_start_docstrings dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex', 'sentencepiece', 'sacremoses'] @add_start_docstrings(AutoConfig.__doc__) def config(*args, **kwargs): r""" # Using torch.hub ! import torch config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from S3 and cache. config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json') config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ return AutoConfig.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoTokenizer.__doc__) def tokenizer(*args, **kwargs): r""" # Using torch.hub ! import torch tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from S3 and cache. tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')` """ return AutoTokenizer.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModel.__doc__) def model(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModel.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelWithLMHead.__doc__) def modelWithLMHead(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelWithLMHead.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelForSequenceClassification.__doc__) def modelForSequenceClassification(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__) def modelForQuestionAnswering(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from S3 and cache. model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
6,489
56.433628
189
py
fat-albert
fat-albert-master/bert/examples/run_lm_finetuning.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned using a masked language modeling (MLM) loss. """ from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import pickle import random import re import shutil import numpy as np import torch from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, BertConfig, BertForMaskedLM, BertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) logger = logging.getLogger(__name__) MODEL_CLASSES = { 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) } class TextDataset(Dataset): def __init__(self, tokenizer, args, file_path='train', block_size=512): assert os.path.isfile(file_path) directory, filename = os.path.split(file_path) cached_features_file = os.path.join(directory, args.model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) with open(cached_features_file, 'rb') as handle: self.examples = pickle.load(handle) else: logger.info("Creating features from dataset file at %s", directory) self.examples = [] with open(file_path, encoding="utf-8") as f: text = f.read() tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) for i in range(0, len(tokenized_text)-block_size+1, block_size): # Truncate in block of block_size self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i+block_size])) # Note that we are loosing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should loook for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. 
logger.info("Saving features into cached file %s", cached_features_file) with open(cached_features_file, 'wb') as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) def __len__(self): return len(self.examples) def __getitem__(self, item): return torch.tensor(self.examples[item]) def load_and_cache_examples(args, tokenizer, evaluate=False): dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size) return dataset def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False): if not args.save_total_limit: return if args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix))) if len(glob_checkpoints) <= args.save_total_limit: return ordering_and_checkpoint_path = [] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) def mask_tokens(inputs, tokenizer, args): """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.clone() # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = torch.full(labels.shape, args.mlm_probability) special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()] probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -1 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.resize_token_embeddings(len(tokenizer)) model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproducibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) model.train() outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: checkpoint_prefix = 'checkpoint' # Save model checkpoint output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) _rotate_checkpoints(args, checkpoint_prefix) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = 
args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) with torch.no_grad(): outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = { "perplexity": perplexity } output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_data_file", default=None, type=str, required=True, help="The input training data file (a text file).") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--eval_data_file", default=None, type=str, help="An optional input evaluation data file to evaluate the perplexity on (a text file).") parser.add_argument("--model_type", default="bert", type=str, help="The model architecture to be fine-tuned.") parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str, help="The model checkpoint for weights initialization.") parser.add_argument("--mlm", action='store_true', help="Train with masked-language modeling loss instead of language modeling.") parser.add_argument("--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss") parser.add_argument("--config_name", default="", type=str, help="Optional pretrained config name or path if not the same as model_name_or_path") parser.add_argument("--tokenizer_name", default="", type=str, help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") parser.add_argument("--cache_dir", default="", type=str, help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") parser.add_argument("--block_size", default=-1, type=int, help="Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." 
"Default to the model max input length for single sentence inputs (take into account special tokens).") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=1.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument('--save_total_limit', type=int, default=None, help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm: raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. 
They must be run using the --mlm " "flag (masked language modeling).") if args.eval_data_file is None and args.do_eval: raise ValueError("Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " "or remove the --do_eval argument.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) if args.block_size <= 0: args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) if args.local_rank == 0: torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False) if args.local_rank == 0: torch.distributed.barrier() global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use 
save_pretrained for the model and tokenizer, you can reload them using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
28,924
50.929982
165
py
fat-albert
fat-albert-master/bert/examples/utils_squad_evaluate.py
""" Official evaluation script for SQuAD version 2.0. Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import argparse import collections import json import numpy as np import os import re import string import sys class EVAL_OPTS(): def __init__(self, data_file, pred_file, out_file="", na_prob_file="na_prob.json", na_prob_thresh=1.0, out_image_dir=None, verbose=False): self.data_file = data_file self.pred_file = pred_file self.out_file = out_file self.na_prob_file = na_prob_file self.na_prob_thresh = na_prob_thresh self.out_image_dir = out_image_dir self.verbose = verbose OPTS = None def parse_args(): parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.') parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.') parser.add_argument('--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).') parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.') parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).') parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.') parser.add_argument('--verbose', '-v', action='store_true') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid_to_has_ans[qa['id']] = bool(qa['answers']) return qid_to_has_ans def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) return re.sub(regex, ' ', text) def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid = qa['id'] gold_answers = [a['text'] for a in qa['answers'] if normalize_answer(a['text'])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [''] if qid not 
in preds: print('Missing prediction for %s' % qid) continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores.values()) / total), ('f1', 100.0 * sum(f1_scores.values()) / total), ('total', total), ]) else: total = len(qid_list) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), ('total', total), ]) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval['%s_%s' % (prefix, k)] = new_eval[k] def plot_pr_curve(precisions, recalls, out_image, title): plt.step(recalls, precisions, color='b', alpha=0.2, where='post') plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(title) plt.savefig(out_image) plt.clf() def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): qid_list = sorted(na_probs, key=lambda k: na_probs[k]) true_pos = 0.0 cur_p = 1.0 cur_r = 0.0 precisions = [1.0] recalls = [0.0] avg_prec = 0.0 for i, qid in enumerate(qid_list): if qid_to_has_ans[qid]: true_pos += scores[qid] cur_p = true_pos / float(i+1) cur_r = true_pos / float(num_true_pos) if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(cur_p) recalls.append(cur_r) if out_image: plot_pr_curve(precisions, recalls, out_image, title) return {'ap': 100.0 * avg_prec} def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return pr_exact = make_precision_recall_eval( exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score') pr_f1 = make_precision_recall_eval( f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score') oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} pr_oracle = make_precision_recall_eval( oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)') merge_eval(main_eval, pr_exact, 'pr_exact') merge_eval(main_eval, pr_f1, 'pr_f1') merge_eval(main_eval, pr_oracle, 'pr_oracle') def histogram_na_prob(na_probs, qid_list, image_dir, name): if not qid_list: return x = [na_probs[k] for k in qid_list] weights = np.ones_like(x) / float(len(x)) plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) plt.xlabel('Model probability of no-answer') plt.ylabel('Proportion of dataset') plt.title('Histogram of no-answer probability: %s' % name) plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name)) plt.clf() def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] has_ans_score, has_ans_cnt = 0, 0 for qid in qid_list: if not qid_to_has_ans[qid]: continue has_ans_cnt += 1 if qid not in scores: continue has_ans_score += scores[qid] return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval['best_exact'] = best_exact main_eval['best_exact_thresh'] = exact_thresh main_eval['best_f1'] = best_f1 main_eval['best_f1_thresh'] = f1_thresh def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) main_eval['best_exact'] = best_exact main_eval['best_exact_thresh'] = exact_thresh main_eval['best_f1'] = best_f1 main_eval['best_f1_thresh'] = f1_thresh main_eval['has_ans_exact'] = has_ans_exact main_eval['has_ans_f1'] = has_ans_f1 def main(OPTS): with open(OPTS.data_file) as f: dataset_json = json.load(f) dataset = dataset_json['data'] with open(OPTS.pred_file) as f: preds = json.load(f) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: na_probs = json.load(f) else: na_probs = {k: 0.0 for k in preds} qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(dataset, preds) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) out_eval = 
make_eval_dict(exact_thresh, f1_thresh) if has_ans_qids: has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) merge_eval(out_eval, has_ans_eval, 'HasAns') if no_ans_qids: no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) merge_eval(out_eval, no_ans_eval, 'NoAns') if OPTS.na_prob_file: find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns') histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns') if OPTS.out_file: with open(OPTS.out_file, 'w') as f: json.dump(out_eval, f) else: print(json.dumps(out_eval, indent=2)) return out_eval if __name__ == '__main__': OPTS = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main(OPTS)
12,493
36.746224
107
py
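For reference, the evaluation script above can be driven programmatically as well as from the command line: build an EVAL_OPTS object and pass it to main(), which is how run_squad.py in the next record uses it. A minimal sketch, assuming the file is importable as utils_squad_evaluate and using illustrative file names:
# Minimal sketch: calling the SQuAD 2.0 evaluator from Python.
# The module name and JSON paths below are illustrative assumptions, not values fixed by the script.
from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad

opts = EVAL_OPTS(data_file='dev-v2.0.json',      # SQuAD 2.0 dev set
                 pred_file='predictions.json',    # maps qid -> predicted answer text
                 na_prob_file='null_odds.json')   # maps qid -> no-answer score
results = evaluate_on_squad(opts)                 # dict with 'exact', 'f1', 'best_f1', ...
print(results['exact'], results['f1'])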
fat-albert
fat-albert-master/bert/examples/run_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import timeit import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer, AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended) # The following import is the official SQuAD evaluation script (2.0). 
# You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer), 'albert': (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, 
args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = 
os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, pad_token_segment_id=3 if args.model_type in ['xlnet'] else 0, cls_token_at_end=True if args.model_type in ['xlnet'] else False, sequence_a_is_doc=True if args.model_type in ['xlnet'] else False) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir, force_download=True) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint, force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
31,780
54.175347
151
py
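The --model_type flag of run_squad.py above is resolved through the MODEL_CLASSES table into a (config, model, tokenizer) triple before from_pretrained is called on each. A minimal sketch of that lookup for the ALBERT case; the checkpoint shortcut name is an illustrative assumption, not something the script prescribes:
# Sketch of the MODEL_CLASSES lookup performed in main(); the checkpoint name is illustrative.
from transformers import AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer

MODEL_CLASSES = {'albert': (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer)}
config_class, model_class, tokenizer_class = MODEL_CLASSES['albert']

config = config_class.from_pretrained('albert-base-v2')
tokenizer = tokenizer_class.from_pretrained('albert-base-v2', do_lower_case=True)
model = model_class.from_pretrained('albert-base-v2', config=config)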
fat-albert
fat-albert-master/bert/examples/utils_multiple_choice.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """ from __future__ import absolute_import, division, print_function import logging import os import sys from io import open import json import csv import glob import tqdm from typing import List from transformers import PreTrainedTokenizer logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for multiple choice""" def __init__(self, example_id, question, contexts, endings, label=None): """Constructs a InputExample. Args: example_id: Unique id for the example. contexts: list of str. The untokenized text of the first sequence (context of corresponding question). question: string. The untokenized text of the second sequence (question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.example_id = example_id self.question = question self.contexts = contexts self.endings = endings self.label = label class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for input_ids, input_mask, segment_ids in choices_features ] self.label = label class DataProcessor(object): """Base class for data converters for multiple choice data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() class RaceProcessor(DataProcessor): """Processor for the RACE data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) high = os.path.join(data_dir, 'train/high') middle = os.path.join(data_dir, 'train/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'train') def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) high = os.path.join(data_dir, 'dev/high') middle = os.path.join(data_dir, 'dev/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'dev') def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} test".format(data_dir)) high = 
os.path.join(data_dir, 'test/high') middle = os.path.join(data_dir, 'test/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'test') def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_txt(self, input_dir): lines = [] files = glob.glob(input_dir + "/*txt") for file in tqdm.tqdm(files, desc="read files"): with open(file, 'r', encoding='utf-8') as fin: data_raw = json.load(fin) data_raw["race_id"] = file lines.append(data_raw) return lines def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (_, data_raw) in enumerate(lines): race_id = "%s-%s" % (set_type, data_raw["race_id"]) article = data_raw["article"] for i in range(len(data_raw["answers"])): truth = str(ord(data_raw['answers'][i]) - ord('A')) question = data_raw['questions'][i] options = data_raw['options'][i] examples.append( InputExample( example_id=race_id, question=question, contexts=[article, article, article, article], # this is not efficient but convenient endings=[options[0], options[1], options[2], options[3]], label=truth)) return examples class SwagProcessor(DataProcessor): """Processor for the SWAG data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For swag testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_csv(self, input_file): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ InputExample( example_id=line[2], question=line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
contexts = [line[4], line[4], line[4], line[4]], endings = [line[7], line[8], line[9], line[10]], label=line[11] ) for line in lines[1:] # we skip the line with the column names ] return examples class MovieQAProcessor(DataProcessor): """Processor for the MovieQA data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For movieqa testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3", "4"] def _read_csv(self, input_file): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ InputExample( example_id=line[2], question=line[5], # in the movieqa dataset, the # common beginning of each # choice is stored in "sent2". contexts = [line[4], line[4], line[4], line[4], line[4]], endings = [line[7], line[8], line[9], line[10], line[11]], label=line[12] ) for line in lines[1:] # we skip the line with the column names ] return examples class ArcProcessor(DataProcessor): """Processor for the ARC data set (request from allennlp).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev") def get_test_examples(self, data_dir): logger.info("LOOKING AT {} test".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_json(self, input_file): with open(input_file, 'r', encoding='utf-8') as fin: lines = fin.readlines() return lines def _create_examples(self, lines, type): """Creates examples for the training and dev sets.""" #There are two types of labels. They should be normalized def normalize(truth): if truth in "ABCD": return ord(truth) - ord("A") elif truth in "1234": return int(truth) - 1 else: logger.info("truth ERROR! 
%s", str(truth)) return None examples = [] three_choice = 0 four_choice = 0 five_choice = 0 other_choices = 0 # we deleted example which has more than or less than four choices for line in tqdm.tqdm(lines, desc="read arc data"): data_raw = json.loads(line.strip("\n")) if len(data_raw["question"]["choices"]) == 3: three_choice += 1 continue elif len(data_raw["question"]["choices"]) == 5: five_choice += 1 continue elif len(data_raw["question"]["choices"]) != 4: other_choices += 1 continue four_choice += 1 truth = str(normalize(data_raw["answerKey"])) assert truth != "None" question_choices = data_raw["question"] question = question_choices["stem"] id = data_raw["id"] options = question_choices["choices"] if len(options) == 4: examples.append( InputExample( example_id = id, question=question, contexts=[options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""), options[2]["para"].replace("_", ""), options[3]["para"].replace("_", "")], endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]], label=truth)) if type == "train": assert len(examples) > 1 assert examples[0].label is not None logger.info("len examples: %s}", str(len(examples))) logger.info("Three choices: %s", str(three_choice)) logger.info("Five choices: %s", str(five_choice)) logger.info("Other choices: %s", str(other_choices)) logger.info("four choices: %s", str(four_choice)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, pad_token_segment_id=0, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, is_training=0, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_features = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer.encode_plus( text_a, text_b, add_special_tokens=True, max_length=max_length, ) if 'num_truncated_tokens' in inputs and inputs['num_truncated_tokens'] > 0: logger.info('Attention! you are cropping tokens (swag task is ok). ' 'If you are training ARC and RACE and you are poping question + options,' 'you need to try to use a bigger max seq length!') input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"] # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length assert len(attention_mask) == max_length assert len(token_type_ids) == max_length choices_features.append((input_ids, attention_mask, token_type_ids)) label = label_map[example.label] if ex_index < 2: logger.info("*** Example ***") logger.info("race_id: {}".format(example.example_id)) for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask)))) logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids)))) logger.info("label: {}".format(label)) features.append( InputFeatures( example_id=example.example_id, choices_features=choices_features, label=label, ) ) return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). 
# # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index processors = { "race": RaceProcessor, "swag": SwagProcessor, "movieqa": MovieQAProcessor, "arc": ArcProcessor } MULTIPLE_CHOICE_TASKS_NUM_LABELS = { "race": 4, "swag": 4, "movieqa": 5, "arc": 4 }
20,773
37.257827
119
py
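As a usage illustration for the processors defined above, the sketch below turns MovieQA training data into InputFeatures. The data directory, the SWAG-style CSV layout it implies, and the choice of tokenizer are assumptions for illustration only:
# Minimal sketch, assuming the file above is importable as utils_multiple_choice,
# that data/movieqa/train.csv exists with SWAG-style columns plus a label column,
# and that an ALBERT tokenizer is used (all illustrative assumptions).
from transformers import AlbertTokenizer
from utils_multiple_choice import MovieQAProcessor, convert_examples_to_features

tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
processor = MovieQAProcessor()
examples = processor.get_train_examples('data/movieqa')   # reads data/movieqa/train.csv
features = convert_examples_to_features(
    examples,
    processor.get_labels(),        # ["0" .. "4"]: five answer choices per question
    max_length=384,
    tokenizer=tokenizer,
    pad_token=tokenizer.pad_token_id,
)
print(len(features), len(features[0].choices_features))   # N examples, 5 encoded choices each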
fat-albert
fat-albert-master/bert/examples/run_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer, RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer, DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer, AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer, ) from transformers import AdamW, get_linear_schedule_with_warmup from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors from transformers import glue_convert_examples_to_features as convert_examples_to_features logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer), 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer), 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer), 'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and 
decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): 
tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = 
torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
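For reference, the optimizer setup in train() above excludes biases and LayerNorm weights from weight decay and pairs AdamW with a linear warmup/decay schedule. A minimal, self-contained sketch of that pattern, assuming a toy torch.nn.Linear stand-in for the pretrained model (the learning rate and step counts here are placeholders, not values from the script):

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

# Toy stand-in for the pretrained classifier; any nn.Module works the same way.
model = torch.nn.Linear(768, 2)

# Exclude biases and LayerNorm weights from weight decay, as in train() above.
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]

optimizer = AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    loss = model(torch.randn(4, 768)).pow(2).mean()  # dummy loss for illustration
    loss.backward()
    optimizer.step()
    scheduler.step()  # update the learning rate after each optimizer step
    optimizer.zero_grad()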
28,343
52.278195
158
py
fat-albert
fat-albert-master/bert/examples/benchmarks.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training """ # If checking the tensors placement # tf.debugging.set_log_device_placement(True) from typing import List import timeit from transformers import is_tf_available, is_torch_available from time import time import argparse import csv if is_tf_available(): import tensorflow as tf from transformers import TFAutoModel if is_torch_available(): import torch from transformers import AutoModel from transformers import AutoConfig, AutoTokenizer input_text = """Bent over their instruments, three hundred Fertilizers were plunged, as the Director of Hatcheries and Conditioning entered the room, in the scarcely breathing silence, the absent-minded, soliloquizing hum or whistle, of absorbed concentration. A troop of newly arrived students, very young, pink and callow, followed nervously, rather abjectly, at the Director's heels. Each of them carried a notebook, in which, whenever the great man spoke, he desperately scribbled. Straight from the horse's mouth. It was a rare privilege. The D. H. C. for Central London always made a point of personally conducting his new students round the various departments. "Just to give you a general idea," he would explain to them. For of course some sort of general idea they must have, if they were to do their work intelligently-though as little of one, if they were to be good and happy members of society, as possible. For particulars, as every one knows, make for virtue and happiness; generalities are intellectu- ally necessary evils. Not philosophers but fret-sawyers and stamp col- lectors compose the backbone of society. "To-morrow," he would add, smiling at them with a slightly menacing geniality, "you'll be settling down to serious work. You won't have time for generalities. Meanwhile ..." Meanwhile, it was a privilege. Straight from the horse's mouth into the notebook. The boys scribbled like mad. Tall and rather thin but upright, the Director advanced into the room. He had a long chin and big rather prominent teeth, just covered, when he was not talking, by his full, floridly curved lips. Old, young? Thirty? Fifty? Fifty-five? It was hard to say. And anyhow the question didn't arise; in this year of stability, A. F. 632, it didn't occur to you to ask it. "I shall begin at the beginning," said the D.H.C. and the more zealous students recorded his intention in their notebooks: Begin at the begin- ning. "These," he waved his hand, "are the incubators." And opening an insulated door he showed them racks upon racks of numbered test- tubes. "The week's supply of ova. Kept," he explained, "at blood heat; whereas the male gametes," and here he opened another door, "they have to be kept at thirty-five instead of thirty-seven. Full blood heat sterilizes." Rams wrapped in theremogene beget no lambs. 
Still leaning against the incubators he gave them, while the pencils scurried illegibly across the pages, a brief description of the modern fertilizing process; spoke first, of course, of its surgical introduc- tion-"the operation undergone voluntarily for the good of Society, not to mention the fact that it carries a bonus amounting to six months' salary"; continued with some account of the technique for preserving the excised ovary alive and actively developing; passed on to a consid- eration of optimum temperature, salinity, viscosity; referred to the liq- uor in which the detached and ripened eggs were kept; and, leading his charges to the work tables, actually showed them how this liquor was drawn off from the test-tubes; how it was let out drop by drop onto the specially warmed slides of the microscopes; how the eggs which it contained were inspected for abnormalities, counted and transferred to a porous receptacle; how (and he now took them to watch the operation) this receptacle was immersed in a warm bouillon containing free-swimming spermatozoa-at a minimum concentration of one hundred thousand per cubic centimetre, he insisted; and how, after ten minutes, the container was lifted out of the liquor and its contents re-examined; how, if any of the eggs remained unfertilized, it was again immersed, and, if necessary, yet again; how the fertilized ova went back to the incubators; where the Alphas and Betas re- mained until definitely bottled; while the Gammas, Deltas and Epsilons were brought out again, after only thirty-six hours, to undergo Bo- kanovsky's Process. "Bokanovsky's Process," repeated the Director, and the students un- derlined the words in their little notebooks. One egg, one embryo, one adult-normality. But a bokanovskified egg will bud, will proliferate, will divide. From eight to ninety-six buds, and every bud will grow into a perfectly formed embryo, and every embryo into a full-sized adult. Making ninety-six human beings grow where only one grew before. Progress. "Essentially," the D.H.C. concluded, "bokanovskification consists of a series of arrests of development. We check the normal growth and, paradoxically enough, the egg responds by budding." Responds by budding. The pencils were busy. He pointed. On a very slowly moving band a rack-full of test-tubes was entering a large metal box, another, rack-full was emerging. Machinery faintly purred. It took eight minutes for the tubes to go through, he told them. Eight minutes of hard X-rays being about as much as an egg can stand. A few died; of the rest, the least susceptible divided into two; most put out four buds; some eight; all were returned to the incubators, where the buds began to develop; then, after two days, were suddenly chilled, chilled and checked. Two, four, eight, the buds in their turn budded; and having budded were dosed almost to death with alcohol; consequently burgeoned again and having budded-bud out of bud out of bud-were thereafter-further arrest being generally fatal-left to develop in peace. By which time the original egg was in a fair way to becoming anything from eight to ninety-six embryos- a prodigious improvement, you will agree, on nature. Identical twins-but not in piddling twos and threes as in the old viviparous days, when an egg would sometimes accidentally divide; actually by dozens, by scores at a time. "Scores," the Director repeated and flung out his arms, as though he were distributing largesse. "Scores." 
But one of the students was fool enough to ask where the advantage lay. "My good boy!" The Director wheeled sharply round on him. "Can't you see? Can't you see?" He raised a hand; his expression was solemn. "Bokanovsky's Process is one of the major instruments of social stabil- ity!" Major instruments of social stability. Standard men and women; in uniform batches. The whole of a small factory staffed with the products of a single bokanovskified egg. "Ninety-six identical twins working ninety-six identical machines!" The voice was almost tremulous with enthusiasm. "You really know where you are. For the first time in history." He quoted the planetary motto. "Community, Identity, Stability." Grand words. "If we could bo- kanovskify indefinitely the whole problem would be solved." Solved by standard Gammas, unvarying Deltas, uniform Epsilons. Mil- lions of identical twins. The principle of mass production at last applied to biology. "But, alas," the Director shook his head, "we can't bokanovskify indefi- nitely." Ninety-six seemed to be the limit; seventy-two a good average. From the same ovary and with gametes of the same male to manufacture as many batches of identical twins as possible-that was the best (sadly a second best) that they could do. And even that was difficult. "For in nature it takes thirty years for two hundred eggs to reach ma- turity. But our business is to stabilize the population at this moment, here and now. Dribbling out twins over a quarter of a century-what would be the use of that?" Obviously, no use at all. But Podsnap's Technique had immensely ac- celerated the process of ripening. They could make sure of at least a hundred and fifty mature eggs within two years. Fertilize and bo- kanovskify-in other words, multiply by seventy-two-and you get an average of nearly eleven thousand brothers and sisters in a hundred and fifty batches of identical twins, all within two years of the same age. "And in exceptional cases we can make one ovary yield us over fifteen thousand adult individuals." Beckoning to a fair-haired, ruddy young man who happened to be passing at the moment. "Mr. Foster," he called. The ruddy young man approached. "Can you tell us the record for a single ovary, Mr. Foster?" "Sixteen thousand and twelve in this Centre," Mr. Foster replied with- out hesitation. He spoke very quickly, had a vivacious blue eye, and took an evident pleasure in quoting figures. "Sixteen thousand and twelve; in one hundred and eighty-nine batches of identicals. But of course they've done much better," he rattled on, "in some of the tropi- cal Centres. Singapore has often produced over sixteen thousand five hundred; and Mombasa has actually touched the seventeen thousand mark. But then they have unfair advantages. You should see the way a negro ovary responds to pituitary! It's quite astonishing, when you're used to working with European material. Still," he added, with a laugh (but the light of combat was in his eyes and the lift of his chin was challenging), "still, we mean to beat them if we can. I'm working on a wonderful Delta-Minus ovary at this moment. Only just eighteen months old. Over twelve thousand seven hundred children already, ei- ther decanted or in embryo. And still going strong. We'll beat them yet." "That's the spirit I like!" cried the Director, and clapped Mr. Foster on the shoulder. "Come along with us, and give these boys the benefit of your expert knowledge." Mr. Foster smiled modestly. "With pleasure." They went. 
In the Bottling Room all was harmonious bustle and ordered activity. Flaps of fresh sow's peritoneum ready cut to the proper size came shooting up in little lifts from the Organ Store in the sub-basement. Whizz and then, click! the lift-hatches hew open; the bottle-liner had only to reach out a hand, take the flap, insert, smooth-down, and be- fore the lined bottle had had time to travel out of reach along the end- less band, whizz, click! another flap of peritoneum had shot up from the depths, ready to be slipped into yet another bottle, the next of that slow interminable procession on the band. Next to the Liners stood the Matriculators. The procession advanced; one by one the eggs were transferred from their test-tubes to the larger containers; deftly the peritoneal lining was slit, the morula dropped into place, the saline solution poured in ... and already the bottle had passed, and it was the turn of the labellers. Heredity, date of fertilization, membership of Bokanovsky Group-details were trans- ferred from test-tube to bottle. No longer anonymous, but named, identified, the procession marched slowly on; on through an opening in the wall, slowly on into the Social Predestination Room. "Eighty-eight cubic metres of card-index," said Mr. Foster with relish, as they entered.""" def create_setup_and_compute(model_names: List[str], gpu: bool = True, tensorflow: bool = False, average_over: int = 3, torchscript: bool = False, xla: bool = False, amp: bool = False, fp16: bool = False, save_to_csv: bool = False, csv_filename: str = f"results_{round(time())}.csv"): if xla: tf.config.optimizer.set_jit(True) if amp: tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True}) if tensorflow: dictionary = {model_name: {} for model_name in model_names} results = _compute_tensorflow(model_names, dictionary, average_over, amp) else: device = 'cuda' if (gpu and torch.cuda.is_available()) else 'cpu' dictionary = {model_name: {} for model_name in model_names} results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16) print("=========== RESULTS ===========") for model_name in model_names: print("\t" + f"======= MODEL CHECKPOINT: {model_name} =======") for batch_size in results[model_name]["bs"]: print("\t\t" + f"===== BATCH SIZE: {batch_size} =====") for slice_size in results[model_name]["ss"]: result = results[model_name]['results'][batch_size][slice_size] if isinstance(result, str): print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{result}") else: print(f"\t\t{model_name}/{batch_size}/{slice_size}: " f"{(round(1000 * result) / 1000)}" f"s") if save_to_csv: with open(csv_filename, mode='w') as csv_file: fieldnames = ['model', '1x8', '1x64', '1x128', '1x256', '1x512', '1x1024', '2x8', '2x64', '2x128', '2x256', '2x512', '2x1024', '4x8', '4x64', '4x128', '4x256', '4x512', '4x1024', '8x8', '8x64', '8x128', '8x256', '8x512', '8x1024', ] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for model_name in model_names: model_results = { f'{bs}x{ss}': results[model_name]['results'][bs][ss] for bs in results[model_name]["results"] for ss in results[model_name]['results'][bs] } writer.writerow({'model': model_name, **model_results}) def _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name, torchscript=torchscript) model = AutoModel.from_pretrained(model_name, 
config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] slice_sizes = [8, 64, 128, 256, 512, 1024] dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}} dictionary[model_name]["results"] = {i: {} for i in batch_sizes} for batch_size in batch_sizes: if fp16: model.half() model.to(device) model.eval() for slice_size in slice_sizes: if max_input_size is not None and slice_size > max_input_size: dictionary[model_name]["results"][batch_size][slice_size] = "N/A" else: sequence = torch.tensor(tokenized_sequence[:slice_size], device=device).repeat(batch_size, 1) try: if torchscript: print("Tracing model with sequence size", sequence.shape) inference = torch.jit.trace(model, sequence) inference(sequence) else: inference = model inference(sequence) print("Going through model with sequence of shape", sequence.shape) runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3) average_time = sum(runtimes)/float(len(runtimes)) / 3.0 dictionary[model_name]["results"][batch_size][slice_size] = average_time except RuntimeError as e: print("Doesn't fit on GPU.", e) torch.cuda.empty_cache() dictionary[model_name]["results"][batch_size][slice_size] = "N/A" return dictionary def _compute_tensorflow(model_names, dictionary, average_over, amp): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name) model = TFAutoModel.from_pretrained(model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] slice_sizes = [8, 64, 128, 256, 512, 1024] dictionary[model_name] = {"bs": batch_sizes, "ss": slice_sizes, "results": {}} dictionary[model_name]["results"] = {i: {} for i in batch_sizes} print("Using model", model) @tf.function def inference(inputs): return model(inputs) for batch_size in batch_sizes: for slice_size in slice_sizes: if max_input_size is not None and slice_size > max_input_size: dictionary[model_name]["results"][batch_size][slice_size] = "N/A" else: sequence = tf.stack([tf.squeeze(tf.constant(tokenized_sequence[:slice_size])[None, :])] * batch_size) try: print("Going through model with sequence of shape", sequence.shape) # To make sure that the model is traced + that the tensors are on the appropriate device inference(sequence) runtimes = timeit.repeat(lambda: inference(sequence), repeat=average_over, number=3) average_time = sum(runtimes)/float(len(runtimes)) / 3.0 dictionary[model_name]["results"][batch_size][slice_size] = average_time except tf.errors.ResourceExhaustedError as e: print("Doesn't fit on GPU.", e) torch.cuda.empty_cache() dictionary[model_name]["results"][batch_size][slice_size] = "N/A" return dictionary def main(): parser = argparse.ArgumentParser() parser.add_argument("--models", required=False, type=str, default='all', help="Model checkpoints to be provided " "to the AutoModel classes. 
Leave " "blank to benchmark the base version " "of all available model " "architectures.") parser.add_argument("--torch", required=False, action="store_true", help="Benchmark the Pytorch version of the " "models") parser.add_argument("--torch_cuda", required=False, action="store_true", help="Pytorch only: run on available " "cuda devices") parser.add_argument("--torchscript", required=False, action="store_true", help="Pytorch only: trace the models " "using torchscript") parser.add_argument("--tensorflow", required=False, action="store_true", help="Benchmark the TensorFlow version " "of the models. Will run on GPU if " "the correct dependencies are " "installed") parser.add_argument("--xla", required=False, action="store_true", help="TensorFlow only: use XLA acceleration.") parser.add_argument("--amp", required=False, action="store_true", help="TensorFlow only: use automatic mixed precision acceleration.") parser.add_argument("--fp16", required=False, action="store_true", help="PyTorch only: use FP16 to accelerate inference.") parser.add_argument("--keras_predict", required=False, action="store_true", help="Whether to use model.predict " "instead of model() to do a " "forward pass.") parser.add_argument("--save_to_csv", required=False, action="store_true", help="Save to a CSV file.") parser.add_argument("--csv_filename", required=False, default=None, help="CSV filename used if saving results to csv.") parser.add_argument("--average_over", required=False, default=30, type=int, help="Times an experiment will be run.") args = parser.parse_args() if args.models == 'all': args.models = [ "gpt2", "bert-base-cased", "xlnet-base-cased", "xlm-mlm-en-2048", "transfo-xl-wt103", "openai-gpt", "distilbert-base-uncased", "distilgpt2", "roberta-base", "ctrl" ] else: args.models = args.models.split() print("Running with arguments", args) if args.torch: if is_torch_available(): create_setup_and_compute( model_names=args.models, tensorflow=False, gpu=args.torch_cuda, torchscript=args.torchscript, fp16=args.fp16, save_to_csv=args.save_to_csv, csv_filename=args.csv_filename, average_over=args.average_over ) else: raise ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.") if args.tensorflow: if is_tf_available(): create_setup_and_compute( model_names=args.models, tensorflow=True, xla=args.xla, amp=args.amp, save_to_csv=args.save_to_csv, csv_filename=args.csv_filename, average_over=args.average_over ) else: raise ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.") if __name__ == '__main__': main()
23,631
48.439331
138
py
fat-albert
fat-albert-master/bert/examples/run_summarization_finetuning.py
# coding=utf-8 # Copyright 2019 The HuggingFace Inc. team. # Copyright (c) 2019 The HuggingFace Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning seq2seq models for sequence generation.""" import argparse import functools import logging import os import random import sys import numpy as np from tqdm import tqdm, trange import torch from torch.optim import Adam from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import ( AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Model, ) from utils_summarization import ( CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids, ) logger = logging.getLogger(__name__) logging.basicConfig(stream=sys.stdout, level=logging.INFO) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) # ------------ # Load dataset # ------------ def load_and_cache_examples(args, tokenizer): dataset = CNNDailyMailDataset(tokenizer, data_dir=args.data_dir) return dataset def collate(data, tokenizer, block_size): """ List of tuple as an input. """ # remove the files with empty an story/summary, encode and fit to block data = filter(lambda x: not (len(x[0]) == 0 or len(x[1]) == 0), data) data = [ encode_for_summarization(story, summary, tokenizer) for story, summary in data ] data = [ ( fit_to_block_size(story, block_size, tokenizer.pad_token_id), fit_to_block_size(summary, block_size, tokenizer.pad_token_id), ) for story, summary in data ] stories = torch.tensor([story for story, summary in data]) summaries = torch.tensor([summary for story, summary in data]) encoder_token_type_ids = compute_token_type_ids(stories, tokenizer.cls_token_id) encoder_mask = build_mask(stories, tokenizer.pad_token_id) decoder_mask = build_mask(summaries, tokenizer.pad_token_id) lm_labels = build_lm_labels(summaries, tokenizer.pad_token_id) return ( stories, summaries, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels, ) # ---------- # Optimizers # ---------- class BertSumOptimizer(object): """ Specific optimizer for BertSum. As described in [1], the authors fine-tune BertSum for abstractive summarization using two Adam Optimizers with different warm-up steps and learning rate. They also use a custom learning rate scheduler. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). 
""" def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): self.encoder = model.encoder self.decoder = model.decoder self.lr = lr self.warmup_steps = warmup_steps self.optimizers = { "encoder": Adam( model.encoder.parameters(), lr=lr["encoder"], betas=(beta_1, beta_2), eps=eps, ), "decoder": Adam( model.decoder.parameters(), lr=lr["decoder"], betas=(beta_1, beta_2), eps=eps, ), } self._step = 0 def _update_rate(self, stack): return self.lr[stack] * min( self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5) ) def zero_grad(self): self.optimizer_decoder.zero_grad() self.optimizer_encoder.zero_grad() def step(self): self._step += 1 for stack, optimizer in self.optimizers.items(): new_rate = self._update_rate(stack) for param_group in optimizer.param_groups: param_group["lr"] = new_rate optimizer.step() # ------------ # Train # ------------ def train(args, model, tokenizer): """ Fine-tune the pretrained model on the corpus. """ set_seed(args) # Load the data args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_dataset = load_and_cache_examples(args, tokenizer) train_sampler = RandomSampler(train_dataset) model_collate_fn = functools.partial(collate, tokenizer=tokenizer, block_size=512) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=model_collate_fn, ) # Training schedule if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = t_total // ( len(train_dataloader) // args.gradient_accumulation_steps + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare the optimizer lr = {"encoder": 0.002, "decoder": 0.2} warmup_steps = {"encoder": 20000, "decoder": 10000} optimizer = BertSumOptimizer(model, lr, warmup_steps) # Train logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps # * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) model.zero_grad() train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=True) global_step = 0 tr_loss = 0.0 for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True) for step, batch in enumerate(epoch_iterator): source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch source = source.to(args.device) target = target.to(args.device) encoder_token_type_ids = encoder_token_type_ids.to(args.device) encoder_mask = encoder_mask.to(args.device) decoder_mask = decoder_mask.to(args.device) lm_labels = lm_labels.to(args.device) model.train() outputs = model( source, target, encoder_token_type_ids=encoder_token_type_ids, encoder_attention_mask=encoder_mask, decoder_attention_mask=decoder_mask, decoder_lm_labels=lm_labels, ) loss = outputs[0] print(loss) if args.gradient_accumulation_steps > 1: loss /= args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() model.zero_grad() global_step += 1 if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return global_step, tr_loss / global_step # ------------ # Train # ------------ def evaluate(args, model, tokenizer, prefix=""): set_seed(args) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch source = source.to(args.device) target = target.to(args.device) encoder_token_type_ids = encoder_token_type_ids.to(args.device) encoder_mask = encoder_mask.to(args.device) decoder_mask = decoder_mask.to(args.device) lm_labels = lm_labels.to(args.device) with torch.no_grad(): outputs = model( source, target, encoder_token_type_ids=encoder_token_type_ids, encoder_attention_mask=encoder_mask, decoder_attention_mask=decoder_mask, decoder_lm_labels=lm_labels, ) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps perplexity = torch.exp(torch.tensor(eval_loss)) result = {"perplexity": perplexity} # Save the evaluation's results output_eval_file = os.path.join(args.output_dir, "eval_results.txt") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) 
return result def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input training data file (a text file).", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Optional parameters parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--do_evaluate", type=bool, default=False, help="Run model evaluation on out-of-sample data.", ) parser.add_argument("--do_train", type=bool, default=False, help="Run training.") parser.add_argument( "--do_overwrite_output_dir", type=bool, default=False, help="Whether to overwrite the output dir.", ) parser.add_argument( "--model_name_or_path", default="bert-base-cased", type=str, help="The model checkpoint to initialize the encoder and decoder's weights with.", ) parser.add_argument( "--model_type", default="bert", type=str, help="The decoder architecture to be fine-tuned.", ) parser.add_argument( "--max_grad_norm", default=1.0, type=float, help="Max gradient norm." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument( "--to_cpu", default=False, type=bool, help="Whether to force training on CPU." ) parser.add_argument( "--num_train_epochs", default=10, type=int, help="Total number of training epochs to perform.", ) parser.add_argument( "--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument("--seed", default=42, type=int) args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.do_overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format( args.output_dir ) ) # Set up training device if args.to_cpu or not torch.cuda.is_available(): args.device = torch.device("cpu") args.n_gpu = 0 else: args.device = torch.device("cuda") args.n_gpu = torch.cuda.device_count() # Load pretrained model and tokenizer. The decoder's weights are randomly initialized. tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) config = BertConfig.from_pretrained(args.model_name_or_path) decoder_model = BertForMaskedLM(config) model = Model2Model.from_pretrained( args.model_name_or_path, decoder_model=decoder_model ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", 0, args.device, args.n_gpu, False, False, ) logger.info("Training/evaluation parameters %s", args) # Train the model model.to(args.device) if args.do_train: global_step, tr_loss = train(args, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) torch.save(args, os.path.join(args.output_dir, "training_arguments.bin")) # Evaluate the model results = {} if args.do_evaluate: checkpoints = [] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: encoder_checkpoint = os.path.join(checkpoint, "encoder") decoder_checkpoint = os.path.join(checkpoint, "decoder") model = PreTrainedEncoderDecoder.from_pretrained( encoder_checkpoint, decoder_checkpoint ) model.to(args.device) results = "placeholder" return results if __name__ == "__main__": main()
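For reference, the two-optimizer schedule implemented by BertSumOptimizer above can be evaluated in isolation; the small sketch below (values copied from the script's lr and warmup_steps dictionaries) prints the per-stack learning rate at a few steps. Note that the BertSum paper's Noam-style schedule uses warmup_steps ** (-1.5) in the second term, whereas the code above uses ** (-0.5), so the exponent may be worth double-checking.

# Per-stack learning-rate schedule as computed by BertSumOptimizer._update_rate above.
lr = {"encoder": 0.002, "decoder": 0.2}
warmup_steps = {"encoder": 20000, "decoder": 10000}

def update_rate(stack, step):
    # Mirrors the expression in _update_rate; step must be >= 1.
    return lr[stack] * min(step ** (-0.5), step * warmup_steps[stack] ** (-0.5))

for step in (1, 1000, 10000, 20000, 100000):
    print(step, {stack: round(update_rate(stack, step), 6) for stack in lr})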
15,727
30.902637
120
py
fat-albert
fat-albert-master/bert/examples/run_bertology.py
#!/usr/bin/env python3 # Copyright 2018 CMU and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Bertology: this script shows how you can explore the internals of the models in the library to: - compute the entropy of the head attentions - compute the importance of each head - prune (remove) the low importance head. Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650) which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1 """ import os import argparse import logging from datetime import timedelta, datetime from tqdm import tqdm import numpy as np import torch from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subset from torch.utils.data.distributed import DistributedSampler from torch.nn import CrossEntropyLoss, MSELoss from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer) from run_glue import set_seed, load_and_cache_examples, ALL_MODELS, MODEL_CLASSES from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors logger = logging.getLogger(__name__) def entropy(p): """ Compute the entropy of a probability distribution """ plogp = p * torch.log(p) plogp[p == 0] = 0 return -plogp.sum(dim=-1) def print_2d_tensor(tensor): """ Print a 2D tensor """ logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) else: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None): """ This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.bert.config.num_hidden_layers, model.bert.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) preds = None labels = None tot_tokens = 0.0 for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): batch = tuple(t.to(args.device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids, head_mask=head_mask) loss, logits, all_attentions = outputs[0], 
outputs[1], outputs[-1] # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * input_mask.float().unsqueeze(1) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = label_ids.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, label_ids.detach().cpu().numpy(), axis=0) tot_tokens += input_mask.float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print/save matrices np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy()) np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, preds, labels def mask_heads(args, model, eval_dataloader): """ This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) original_score = compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float('Inf') current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) current_score = compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Masking: current score: %f, remaning heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum()/new_head_mask.numel() * 100) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy()) return head_mask def prune_heads(args, model, eval_dataloader, head_mask): """ This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) """ # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_masking = compute_metrics(args.task_name, preds, labels)[args.metric_name] original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict((layer, (1 - head_mask[layer].long()).nonzero().tolist()) for layer in range(len(head_mask))) assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_pruning = compute_metrics(args.task_name, preds, labels)[args.metric_name] new_time = datetime.now() - before_time logger.info("Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params/original_num_params * 100) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time/new_time * 100) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join( ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.") parser.add_argument("--overwrite_output_dir", action='store_true', help="Whether to overwrite data in output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument("--dont_normalize_importance_by_layer", action='store_true', help="Don't normalize importance score by layers") parser.add_argument("--dont_normalize_global_importance", action='store_true', help="Don't normalize all importance scores between 0 and 1") parser.add_argument("--try_masking", action='store_true', help="Whether to try to mask head until a threshold of accuracy.") parser.add_argument("--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).") parser.add_argument("--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.") parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, sequences shorter padded.") parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) args.device = torch.device("cuda", args.local_rank) args.n_gpu = 1 torch.distributed.init_process_group(backend='nccl') # Initializes the distributed backend # Setup logging logging.basicConfig(level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1))) # Set seeds set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = "" for key in MODEL_CLASSES: if key in args.model_name_or_path.lower(): args.model_type = key # take the first match in model types break config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, output_attentions=True, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Distributed and parallel training model.to(args.device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) elif args.n_gpu > 1: model = torch.nn.DataParallel(model) # Print/save training arguments torch.save(args, os.path.join(args.output_dir, 'run_args.bin')) logger.info("Training/evaluation parameters %s", args) # Prepare dataset for the GLUE task 
eval_data = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True) if args.data_subset > 0: eval_data = Subset(eval_data, list(range(min(args.data_subset, len(eval_data))))) eval_sampler = SequentialSampler(eval_data) if args.local_rank == -1 else DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) # Compute head entropy and importance score compute_heads_importance(args, model, eval_dataloader) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: head_mask = mask_heads(args, model, eval_dataloader) prune_heads(args, model, eval_dataloader, head_mask) if __name__ == '__main__': main()
18,901
51.798883
177
py
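The head-masking script above ends by converting a binary head mask into the dictionary that model.prune_heads() consumes. Below is a minimal, self-contained sketch of that conversion; the mask values and layer/head counts are invented for illustration and no real model is involved.

import torch

# Hypothetical mask over 2 layers x 4 heads: 1.0 = keep the head, 0.0 = prune it.
head_mask = torch.tensor([[1., 0., 1., 1.],
                          [0., 0., 1., 1.]])

# Same idea as in prune_heads() above (with an extra squeeze so head indices come out as plain ints):
# for each layer, collect the indices of the zeroed-out heads.
heads_to_prune = {layer: (1 - head_mask[layer].long()).nonzero().squeeze(-1).tolist()
                  for layer in range(head_mask.size(0))}
print(heads_to_prune)  # {0: [1], 1: [0, 1]}

# Sanity check mirroring the assert in the script: pruned-head count equals the zeros in the mask.
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()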
fat-albert
fat-albert-master/bert/examples/utils_summarization_test.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from utils_summarization import ( compute_token_type_ids, fit_to_block_size, build_mask, build_lm_labels, process_story, ) class SummarizationDataProcessingTest(unittest.TestCase): def setUp(self): self.block_size = 10 def test_fit_to_block_sequence_too_small(self): """ Pad the sequence with 0 if the sequence is smaller than the block size.""" sequence = [1, 2, 3, 4] expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_fit_to_block_sequence_fit_exactly(self): """ Do nothing if the sequence is the right size. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_fit_to_block_sequence_too_big(self): """ Truncate the sequence if it is too long. """ sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual( fit_to_block_size(sequence, self.block_size, 0), expected_output ) def test_process_story_no_highlights(self): """ Processing a story with no highlights returns an empty list for the summary. """ raw_story = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _, summary_lines = process_story(raw_story) self.assertEqual(summary_lines, []) def test_process_empty_story(self): """ An empty story returns an empty collection of lines. 
""" raw_story = "" story_lines, summary_lines = process_story(raw_story) self.assertEqual(story_lines, []) self.assertEqual(summary_lines, []) def test_process_story_with_missing_period(self): raw_story = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) story_lines, summary_lines = process_story(raw_story) expected_story_lines = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(expected_story_lines, story_lines) expected_summary_lines = ["It was the best of times."] self.assertEqual(expected_summary_lines, summary_lines) def test_build_lm_labels_no_padding(self): sequence = torch.tensor([1, 2, 3, 4]) expected = sequence np.testing.assert_array_equal( build_lm_labels(sequence, 0).numpy(), expected.numpy() ) def test_build_lm_labels(self): sequence = torch.tensor([1, 2, 3, 4, 0, 0, 0]) expected = torch.tensor([1, 2, 3, 4, -1, -1, -1]) np.testing.assert_array_equal( build_lm_labels(sequence, 0).numpy(), expected.numpy() ) def test_build_mask_no_padding(self): sequence = torch.tensor([1, 2, 3, 4]) expected = torch.tensor([1, 1, 1, 1]) np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy()) def test_build_mask(self): sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal( build_mask(sequence, 23).numpy(), expected.numpy() ) def test_build_mask_with_padding_equal_to_one(self): sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1]) expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy()) def test_compute_token_type_ids(self): separator = 101 batch = torch.tensor( [[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) expected = torch.tensor( [[0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1], [0, 1, 1, 1, 0, 0]] ) result = compute_token_type_ids(batch, separator) np.testing.assert_array_equal(result, expected) if __name__ == "__main__": unittest.main()
5,178
36.80292
98
py
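The tests above pin down the padding/truncation contract of fit_to_block_size. The sketch below is a standalone re-implementation written purely for illustration (it is not the module under test); it reproduces the three expectations the tests exercise.

def fit_to_block_size_sketch(sequence, block_size, pad_token):
    """Truncate to block_size, or right-pad with pad_token up to block_size."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token] * (block_size - len(sequence))

assert fit_to_block_size_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]   # pad
assert fit_to_block_size_sketch(list(range(1, 11)), 10, 0) == list(range(1, 11))         # exact fit
assert fit_to_block_size_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))         # truncate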
fat-albert
fat-albert-master/bert/examples/run_generation.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet) """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging from tqdm import trange import torch import torch.nn.functional as F import numpy as np from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer from transformers import XLNetLMHeadModel, XLNetTokenizer from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer from transformers import CTRLLMHeadModel, CTRLTokenizer from transformers import XLMWithLMHeadModel, XLMTokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, XLMConfig, CTRLConfig)), ()) MODEL_CLASSES = { 'gpt2': (GPT2LMHeadModel, GPT2Tokenizer), 'ctrl': (CTRLLMHeadModel, CTRLTokenizer), 'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), 'xlnet': (XLNetLMHeadModel, XLNetTokenizer), 'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer), 'xlm': (XLMWithLMHeadModel, XLMTokenizer), } # Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. 
<eod> </s> <eos>""" def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size x vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p > 0.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probs > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove) logits[indices_to_remove] = filter_value return logits def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, repetition_penalty=1.0, is_xlnet=False, is_xlm_mlm=False, xlm_mask_token=None, xlm_lang=None, device='cpu'): context = torch.tensor(context, dtype=torch.long, device=device) context = context.unsqueeze(0).repeat(num_samples, 1) generated = context with torch.no_grad(): for _ in trange(length): inputs = {'input_ids': generated} if is_xlnet: # XLNet is a direct (predict same token, not next token) and bi-directional model by default # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring) input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1) perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device) target_mapping[0, 0, -1] = 1.0 # predict last token inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping} if is_xlm_mlm and xlm_mask_token: # XLM MLM models are direct models (predict same token, not next token) # => need one additional dummy token in the input (will be masked and guessed) input_ids = torch.cat((generated, torch.full((1, 1), xlm_mask_token, dtype=torch.long, device=device)), dim=1) inputs = {'input_ids': input_ids} if xlm_lang is not None: inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1) outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states) next_token_logits = outputs[0][:, -1, :] / (temperature if temperature > 0 else 1.) 
# repetition penalty from CTRL (https://arxiv.org/abs/1909.05858) for i in range(num_samples): for _ in set(generated[i].tolist()): next_token_logits[i, _] /= repetition_penalty filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) if temperature == 0: # greedy sampling: next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1) else: next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) generated = torch.cat((generated, next_token), dim=1) return generated def main(): parser = argparse.ArgumentParser() parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--prompt", type=str, default="") parser.add_argument("--padding_text", type=str, default="") parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--length", type=int, default=20) parser.add_argument("--num_samples", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0, help="temperature of 0 implies greedy sampling") parser.add_argument("--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2") parser.add_argument("--top_k", type=int, default=0) parser.add_argument("--top_p", type=float, default=0.9) parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--stop_token', type=str, default=None, help="Token at which text generation is stopped") args = parser.parse_args() args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() set_seed(args) args.model_type = args.model_type.lower() model_class, tokenizer_class = MODEL_CLASSES[args.model_type] tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) model = model_class.from_pretrained(args.model_name_or_path) model.to(args.device) model.eval() if args.length < 0 and model.config.max_position_embeddings > 0: args.length = model.config.max_position_embeddings elif 0 < model.config.max_position_embeddings < args.length: args.length = model.config.max_position_embeddings # No generation bigger than model size elif args.length < 0: args.length = MAX_LENGTH # avoid infinite loop logger.info(args) if args.model_type in ["ctrl"]: if args.temperature > 0.7: logger.info('CTRL typically works better with lower temperatures (and lower top_k).') while True: xlm_lang = None # XLM Language usage detailed in the issues #1414 if args.model_type in ["xlm"] and hasattr(tokenizer, 'lang2id') and hasattr(model.config, 'use_lang_emb') \ and model.config.use_lang_emb: if args.xlm_lang: language = args.xlm_lang else: language = None while language not in tokenizer.lang2id.keys(): language = input("Using XLM. 
Select language in " + str(list(tokenizer.lang2id.keys())) + " >>> ") xlm_lang = tokenizer.lang2id[language] # XLM masked-language modeling (MLM) models need masked token (see details in sample_sequence) is_xlm_mlm = args.model_type in ["xlm"] and 'mlm' in args.model_name_or_path if is_xlm_mlm: xlm_mask_token = tokenizer.mask_token_id else: xlm_mask_token = None raw_text = args.prompt if args.prompt else input("Model prompt >>> ") if args.model_type in ["transfo-xl", "xlnet"]: # Models with memory likes to have a long prompt for short inputs. raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text context_tokens = tokenizer.encode(raw_text, add_special_tokens=False) if args.model_type == "ctrl": if not any(context_tokens[0] == x for x in tokenizer.control_codes.values()): logger.info("WARNING! You are not starting your generation from a control code so you won't get good results") out = sample_sequence( model=model, context=context_tokens, num_samples=args.num_samples, length=args.length, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p, repetition_penalty=args.repetition_penalty, is_xlnet=bool(args.model_type == "xlnet"), is_xlm_mlm=is_xlm_mlm, xlm_mask_token=xlm_mask_token, xlm_lang=xlm_lang, device=args.device, ) out = out[:, len(context_tokens):].tolist() for o in out: text = tokenizer.decode(o, clean_up_tokenization_spaces=True) text = text[: text.find(args.stop_token) if args.stop_token else None] print(text) if args.prompt: break return text if __name__ == '__main__': main()
13,112
49.241379
167
py
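top_k_top_p_filtering above is the core of the sampling loop in run_generation.py. The following is a toy, self-contained illustration of the top-k branch only (the vocabulary size and logit values are made up): logits outside the top k are pushed to -inf, so softmax gives them zero probability and multinomial can only pick among the survivors.

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.5, 0.2, -1.0, -3.0]])   # toy scores over a 6-token vocabulary

top_k = 2
kth_value = torch.topk(logits, top_k)[0][..., -1, None]      # value of the k-th largest logit
filtered = logits.masked_fill(logits < kth_value, float('-inf'))

probs = F.softmax(filtered, dim=-1)                          # only the top-2 tokens keep probability mass
next_token = torch.multinomial(probs, num_samples=1)         # sampled id is always 0 or 1 here
print(probs, next_token)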
fat-albert
fat-albert-master/bert/examples/run_ner.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """ from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from seqeval.metrics import precision_score, recall_score, f1_score from tensorboardX import SummaryWriter from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file from transformers import AdamW, get_linear_schedule_with_warmup from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer from transformers import RobertaConfig, RobertaForTokenClassification, RobertaTokenizer from transformers import DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer from transformers import CamembertConfig, CamembertForTokenClassification, CamembertTokenizer logger = logging.getLogger(__name__) ALL_MODELS = sum( (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { "bert": (BertConfig, BertForTokenClassification, BertTokenizer), "roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer), "distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer), "camembert": (CamembertConfig, CamembertForTokenClassification, CamembertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay}, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0} ] 
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * ( torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev") for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 
and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""): eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation %s *****", prefix) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] if args.n_gpu > 1: tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating eval_loss += tmp_eval_loss.item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = np.argmax(preds, axis=2) label_map = {i: label for i, label in enumerate(labels)} out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if out_label_ids[i, j] != pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) results = { "loss": eval_loss, "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list) } logger.info("***** Eval results %s *****", prefix) for key in sorted(results.keys()): logger.info(" %s = %s", key, str(results[key])) return results, preds_list def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data 
features from cache or dataset file cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}".format(mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = read_examples_from_file(args.data_dir, mode) features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer, cls_token_at_end=bool(args.model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(args.model_type in ["roberta"]), # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, pad_token_label_id=pad_token_label_id ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.") parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument("--evaluate_during_training", action="store_true", help="Whether to run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory") parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare CONLL-2003 task labels = get_labels(args.labels) num_labels = len(labels) # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later pad_token_label_id = CrossEntropyLoss().ignore_index # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train") global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))) logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step) if global_step: result = {"{}_{}".format(global_step, k): v for k, v in result.items()} results.update(result) output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("{} = {}\n".format(key, str(results[key]))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.output_dir) model.to(args.device) result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test") # Save results output_test_results_file = os.path.join(args.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(result.keys()): writer.write("{} = {}\n".format(key, str(result[key]))) # Save predictions output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt") with open(output_test_predictions_file, "w") as writer: with open(os.path.join(args.data_dir, "test.txt"), "r") as f: example_id = 0 for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": writer.write(line) if not predictions[example_id]: example_id += 1 elif predictions[example_id]: output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n" writer.write(output_line) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0]) return results if __name__ == "__main__": main()
28,788
53.013133
184
py
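The train() function above splits parameters into a decayed and a non-decayed group before building the optimizer. Here is a minimal sketch of that grouping on a made-up toy module; plain torch.optim.AdamW is used only to keep the sketch self-contained, whereas the script itself uses transformers' AdamW plus a linear warmup schedule.

import torch
from torch import nn

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = ToyModel()
no_decay = ["bias", "LayerNorm.weight"]   # same exclusion list as in train() above
grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
print([len(g["params"]) for g in grouped_parameters])   # [1, 3]: only dense.weight gets weight decay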
fat-albert
fat-albert-master/bert/examples/utils_summarization.py
from collections import deque import os import torch from torch.utils.data import Dataset # ------------ # Data loading # ------------ class CNNDailyMailDataset(Dataset): """ Abstracts the dataset used to train seq2seq models. CNN/Daily News: The CNN/Daily News raw datasets are downloaded from [1]. The stories are stored in different files; the summary appears at the end of the story as sentences that are prefixed by the special `@highlight` line. To process the data, untar both datasets in the same folder, and pass the path to this folder as the "data_dir argument. The formatting code was inspired by [2]. [1] https://cs.nyu.edu/~kcho/ [2] https://github.com/abisee/cnn-dailymail/ """ def __init__(self, tokenizer, prefix="train", data_dir=""): assert os.path.isdir(data_dir) self.tokenizer = tokenizer # We initialize the class by listing all the files that contain # stories and summaries. Files are not read in memory given # the size of the corpus. self.stories_path = [] datasets = ("cnn", "dailymail") for dataset in datasets: path_to_stories = os.path.join(data_dir, dataset, "stories") story_filenames_list = os.listdir(path_to_stories) for story_filename in story_filenames_list: path_to_story = os.path.join(path_to_stories, story_filename) if not os.path.isfile(path_to_story): continue self.stories_path.append(path_to_story) def __len__(self): return len(self.stories_path) def __getitem__(self, idx): story_path = self.stories_path[idx] with open(story_path, encoding="utf-8") as source: raw_story = source.read() story_lines, summary_lines = process_story(raw_story) return story_lines, summary_lines def process_story(raw_story): """ Extract the story and summary from a story file. Attributes: raw_story (str): content of the story file as an utf-8 encoded string. Raises: IndexError: If the stoy is empty or contains no highlights. """ nonempty_lines = list( filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]) ) # for some unknown reason some lines miss a period, add it nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] # gather article lines story_lines = [] lines = deque(nonempty_lines) while True: try: element = lines.popleft() if element.startswith("@highlight"): break story_lines.append(element) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None. return story_lines, [] # gather summary lines summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) return story_lines, summary_lines def _add_missing_period(line): END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"] if line.startswith("@highlight"): return line if line[-1] in END_TOKENS: return line return line + "." # -------------------------- # Encoding and preprocessing # -------------------------- def fit_to_block_size(sequence, block_size, pad_token): """ Adapt the source and target sequences' lengths to the block size. If the sequence is shorter than the block size we pad it with -1 ids which correspond to padding tokens. """ if len(sequence) > block_size: return sequence[:block_size] else: sequence.extend([pad_token] * (block_size - len(sequence))) return sequence def build_lm_labels(sequence, pad_token): """ Padding token, encoded as 0, are represented by the value -1 so they are not taken into account in the loss computation. """ padded = sequence.clone() padded[padded == pad_token] = -1 return padded def build_mask(sequence, pad_token): """ Builds the mask. 
The attention mechanism will only attend to positions with value 1. """ mask = torch.ones_like(sequence) idx_pad_tokens = sequence == pad_token mask[idx_pad_tokens] = 0 return mask def encode_for_summarization(story_lines, summary_lines, tokenizer): """ Encode the story and summary lines, and join them as specified in [1] by using `[SEP] [CLS]` tokens to separate sentences. """ story_lines_token_ids = [ tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line)) for line in story_lines ] summary_lines_token_ids = [ tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line)) for line in summary_lines ] story_token_ids = [ token for sentence in story_lines_token_ids for token in sentence ] summary_token_ids = [ token for sentence in summary_lines_token_ids for token in sentence ] return story_token_ids, summary_token_ids def compute_token_type_ids(batch, separator_token_id): """ Segment embeddings as described in [1] The values {0,1} were found in the repository [2]. Attributes: batch: torch.Tensor, size [batch_size, block_size] Batch of input. separator_token_id: int The value of the token that separates the segments. [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." arXiv preprint arXiv:1908.08345 (2019). [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) """ batch_embeddings = [] for sequence in batch: sentence_num = 0 embeddings = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2) batch_embeddings.append(embeddings) return torch.tensor(batch_embeddings)
6,022
31.556757
88
py
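compute_token_type_ids above assigns alternating segment ids, flipping at every separator token. A plain-Python sketch of the same rule follows (the separator id 101 and the toy sequence are invented for illustration); it matches the expectation in utils_summarization_test.py.

def token_type_ids_sketch(sequence, separator_token_id):
    sentence_num, out = 0, []
    for token in sequence:
        if token == separator_token_id:
            sentence_num += 1            # a separator starts a new sentence ...
        out.append(sentence_num % 2)     # ... and segment ids alternate between 0 and 1
    return out

print(token_type_ids_sketch([1, 2, 3, 101, 5, 6], separator_token_id=101))   # [0, 0, 0, 1, 1, 1]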
fat-albert
fat-albert-master/bert/examples/run_tf_glue.py
import os import tensorflow as tf import tensorflow_datasets from transformers import BertTokenizer, TFBertForSequenceClassification, BertConfig, glue_convert_examples_to_features, BertForSequenceClassification, glue_processors # script parameters BATCH_SIZE = 32 EVAL_BATCH_SIZE = BATCH_SIZE * 2 USE_XLA = False USE_AMP = False EPOCHS = 3 TASK = "mrpc" if TASK == "sst-2": TFDS_TASK = "sst2" elif TASK == "sts-b": TFDS_TASK = "stsb" else: TFDS_TASK = TASK num_labels = len(glue_processors[TASK]().get_labels()) print(num_labels) tf.config.optimizer.set_jit(USE_XLA) tf.config.optimizer.set_experimental_options({"auto_mixed_precision": USE_AMP}) # Load tokenizer and model from pretrained model/vocabulary. Specify the number of labels to classify (2+: classification, 1: regression) config = BertConfig.from_pretrained("bert-base-cased", num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained('bert-base-cased') model = TFBertForSequenceClassification.from_pretrained('bert-base-cased', config=config) # Load dataset via TensorFlow Datasets data, info = tensorflow_datasets.load(f'glue/{TFDS_TASK}', with_info=True) train_examples = info.splits['train'].num_examples # MNLI expects either validation_matched or validation_mismatched valid_examples = info.splits['validation'].num_examples # Prepare dataset for GLUE as a tf.data.Dataset instance train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, 128, TASK) # MNLI expects either validation_matched or validation_mismatched valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, TASK) train_dataset = train_dataset.shuffle(128).batch(BATCH_SIZE).repeat(-1) valid_dataset = valid_dataset.batch(EVAL_BATCH_SIZE) # Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule opt = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08) if USE_AMP: # loss scaling is currently required when using mixed precision opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, 'dynamic') if num_labels == 1: loss = tf.keras.losses.MeanSquaredError() else: loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy') model.compile(optimizer=opt, loss=loss, metrics=[metric]) # Train and evaluate using tf.keras.Model.fit() train_steps = train_examples//BATCH_SIZE valid_steps = valid_examples//EVAL_BATCH_SIZE history = model.fit(train_dataset, epochs=EPOCHS, steps_per_epoch=train_steps, validation_data=valid_dataset, validation_steps=valid_steps) # Save TF2 model os.makedirs('./save/', exist_ok=True) model.save_pretrained('./save/') if TASK == "mrpc": # Load the TensorFlow model in PyTorch for inspection # This is to demo the interoperability between the two frameworks, you don't have to # do this in real life (you can run the inference on the TF model). pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True) # Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task sentence_0 = 'This research was consistent with his findings.' sentence_1 = 'His findings were compatible with this research.' sentence_2 = 'His findings were not compatible with this research.' 
    inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt')
    inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt')
    # encode_plus returns a 'special_tokens_mask' entry here; drop it, since the model's forward() does not accept it
    del inputs_1["special_tokens_mask"]
    del inputs_2["special_tokens_mask"]
    # argmax over the two MRPC classes: 1 = paraphrase, 0 = not a paraphrase
    pred_1 = pytorch_model(**inputs_1)[0].argmax().item()
    pred_2 = pytorch_model(**inputs_2)[0].argmax().item()
    print('sentence_1 is', 'a paraphrase' if pred_1 else 'not a paraphrase', 'of sentence_0')
    print('sentence_2 is', 'a paraphrase' if pred_2 else 'not a paraphrase', 'of sentence_0')
3,978
41.329787
166
py
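Two small decisions in the script above are easy to miss: the loss is chosen from the label count (one label means the regression task STS-B), and the steps per epoch come from integer division of the dataset size by the batch size. A hedged sketch follows; the example sizes below are hypothetical.

import tensorflow as tf

def pick_loss(num_labels):
    # One label -> regression (STS-B) -> mean squared error;
    # otherwise classification over num_labels classes on raw logits.
    if num_labels == 1:
        return tf.keras.losses.MeanSquaredError()
    return tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

BATCH_SIZE = 32
train_examples = 3668                        # hypothetical MRPC-sized training split
train_steps = train_examples // BATCH_SIZE   # 114 steps per epoch
print(type(pick_loss(2)).__name__, train_steps)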
fat-albert
fat-albert-master/bert/examples/test_examples.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest import argparse import logging try: # python 3.4+ can use builtin unittest.mock instead of mock package from unittest.mock import patch except ImportError: from mock import patch import run_glue import run_squad import run_generation logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument('-f') args = parser.parse_args() return args.f class ExamplesTests(unittest.TestCase): def test_run_glue(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_glue.py", "--data_dir=./examples/tests_samples/MRPC/", "--task_name=mrpc", "--do_train", "--do_eval", "--output_dir=./examples/tests_samples/temp_dir", "--per_gpu_train_batch_size=2", "--per_gpu_eval_batch_size=1", "--learning_rate=1e-4", "--max_steps=10", "--warmup_steps=2", "--overwrite_output_dir", "--seed=42"] model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_glue.main() for value in result.values(): self.assertGreaterEqual(value, 0.75) def test_run_squad(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_squad.py", "--train_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", "--predict_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", "--model_name=bert-base-uncased", "--output_dir=./examples/tests_samples/temp_dir", "--max_steps=10", "--warmup_steps=2", "--do_train", "--do_eval", "--version_2_with_negative", "--learning_rate=2e-4", "--per_gpu_train_batch_size=2", "--per_gpu_eval_batch_size=1", "--overwrite_output_dir", "--seed=42"] model_type, model_name = ("--model_type=bert", "--model_name_or_path=bert-base-uncased") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_squad.main() self.assertGreaterEqual(result['f1'], 30) self.assertGreaterEqual(result['exact'], 30) def test_generation(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"] model_type, model_name = ("--model_type=openai-gpt", "--model_name_or_path=openai-gpt") with patch.object(sys, 'argv', testargs + [model_type, model_name]): result = run_generation.main() self.assertGreaterEqual(len(result), 10) if __name__ == "__main__": unittest.main()
4,166
36.205357
88
py
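test_examples.py above drives each example script by patching sys.argv. As a hedged sketch, a single smoke test is typically selected and run in isolation like this, assuming the working directory is bert/examples/ so that run_glue, run_squad, and run_generation import cleanly.

# Hedged sketch: run one smoke test from test_examples.py on its own.
# Assumption: executed from the examples/ directory.
import unittest
from test_examples import ExamplesTests

suite = unittest.TestSuite()
suite.addTest(ExamplesTests("test_generation"))  # the lightest of the three tests
unittest.TextTestRunner(verbosity=2).run(suite)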
fat-albert
fat-albert-master/bert/examples/run_multiple_choice.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for multiple choice (Bert, Roberta, XLNet).""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer, XLNetConfig, XLNetForMultipleChoice, XLNetTokenizer, RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from utils_multiple_choice import (convert_examples_to_features, processors) logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, RobertaConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForMultipleChoice, XLNetTokenizer), 'roberta': (RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer) } def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def simple_accuracy(preds, labels): return (preds == labels).mean() def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, 
num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 best_dev_acc, best_dev_loss = 0.0, 99999999999.0 best_steps = 0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids 'labels': batch[3]} outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) if results["eval_acc"] > best_dev_acc: best_dev_acc = results["eval_acc"] best_dev_loss = results["eval_loss"] best_steps = global_step if args.do_test: results_test = evaluate(args, model, tokenizer, test=True) for key, value in results_test.items(): tb_writer.add_scalar('test_{}'.format(key), value, global_step) logger.info("test acc: %s, loss: %s, global steps: %s", str(results_test['eval_acc']), str(results_test['eval_loss']), str(global_step)) 
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logger.info("Average loss: %s at global step: %s", str((tr_loss - logging_loss)/args.logging_steps), str(global_step)) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step, best_steps def evaluate(args, model, tokenizer, prefix="", test=False): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=not test, test=test) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids 'labels': batch[3]} outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps probabilities = preds preds = np.argmax(preds, axis=1) logger.info("Saving Labels") np.savetxt('probs.txt',probabilities) np.savetxt('preds.txt',preds) acc = simple_accuracy(preds, out_label_ids) result = {"eval_acc": acc, "eval_loss": eval_loss} results.update(result) output_eval_file = os.path.join(eval_output_dir, "is_test_" + str(test).lower() + "_eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(str(prefix) + " is test:" + str(test))) writer.write("model =%s\n" % str(args.model_name_or_path)) writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))) writer.write("train num epochs=%d\n" % args.num_train_epochs) writer.write("fp16 =%s\n" % args.fp16) writer.write("max seq length =%d\n" % args.max_seq_length) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() # Load data features from cache or dataset file if evaluate: cached_mode = 'dev' elif test: cached_mode = 'test' else: cached_mode = 'train' assert (evaluate == True and test == True) == False cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format( cached_mode, list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if evaluate: examples = processor.get_dev_examples(args.data_dir) elif test: examples = processor.get_test_examples(args.data_dir) else: examples = processor.get_train_examples(args.data_dir) logger.info("Training number: %s", str(len(examples))) features = convert_examples_to_features( examples, label_list, args.max_seq_length, tokenizer, pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0, is_training=args.do_train ) if args.local_rank in 
[-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label_ids = torch.tensor([f.label for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_test", action='store_true', help='Whether to run test on the test set') parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) best_steps = 0 # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss, best_steps = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: if not args.do_train: args.output_dir = args.model_name_or_path checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) if args.do_test and args.local_rank in [-1, 0]: if not args.do_train: args.output_dir = args.model_name_or_path checkpoints = [args.output_dir] # if args.eval_all_checkpoints: # can not use this to do test!! # checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) # logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix, test=True) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) if best_steps: logger.info("best steps of eval acc is the following checkpoints: %s", best_steps) return results if __name__ == "__main__": main()
29,481
50.722807
168
py
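run_multiple_choice.py above exposes its configuration entirely through argparse. A hedged launch sketch follows, using only flags defined in that parser; the data and output paths are placeholders, and the "swag" task name assumes the corresponding processor is registered in utils_multiple_choice.py.

# Hedged sketch: one plausible invocation of run_multiple_choice.py.
# "./swag_data" and "./swag_output" are placeholder paths.
import subprocess
import sys

cmd = [
    sys.executable, "run_multiple_choice.py",
    "--model_type", "bert",
    "--model_name_or_path", "bert-base-uncased",
    "--task_name", "swag",            # assumption: processor name in utils_multiple_choice.py
    "--data_dir", "./swag_data",
    "--output_dir", "./swag_output",
    "--do_train",
    "--do_eval",
    "--do_lower_case",
    "--max_seq_length", "80",
    "--per_gpu_train_batch_size", "8",
    "--learning_rate", "5e-5",
    "--num_train_epochs", "3",
    "--overwrite_output_dir",
]
subprocess.run(cmd, check=True)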
fat-albert
fat-albert-master/bert/examples/utils_ner.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """ from __future__ import absolute_import, division, print_function import logging import os from io import open logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for token classification.""" def __init__(self, guid, words, labels): """Constructs a InputExample. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.words = words self.labels = labels class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_ids): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f: words = [] labels = [] for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels)) guid_index += 1 words = [] labels = [] else: splits = line.split(" ") words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append(InputExample(guid="%s-%d".format(mode, guid_index), words=words, labels=labels)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True): """ Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d", ex_index, len(examples)) tokens = [] label_ids = [] for word, label in zip(example.words, example.labels): word_tokens = tokenizer.tokenize(word) tokens.extend(word_tokens) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + 
[pad_token_label_id] * (len(word_tokens) - 1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = 3 if sep_token_extra else 2 if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[:(max_seq_length - special_tokens_count)] label_ids = label_ids[:(max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += ([pad_token] * padding_length) input_mask += ([0 if mask_padding_with_zero else 1] * padding_length) segment_ids += ([pad_token_segment_id] * padding_length) label_ids += ([pad_token_label_id] * padding_length) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s", example.guid) logger.info("tokens: %s", " ".join([str(x) for x in tokens])) logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)) return features def get_labels(path): if path: with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
9,181
42.107981
109
py
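The helpers in utils_ner.py above can also be driven on in-memory examples rather than a CoNLL file. The following hedged sketch uses only the signatures shown in that file and assumes utils_ner.py is importable and a BERT tokenizer can be loaded.

# Hedged sketch: convert two hand-built NER examples to padded features.
from transformers import BertTokenizer
from utils_ner import InputExample, convert_examples_to_features, get_labels

labels = get_labels(None)  # falls back to the default CoNLL-2003 label set
examples = [
    InputExample(guid="train-1",
                 words=["John", "lives", "in", "Berlin"],
                 labels=["B-PER", "O", "O", "B-LOC"]),
    InputExample(guid="train-2",
                 words=["EU", "rejects", "German", "call"],
                 labels=["B-ORG", "O", "B-MISC", "O"]),
]
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
features = convert_examples_to_features(
    examples, labels, 32, tokenizer,
    cls_token=tokenizer.cls_token,
    sep_token=tokenizer.sep_token,
    pad_token=tokenizer.pad_token_id,
    pad_token_label_id=-1,
)
print(len(features), "features, each padded to", len(features[0].input_ids), "tokens")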
fat-albert
fat-albert-master/bert/examples/run_xnli.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning multi-lingual models on XNLI (Bert, DistilBERT, XLM). Adapted from `examples/run_glue.py`""" from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer, XLMConfig, XLMForSequenceClassification, XLMTokenizer, DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from transformers import xnli_compute_metrics as compute_metrics from transformers import xnli_output_modes as output_modes from transformers import xnli_processors as processors from transformers import glue_convert_examples_to_features as convert_examples_to_features logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, DistilBertConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = 
get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert'] else None # XLM and DistilBERT don't use segment_ids outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not 
os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in ['bert'] else None # XLM and DistilBERT don't use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) else: raise ValueError('No other `output_mode` for XNLI.') result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task](language=args.language, train_language=args.train_language) output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}_{}'.format( 'test' if evaluate else 'train', list(filter(None, 
args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task), str(args.train_language if (not evaluate and args.train_language is not None) else args.language))) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() examples = processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) else: raise ValueError('No other `output_mode` for XNLI.') dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--language", default=None, type=str, required=True, help="Evaluation language. Also train language if `train_language` is set to None.") parser.add_argument("--train_language", default=None, type=str, help="Train language if is different of the evaluation language.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the test set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Prepare XNLI task args.task_name = 'xnli' if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name](language=args.language, train_language=args.train_language) args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
27,117
51.554264
151
py
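run_xnli.py above builds its inputs through the XNLI processors shipped with transformers. A hedged sketch of that data path in isolation is shown below; "./xnli_data" is a placeholder directory that would need to contain the XNLI/MultiNLI files the processor expects.

# Hedged sketch: the feature pipeline run_xnli.py relies on, exercised directly.
from transformers import BertTokenizer
from transformers import xnli_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features

processor = processors["xnli"](language="de", train_language="en")
label_list = processor.get_labels()  # ["contradiction", "entailment", "neutral"]
examples = processor.get_test_examples("./xnli_data")  # assumption: XNLI test tsv under this dir

tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
features = convert_examples_to_features(
    examples, tokenizer,
    label_list=label_list,
    max_length=128,
    output_mode="classification",
)
print(len(features), "evaluation features for language 'de'")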
fat-albert
fat-albert-master/bert/examples/utils_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Load SQuAD dataset. """ from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from tqdm import tqdm from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize # Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, cls_index, p_mask, paragraph_len, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.cls_index = cls_index self.p_mask = p_mask self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): 
prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True, sequence_a_is_doc=False): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 # cnt_pos, cnt_neg = 0, 0 # max_N, max_M = 1024, 1024 # f = np.zeros((max_N, max_M), dtype=np.float32) features = [] for (example_index, example) in enumerate(tqdm(examples)): # if example_index % 100 == 0: # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 assert 
max_tokens_for_doc > 0 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implem also keep the classification token (set to 0) (not sure why...) p_mask = [] # CLS token at the beginning if not cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = 0 # XLNet: P SEP Q SEP CLS # Others: CLS Q SEP P SEP if not sequence_a_is_doc: # Query tokens += query_tokens segment_ids += [sequence_a_segment_id] * len(query_tokens) p_mask += [1] * len(query_tokens) # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # Paragraph for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) if not sequence_a_is_doc: segment_ids.append(sequence_b_segment_id) else: segment_ids.append(sequence_a_segment_id) p_mask.append(0) paragraph_len = doc_span.length if sequence_a_is_doc: # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) tokens += query_tokens segment_ids += [sequence_b_segment_id] * len(query_tokens) p_mask += [1] * len(query_tokens) # SEP token tokens.append(sep_token) segment_ids.append(sequence_b_segment_id) p_mask.append(1) # CLS token at the end if cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = len(tokens) - 1 # Index of classification token input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(pad_token) input_mask.append(0 if mask_padding_with_zero else 1) segment_ids.append(pad_token_segment_id) p_mask.append(1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 span_is_impossible = True else: if sequence_a_is_doc: doc_offset = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = cls_index end_position = cls_index if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: logger.info("impossible example") if is_training and not span_is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. 
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions # For XLNet (and XLM which uses the same head) RawResultExtended = collections.namedtuple("RawResultExtended", ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"]) def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging): """ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. 
Requires utils_squad_evaluate.py """ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) logger.info("Writing predictions to: %s", output_prediction_file) # logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] cur_null_score = result.cls_logits # if we could have irrelevant answers, get the min score of irrelevant score_null = min(score_null, cur_null_score) for i in range(start_n_top): for j in range(end_n_top): start_log_prob = result.start_top_log_probs[i] start_index = result.start_top_index[i] j_index = i * end_n_top + j end_log_prob = result.end_top_log_probs[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] # XLNet un-tokenizer # Let's keep it simple for now and see if we need all this later. 
# # tok_start_to_orig_index = feature.tok_start_to_orig_index # tok_end_to_orig_index = feature.tok_end_to_orig_index # start_orig_pos = tok_start_to_orig_index[pred.start_index] # end_orig_pos = tok_end_to_orig_index[pred.end_index] # paragraph_text = example.paragraph_text # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() # Previously used Bert untokenizer tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") with open(orig_data_file, "r", encoding='utf-8') as reader: orig_data = json.load(reader)["data"] qid_to_has_ans = make_qid_to_has_ans(orig_data) has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) out_eval = {} find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) return out_eval def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. 
# # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
42,376
40.627701
112
py
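The doc_stride sliding-window logic in convert_examples_to_features above is the piece most worth understanding before reusing utils_squad.py: long documents are cut into overlapping _DocSpan chunks, and each token is later attributed to the span where it has "maximum context". Below is a minimal, self-contained sketch of just that chunking step; the toy token list and the max_tokens_for_doc / doc_stride values are invented inputs for illustration, not anything run_squad.py actually passes.

import collections

# Sketch of the sliding-window chunking used in convert_examples_to_features
# (toy sizes assumed; real values come from max_seq_length and --doc_stride).
DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def split_into_doc_spans(all_doc_tokens, max_tokens_for_doc, doc_stride):
    """Slide a window of at most max_tokens_for_doc tokens over the document,
    advancing by doc_stride tokens per step, mirroring the loop above."""
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = min(len(all_doc_tokens) - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)
    return doc_spans

tokens = ["tok%d" % i for i in range(10)]
for span in split_into_doc_spans(tokens, max_tokens_for_doc=4, doc_stride=2):
    print(span)  # overlapping windows: (0,4), (2,4), (4,4), (6,4)

Because a token can land in several overlapping spans, _check_is_max_context above then keeps only the span where min(left context, right context) is largest when scoring that token.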
fat-albert
fat-albert-master/bert/examples/utils_multiple_choice.bak.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension """ from __future__ import absolute_import, division, print_function import logging import os import sys from io import open import json import csv import glob import tqdm from typing import List from transformers import PreTrainedTokenizer logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for multiple choice""" def __init__(self, example_id, question, contexts, endings, label=None): """Constructs a InputExample. Args: example_id: Unique id for the example. contexts: list of str. The untokenized text of the first sequence (context of corresponding question). question: string. The untokenized text of the second sequence (question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.example_id = example_id self.question = question self.contexts = contexts self.endings = endings self.label = label class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for input_ids, input_mask, segment_ids in choices_features ] self.label = label class DataProcessor(object): """Base class for data converters for multiple choice data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() class RaceProcessor(DataProcessor): """Processor for the RACE data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) high = os.path.join(data_dir, 'train/high') middle = os.path.join(data_dir, 'train/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'train') def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) high = os.path.join(data_dir, 'dev/high') middle = os.path.join(data_dir, 'dev/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'dev') def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} test".format(data_dir)) high = 
os.path.join(data_dir, 'test/high') middle = os.path.join(data_dir, 'test/middle') high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, 'test') def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_txt(self, input_dir): lines = [] files = glob.glob(input_dir + "/*txt") for file in tqdm.tqdm(files, desc="read files"): with open(file, 'r', encoding='utf-8') as fin: data_raw = json.load(fin) data_raw["race_id"] = file lines.append(data_raw) return lines def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (_, data_raw) in enumerate(lines): race_id = "%s-%s" % (set_type, data_raw["race_id"]) article = data_raw["article"] for i in range(len(data_raw["answers"])): truth = str(ord(data_raw['answers'][i]) - ord('A')) question = data_raw['questions'][i] options = data_raw['options'][i] examples.append( InputExample( example_id=race_id, question=question, contexts=[article, article, article, article], # this is not efficient but convenient endings=[options[0], options[1], options[2], options[3]], label=truth)) return examples class SwagProcessor(DataProcessor): """Processor for the SWAG data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For swag testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_csv(self, input_file): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ InputExample( example_id=line[2], question=line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
contexts = [line[4], line[4], line[4], line[4]], endings = [line[7], line[8], line[9], line[10]], label=line[11] ) for line in lines[1:] # we skip the line with the column names ] return examples class MovieQAProcessor(DataProcessor): """Processor for the MovieQA data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) raise ValueError( "For movieqa testing, the input file does not contain a label column. It can not be tested in current code" "setting!" ) return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3", "4"] def _read_csv(self, input_file): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" if type == "train" and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ InputExample( example_id=line[2], question=line[5], # in the movieqa dataset, the # common beginning of each # choice is stored in "sent2". contexts = [line[4], line[4], line[4], line[4], line[4]], endings = [line[7], line[8], line[9], line[10], line[11]], label=line[12] ) for line in lines[1:] # we skip the line with the column names ] return examples class ArcProcessor(DataProcessor): """Processor for the ARC data set (request from allennlp).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev") def get_test_examples(self, data_dir): logger.info("LOOKING AT {} test".format(data_dir)) return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_json(self, input_file): with open(input_file, 'r', encoding='utf-8') as fin: lines = fin.readlines() return lines def _create_examples(self, lines, type): """Creates examples for the training and dev sets.""" #There are two types of labels. They should be normalized def normalize(truth): if truth in "ABCD": return ord(truth) - ord("A") elif truth in "1234": return int(truth) - 1 else: logger.info("truth ERROR! 
%s", str(truth)) return None examples = [] three_choice = 0 four_choice = 0 five_choice = 0 other_choices = 0 # we deleted example which has more than or less than four choices for line in tqdm.tqdm(lines, desc="read arc data"): data_raw = json.loads(line.strip("\n")) if len(data_raw["question"]["choices"]) == 3: three_choice += 1 continue elif len(data_raw["question"]["choices"]) == 5: five_choice += 1 continue elif len(data_raw["question"]["choices"]) != 4: other_choices += 1 continue four_choice += 1 truth = str(normalize(data_raw["answerKey"])) assert truth != "None" question_choices = data_raw["question"] question = question_choices["stem"] id = data_raw["id"] options = question_choices["choices"] if len(options) == 4: examples.append( InputExample( example_id = id, question=question, contexts=[options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""), options[2]["para"].replace("_", ""), options[3]["para"].replace("_", "")], endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]], label=truth)) if type == "train": assert len(examples) > 1 assert examples[0].label is not None logger.info("len examples: %s}", str(len(examples))) logger.info("Three choices: %s", str(three_choice)) logger.info("Five choices: %s", str(five_choice)) logger.info("Other choices: %s", str(other_choices)) logger.info("four choices: %s", str(four_choice)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, pad_token_segment_id=0, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_features = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer.encode_plus( text_a, text_b, add_special_tokens=True, max_length=max_length, ) if 'num_truncated_tokens' in inputs and inputs['num_truncated_tokens'] > 0: logger.info('Attention! you are cropping tokens (swag task is ok). ' 'If you are training ARC and RACE and you are poping question + options,' 'you need to try to use a bigger max seq length!') input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"] # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length) assert len(input_ids) == max_length assert len(attention_mask) == max_length assert len(token_type_ids) == max_length choices_features.append((input_ids, attention_mask, token_type_ids)) label = label_map[example.label] if ex_index < 2: logger.info("*** Example ***") logger.info("race_id: {}".format(example.example_id)) for choice_idx, (input_ids, attention_mask, token_type_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("attention_mask: {}".format(' '.join(map(str, attention_mask)))) logger.info("token_type_ids: {}".format(' '.join(map(str, token_type_ids)))) logger.info("label: {}".format(label)) features.append( InputFeatures( example_id=example.example_id, choices_features=choices_features, label=label, ) ) return features processors = { "race": RaceProcessor, "swag": SwagProcessor, "movieqa": MovieQAProcessor, "arc": ArcProcessor } MULTIPLE_CHOICE_TASKS_NUM_LABELS = { "race", 4, "swag", 4, "movieqa", 5, "arc", 4 }
17,575
36.797849
119
py
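One detail worth flagging in utils_multiple_choice.bak.py: the trailing MULTIPLE_CHOICE_TASKS_NUM_LABELS literal separates task names and label counts with commas, so Python parses it as an eight-element set rather than a task-to-count mapping. A hedged sketch of the dict that the name and the processors' get_labels() methods above suggest was intended:

# Sketch only: the original file writes comma-separated pairs inside braces,
# which Python reads as a set. A dict keyed by task name is the presumably
# intended shape; label counts are taken from the processors defined above.
MULTIPLE_CHOICE_TASKS_NUM_LABELS = {
    "race": 4,
    "swag": 4,
    "movieqa": 5,
    "arc": 4,
}

# Example lookup, mirroring how a run script would typically resolve a task:
task_name = "movieqa"
print(task_name, MULTIPLE_CHOICE_TASKS_NUM_LABELS[task_name])  # -> movieqa 5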
fat-albert
fat-albert-master/bert/examples/contrib/run_transfo_xl.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Transformer XL model evaluation script. Adapted from https://github.com/kimiyoung/transformer-xl. In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py This script with default values evaluates a pretrained Transformer-XL on WikiText 103 """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging import time import math import torch from transformers import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model') parser.add_argument('--model_name', type=str, default='transfo-xl-wt103', help='pretrained model name') parser.add_argument('--split', type=str, default='test', choices=['all', 'valid', 'test'], help='which split to evaluate') parser.add_argument('--batch_size', type=int, default=10, help='batch size') parser.add_argument('--tgt_len', type=int, default=128, help='number of tokens to predict') parser.add_argument('--ext_len', type=int, default=0, help='length of the extended context') parser.add_argument('--mem_len', type=int, default=1600, help='length of the retained previous heads') parser.add_argument('--clamp_len', type=int, default=1000, help='max positional embedding index') parser.add_argument('--no_cuda', action='store_true', help='Do not use CUDA even though CUA is available') parser.add_argument('--work_dir', type=str, required=True, help='path to the work_dir') parser.add_argument('--no_log', action='store_true', help='do not log the eval result') parser.add_argument('--same_length', action='store_true', help='set same length attention with masking') parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() assert args.ext_len >= 0, 'extended context length must be non-negative' if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") logger.info("device: {}".format(device)) # Load a pre-processed dataset # You can also build the corpus yourself using TransfoXLCorpus methods # The pre-processing involve computing word frequencies to prepare the Adaptive input and SoftMax # and tokenizing 
the dataset # The pre-processed corpus is a convertion (using the conversion script ) tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name) corpus = TransfoXLCorpus.from_pretrained(args.model_name) ntokens = len(corpus.vocab) va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) # Load a pre-trained model model = TransfoXLLMHeadModel.from_pretrained(args.model_name) model = model.to(device) logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format( args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len)) model.reset_length(args.tgt_len, args.ext_len, args.mem_len) if args.clamp_len > 0: model.clamp_len = args.clamp_len if args.same_length: model.same_length = True ############################################################################### # Evaluation code ############################################################################### def evaluate(eval_iter): # Turn on evaluation mode which disables dropout. model.eval() total_len, total_loss = 0, 0. start_time = time.time() with torch.no_grad(): mems = None for idx, (data, target, seq_len) in enumerate(eval_iter): ret = model(data, lm_labels=target, mems=mems) loss, _, mems = ret loss = loss.mean() total_loss += seq_len * loss.item() total_len += seq_len total_time = time.time() - start_time logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format( total_time, 1000 * total_time / (idx+1))) return total_loss / total_len # Run on test data. if args.split == 'all': test_loss = evaluate(te_iter) valid_loss = evaluate(va_iter) elif args.split == 'valid': valid_loss = evaluate(va_iter) test_loss = None elif args.split == 'test': test_loss = evaluate(te_iter) valid_loss = None def format_log(loss, split): log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format( split, loss, math.exp(loss)) return log_str log_str = '' if valid_loss is not None: log_str += format_log(valid_loss, 'valid') if test_loss is not None: log_str += format_log(test_loss, 'test') logger.info('=' * 100) logger.info(log_str) logger.info('=' * 100) if __name__ == '__main__': main()
6,742
42.785714
111
py
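The core of run_transfo_xl.py is the evaluate() loop: it weights each segment's mean loss by its token count, averages over the whole split, and the script reports perplexity as exp(average loss) via format_log. A small self-contained sketch of that bookkeeping; the (seq_len, loss) pairs are made up for illustration, not produced by the model.

import math

# Sketch of the loss/perplexity bookkeeping in evaluate() above,
# using invented (segment_length, mean_loss) pairs instead of model output.
segments = [(128, 3.10), (128, 3.25), (64, 2.90)]

total_len, total_loss = 0, 0.0
for seq_len, loss in segments:
    total_loss += seq_len * loss   # weight each segment by its token count
    total_len += seq_len

avg_loss = total_loss / total_len
print("loss {:5.2f} | ppl {:9.3f}".format(avg_loss, math.exp(avg_loss)))

Weighting by seq_len matters because the final segment returned by the corpus iterator can be shorter than tgt_len, so an unweighted mean of per-segment losses would slightly misstate the perplexity.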
fat-albert
fat-albert-master/bert/examples/contrib/run_swag.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner. Finetuning the library models for multiple choice on SWAG (Bert). """ from __future__ import absolute_import, division, print_function import argparse import logging import csv import os import random import sys import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in [BertConfig]), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), } class SwagExample(object): """A single training/test example for the SWAG dataset.""" def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label = None): self.swag_id = swag_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "swag_id: {}".format(self.swag_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_swag_examples(input_file, is_training=True): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ SwagExample( swag_id = line[2], context_sentence = line[4], start_ending = line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], label = int(line[11]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in tqdm(enumerate(examples)): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_swag_examples(input_file) features = convert_examples_to_features( examples, tokenizer, args.max_seq_length, not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in features], dtype=torch.long) if evaluate: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) else: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if output_examples: return dataset, examples, features return dataset def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = 
AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], #'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % 
args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs['input_ids'].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SWAG csv for training. E.g., train.csv") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SWAG csv for predictions. 
E.g., val.csv or test.csv") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.local_rank == -1 or torch.distributed.get_rank() == 0: # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: checkpoints = [args.output_dir] else: # if do_train is False and do_eval is true, load model directly from pretrained. checkpoints = [args.model_name_or_path] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) tokenizer = tokenizer_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
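A minimal sketch of the decision rule described in the comments of convert_examples_to_features above (a softmax over the per-choice scores followed by an argmax, matching the accuracy helper); the logit values below are made up purely for illustration:

import numpy as np

# One example's per-choice scores from the multiple-choice head (illustrative values).
logits = np.array([[0.2, 1.7, -0.3, 0.5]])
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax over the 4 choices
predicted_choice = int(np.argmax(probs, axis=1)[0])                 # -> 1, i.e. the second ending is selected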
31,683
45.731563
154
py
fat-albert
fat-albert-master/bert/examples/contrib/run_movieqa.bak.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner. Finetuning the library models for multiple choice on MovieQA (Bert). """ from __future__ import absolute_import, division, print_function import argparse import logging import csv import os import random import sys import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in [BertConfig]), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), } class MovieQAExample(object): """A single training/test example for the MovieQA dataset.""" def __init__(self, movieqa_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, ending_4, label = None): self.movieqa_id = movieqa_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ending_4, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "movieqa_id: {}".format(self.movieqa_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), "ending_4: {}".format(self.endings[4]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_movieqa_examples(input_file, is_training=True): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) if lines[0][-1] not in ['0','1','2','3','4']: print("WRONG LABEL") import sys sys.exit() examples = [ MovieQAExample( movieqa_id = line[2], context_sentence = line[4], start_ending = line[5], # in the movieQA dataset, the # common beginning of each # choice is stored in "sent2". 
ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], ending_4 = line[11], label = int(line[12]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # MovieQA is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given MovieQA example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # - [CLS] context [SEP] choice_5 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in tqdm(enumerate(examples)): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("movieqa_id: {}".format(example.movieqa_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.movieqa_id, choices_features = choices_features, label = label ) ) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. 
This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_movieqa_examples(input_file) features = convert_examples_to_features( examples, tokenizer, args.max_seq_length, not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in features], dtype=torch.long) if evaluate: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) else: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if output_examples: return dataset, examples, features return dataset def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if 
not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], #'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], 
global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs['input_ids'].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="MovieQA csv for training. E.g., train.csv") parser.add_argument("--predict_file", default=None, type=str, required=True, help="MovieQA csv for predictions. 
E.g., val.csv or test.csv") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.local_rank == -1 or torch.distributed.get_rank() == 0: # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: checkpoints = [args.output_dir] else: # if do_train is False and do_eval is true, load model directly from pretrained. checkpoints = [args.model_name_or_path] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) tokenizer = tokenizer_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
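A minimal sketch of the CSV row layout that read_movieqa_examples above expects, inferred only from the column indices it reads; peek_examples is a hypothetical helper for inspection, not part of the script:

import csv

def peek_examples(path):
    # Column indices as consumed by read_movieqa_examples:
    #   row[2]    -> movieqa_id
    #   row[4]    -> context_sentence
    #   row[5]    -> start_ending (common beginning of every choice)
    #   row[7:12] -> the five candidate endings
    #   row[12]   -> label in 0..4 (required when training)
    with open(path, 'r', encoding='utf-8') as f:
        rows = list(csv.reader(f))
    return [(r[2], r[4], r[5], r[7:12], int(r[12])) for r in rows[1:]]  # skip the header row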
32,036
45.701166
154
py
fat-albert
fat-albert-master/bert/examples/contrib/run_openai_gpt.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT model fine-tuning script. Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset: python run_openai_gpt.py \ --model_name openai-gpt \ --do_train \ --do_eval \ --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \ --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \ --output_dir ../log \ --train_batch_size 16 \ """ import argparse import os import csv import random import logging from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME, get_linear_schedule_with_warmup) ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz" logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def load_rocstories_dataset(dataset_path): """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """ with open(dataset_path, encoding='utf_8') as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1)) return output def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for i, (story, cont1, cont2, mc_label), in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, :len(with_cont1)] = with_cont1 input_ids[i, 1, :len(with_cont2)] = 
with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = len(with_cont2) - 1 lm_labels[i, 0, :len(with_cont1)] = with_cont1 lm_labels[i, 1, :len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets def main(): parser = argparse.ArgumentParser() parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name') parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument('--train_dataset', type=str, default='') parser.add_argument('--eval_dataset', type=str, default='') parser.add_argument('--seed', type=int, default=42) parser.add_argument('--num_train_epochs', type=int, default=3) parser.add_argument('--train_batch_size', type=int, default=8) parser.add_argument('--eval_batch_size', type=int, default=16) parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument('--max_grad_norm', type=int, default=1) parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training \ steps to perform. Override num_train_epochs.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before\ performing a backward/update pass.") parser.add_argument('--learning_rate', type=float, default=6.25e-5) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--lr_schedule', type=str, default='warmup_linear') parser.add_argument('--weight_decay', type=float, default=0.01) parser.add_argument('--lm_coef', type=float, default=0.9) parser.add_argument('--n_valid', type=int, default=374) parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() print(args) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(device, n_gpu)) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset special_tokens = ['_start_', '_delimiter_', '_classify_'] tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name) tokenizer.add_tokens(special_tokens) special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens) model = 
OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name) model.resize_token_embeddings(len(tokenizer)) model.to(device) # Load and encode the datasets if not args.train_dataset and not args.eval_dataset: roc_stories = cached_path(ROCSTORIES_URL) def tokenize_and_encode(obj): """ Tokenize and encode a nested object """ if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) elif isinstance(obj, int): return obj return list(tokenize_and_encode(o) for o in obj) logger.info("Encoding dataset...") train_dataset = load_rocstories_dataset(args.train_dataset) eval_dataset = load_rocstories_dataset(args.eval_dataset) datasets = (train_dataset, eval_dataset) encoded_datasets = tokenize_and_encode(datasets) # Compute the max input length for the Transformer max_length = model.config.n_positions // 2 - 2 input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 \ for dataset in encoded_datasets for story, cont1, cont2, _ in dataset) input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids) train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1] train_data = TensorDataset(*train_tensor_dataset) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) eval_data = TensorDataset(*eval_tensor_dataset) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) # Prepare optimizer if args.do_train: if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps //\ (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader)\ // args.gradient_accumulation_steps * args.num_train_epochs param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_steps = 0 tqdm_bar = tqdm(train_dataloader, desc="Training") for step, batch in enumerate(tqdm_bar): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) loss = args.lm_coef * losses[0] + losses[1] loss.backward() scheduler.step() optimizer.step() optimizer.zero_grad() tr_loss += loss.item() exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item() nb_tr_steps += 1 tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0]) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, 'module') 
else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir) tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir) model.to(device) if args.do_eval: model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch with torch.no_grad(): _, mc_loss, _, mc_logits = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) mc_logits = mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to('cpu').numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss/nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == '__main__': main()
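A minimal sketch (with made-up token ids) of the per-example tensors that pre_process_datasets above builds for the two candidate continuations; the special-token ids and sequences here are illustrative assumptions, not values from the script:

import numpy as np

# Ids of the '_start_', '_delimiter_' and '_classify_' special tokens (illustrative values).
start_token, delimiter_token, clf_token = 40478, 40479, 40480
story, cont1 = [11, 12, 13], [21, 22]  # already-encoded story and first continuation

with_cont1 = [start_token] + story + [delimiter_token] + cont1 + [clf_token]
input_len = 16
input_ids = np.zeros((1, 2, input_len), dtype=np.int64)   # (n_batch, n_alternatives, length)
input_ids[0, 0, :len(with_cont1)] = with_cont1            # first alternative; the second is filled the same way
mc_token_ids = np.array([[len(with_cont1) - 1, 0]])       # position of the _classify_ token used by the MC head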
14,471
48.731959
132
py
fat-albert
fat-albert-master/bert/examples/contrib/run_movieqa.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner. Finetuning the library models for multiple choice on MovieQA (Bert). """ from __future__ import absolute_import, division, print_function import argparse import logging import csv import os import random import sys import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in [BertConfig]), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForMultipleChoice, BertTokenizer), } class MovieQAExample(object): """A single training/test example for the MovieQA dataset.""" def __init__(self, movieqa_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, ending_4, label = None): self.movieqa_id = movieqa_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ending_4, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "movieqa_id: {}".format(self.movieqa_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), "ending_4: {}".format(self.endings[4]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_movieqa_examples(input_file, is_training=True): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) if lines[0][-1] not in ['0','1','2','3','4']: print("WRONG LABEL") import sys sys.exit() examples = [ MovieQAExample( movieqa_id = line[2], context_sentence = line[4], start_ending = line[5], # in the movieQA dataset, the # common beginning of each # choice is stored in "sent2". 
ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], ending_4 = line[11], label = int(line[12]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # MovieQA is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given MovieQA example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # - [CLS] context [SEP] choice_5 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in tqdm(enumerate(examples)): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" #_truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("movieqa_id: {}".format(example.movieqa_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.movieqa_id, choices_features = choices_features, label = label ) ) return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". 
For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_movieqa_examples(input_file) features = convert_examples_to_features( examples, tokenizer, args.max_seq_length, not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor(select_field(features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in features], dtype=torch.long) if evaluate: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) else: dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if output_examples: return dataset, examples, features return dataset def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = 
AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], #'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % 
args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids 'token_type_ids': batch[2], 'labels': batch[3]} # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs['input_ids'].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="MovieQA csv for training. E.g., train.csv") parser.add_argument("--predict_file", default=None, type=str, required=True, help="MovieQA csv for predictions. 
E.g., val.csv or test.csv") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.local_rank == -1 or torch.distributed.get_rank() == 0: # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: checkpoints = [args.output_dir] else: # if do_train is False and do_eval is true, load model directly from pretrained. checkpoints = [args.model_name_or_path] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) tokenizer = tokenizer_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
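# --- Illustrative note (not part of the original run_movieqa.py record above) ---
# A minimal sketch of how one data row maps onto MovieQAExample in
# read_movieqa_examples(), inferred only from the column indices that function
# reads (2, 4, 5, 7-12). The row values below are made-up placeholders.
row = ['', '', 'train:0', '', 'Plot passage used as context.', 'What happens next?',
       '', 'choice A', 'choice B', 'choice C', 'choice D', 'choice E', '2']

example_fields = {
    'movieqa_id': row[2],
    'context_sentence': row[4],
    'start_ending': row[5],
    'endings': row[7:12],      # five answer candidates
    'label': int(row[12]),     # index of the correct answer, 0-4 (training only)
}
print(example_fields)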
35,214
45.274639
154
py
fat-albert
fat-albert-master/bert/examples/contrib/run_camembert.py
from pathlib import Path
import tarfile
import urllib.request

import torch
from transformers.tokenization_camembert import CamembertTokenizer
from transformers.modeling_camembert import CamembertForMaskedLM


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join([tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append((
                masked_input.replace(' {0}'.format(masked_token), predicted_token),
                values[index].item(),
                predicted_token,
            ))
        else:
            topk_filled_outputs.append((
                masked_input.replace(masked_token, predicted_token),
                values[index].item(),
                predicted_token,
            ))
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
2,015
40.142857
114
py
fat-albert
fat-albert-master/bert/examples/distillation/grouped_batch_sampler.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Adapted from PyTorch Vision
(https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py)
"""
import bisect
import copy
from collections import defaultdict

import numpy as np
from torch.utils.data.sampler import BatchSampler, Sampler

from utils import logger


def _quantize(x, bins):
    bins = copy.deepcopy(bins)
    bins = sorted(bins)
    quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
    return quantized


def create_lengths_groups(lengths, k=0):
    bins = np.arange(start=3, stop=k, step=4).tolist() if k > 0 else [10]
    groups = _quantize(lengths, bins)
    # count number of elements per group
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    logger.info("Using {} as bins for aspect lengths quantization".format(fbins))
    logger.info("Count of instances per bin: {}".format(counts))
    return groups


class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that the batch only contain elements from the same group.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.

    Arguments:
        sampler (Sampler): Base sampler.
        group_ids (list[int]): If the sampler produces indices in range [0, N),
            `group_ids` must be a list of `N` ints which contains the group id of each sample.
            The group ids must be a continuous set of integers starting from
            0, i.e. they must be in the range [0, num_groups).
        batch_size (int): Size of mini-batch.
    """
    def __init__(self, sampler, group_ids, batch_size):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = group_ids
        self.batch_size = batch_size

    def __iter__(self):
        buffer_per_group = defaultdict(list)
        samples_per_group = defaultdict(list)

        num_batches = 0
        for idx in self.sampler:
            group_id = self.group_ids[idx]
            buffer_per_group[group_id].append(idx)
            samples_per_group[group_id].append(idx)
            if len(buffer_per_group[group_id]) == self.batch_size:
                yield buffer_per_group[group_id]  # TODO
                num_batches += 1
                del buffer_per_group[group_id]
            assert len(buffer_per_group[group_id]) < self.batch_size

        # now we have run out of elements that satisfy
        # the group criteria, let's return the remaining
        # elements so that the size of the sampler is
        # deterministic
        expected_num_batches = len(self)
        num_remaining = expected_num_batches - num_batches
        if num_remaining > 0:
            # for the remaining batches, group the batches by similar lengths
            batch_idx = []
            for group_id, idxs in sorted(buffer_per_group.items(), key=lambda x: x[0]):
                batch_idx.extend(idxs)
                if len(batch_idx) >= self.batch_size:
                    yield batch_idx[:self.batch_size]
                    batch_idx = batch_idx[self.batch_size:]
                    num_remaining -= 1
            if len(batch_idx) > 0:
                yield batch_idx
                num_remaining -= 1
        assert num_remaining == 0

    def __len__(self):
        """
        Return the number of mini-batches rather than the number of samples.
        """
        return (len(self.sampler) + self.batch_size - 1) // self.batch_size
4,368
40.216981
125
py
fat-albert
fat-albert-master/bert/examples/distillation/utils.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils to train DistilBERT
    adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import git
import json
import os
import socket

import torch
import numpy as np

import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch)
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method='env://',
            backend='nccl',
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
4,308
32.146154
104
py
fat-albert
fat-albert-master/bert/examples/distillation/lm_seqs_dataset.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataset to distilled models
    adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import torch
from torch.utils.data import Dataset

import numpy as np

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.

    Input:
    ------
        params: `NameSpace` parameters
        data: `List[np.array[int]]`
    """
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """
        Some sanity checks
        """
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """
        Sequences that are too long are split into chunks of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i:i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len-2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def print_statistics(self):
        """
        Print some statistics on the corpus. Only the master process.
        """
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unkown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unkown} unknown tokens (covering {100*nb_unkown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx]*(max_seq_len_-len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)      # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
34.881579
111
py
fat-albert
fat-albert-master/bert/examples/distillation/run_squad_w_distillation.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is the exact same script as `examples/run_squad.py` (as of 2019, October 4th) with an additional and optional step of distillation.""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler import torch.nn.functional as F import torch.nn as nn try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from ..utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended) # The follwing import is the official SQuAD evaluation script (2.0). 
# You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from ..utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() if teacher is not None: teacher.eval() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss, start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: if 'token_type_ids' not in inputs: inputs['token_type_ids'] = None if args.teacher_type == 'xlm' else batch[2] with torch.no_grad(): start_logits_tea, end_logits_tea = teacher(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask']) assert start_logits_tea.size() == start_logits_stu.size() assert end_logits_tea.size() == end_logits_stu.size() loss_fct = nn.KLDivLoss(reduction='batchmean') loss_start = loss_fct(F.log_softmax(start_logits_stu/args.temperature, dim=-1), F.softmax(start_logits_tea/args.temperature, dim=-1)) * (args.temperature**2) loss_end = loss_fct(F.log_softmax(end_logits_stu/args.temperature, dim=-1), F.softmax(end_logits_tea/args.temperature, dim=-1)) * (args.temperature**2) loss_ce = (loss_start + loss_end)/2. 
loss = args.alpha_ce*loss_ce + args.alpha_squad*loss if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, 
is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") # Distillation parameters (optional) parser.add_argument('--teacher_type', default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.") parser.add_argument('--teacher_name_or_path', default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.") parser.add_argument('--alpha_ce', default=0.5, type=float, help="Distillation loss linear weight. Only for distillation.") parser.add_argument('--alpha_squad', default=0.5, type=float, help="True SQuAD loss linear weight. Only for distillation.") parser.add_argument('--temperature', default=2.0, type=float, help="Distillation temperature. 
Only for distillation.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. 
" "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = 
tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_ce > 0. assert args.alpha_ce + args.alpha_squad > 0. assert args.teacher_type != 'distilbert', "We constraint teachers not to be of type DistilBERT." teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) teacher = teacher_model_class.from_pretrained(args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None) teacher.to(args.device) else: teacher = None if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
33,733
55.129784
151
py
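A minimal illustrative sketch (not part of the repository dump above): the record ends by packing features into a torch TensorDataset, which is typically wrapped in a sampler and DataLoader before training. The names `dataset` and `batch_size` here are assumptions for illustration.

from torch.utils.data import DataLoader, RandomSampler

def make_train_loader(dataset, batch_size=8):
    # Shuffle the (input_ids, input_mask, segment_ids, ...) tuples each epoch.
    sampler = RandomSampler(dataset)
    return DataLoader(dataset, sampler=sampler, batch_size=batch_size)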
fat-albert
fat-albert-master/bert/examples/distillation/train.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training the distilled model. Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. """ import os import argparse import pickle import json import shutil import numpy as np import torch from transformers import BertConfig, BertForMaskedLM, BertTokenizer from transformers import RobertaConfig, RobertaForMaskedLM, RobertaTokenizer from transformers import DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer from transformers import GPT2Config, GPT2LMHeadModel, GPT2Tokenizer from distiller import Distiller from utils import git_log, logger, init_gpu_params, set_seed from lm_seqs_dataset import LmSeqsDataset MODEL_CLASSES = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer) } def sanity_checks(args): """ A bunch of args sanity checks to perform even starting... """ assert (args.mlm and args.alpha_mlm > 0.) or (not args.mlm and args.alpha_mlm == 0.) assert (args.alpha_mlm > 0. and args.alpha_clm == 0.) or (args.alpha_mlm == 0. and args.alpha_clm > 0.) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ['roberta', 'distilbert']) and (args.teacher_type in ['roberta', 'bert']) else: assert (args.student_type in ['gpt2']) and (args.teacher_type in ['gpt2']) assert args.teacher_type == args.student_type or (args.student_type=='distilbert' and args.teacher_type=='bert') assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ['roberta'] assert args.alpha_ce >= 0. assert args.alpha_mlm >= 0. assert args.alpha_clm >= 0. assert args.alpha_mse >= 0. assert args.alpha_cos >= 0. assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0. 
def freeze_pos_embeddings(student, args): if args.student_type == 'roberta': student.roberta.embeddings.position_embeddings.weight.requires_grad = False elif args.student_type == 'gpt2': student.transformer.wpe.weight.requires_grad = False def freeze_token_type_embeddings(student, args): if args.student_type == 'roberta': student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False def main(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--force", action='store_true', help="Overwrite dump_path if it already exists.") parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.") parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).") parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.") parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") parser.add_argument("--temperature", default=2., type=float, help="Temperature for the softmax temperature.") parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.") parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.") parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).") parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") parser.add_argument("--restrict_ce_to_mask", action='store_true', help="If true, compute the distilation loss only the [MLM] prediction distribution.") parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.") parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.") parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") parser.add_argument("--group_by_size", action='store_false', help="If true, group sequences that have similar length into the same batch. Default is true.") parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.") parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.") parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") parser.add_argument("--seed", type=int, default=56, help="Random seed") parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") args = parser.parse_args() sanity_checks(args) ## ARGS ## init_gpu_params(args) set_seed(args) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError(f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it' 'Use `--force` if you want to overwrite it') else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f'Experiment will be dumped and logged in {args.dump_path}') ### SAVE PARAMS ### logger.info(f'Param: {args}') with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f: json.dump(vars(args), f, indent=4) git_log(args.dump_path) student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] ### TOKENIZER ### tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) special_tok_ids = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): idx = tokenizer.all_special_tokens.index(tok_symbol) special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] logger.info(f'Special tokens {special_tok_ids}') args.special_tok_ids = special_tok_ids args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] ## DATA LOADER ## logger.info(f'Loading data from {args.data_file}') with open(args.data_file, 'rb') as fp: data = pickle.load(fp) if args.mlm: logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)') with open(args.token_counts, 'rb') as fp: counts = pickle.load(fp) 
token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): token_probs[idx] = 0. # do not predict special tokens token_probs = torch.from_numpy(token_probs) else: token_probs = None train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) logger.info(f'Data loader created.') ## STUDENT ## logger.info(f'Loading student config from {args.student_config}') stu_architecture_config = student_config_class.from_pretrained(args.student_config) stu_architecture_config.output_hidden_states = True if args.student_pretrained_weights is not None: logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}') student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) else: student = student_model_class(stu_architecture_config) if args.n_gpu > 0: student.to(f'cuda:{args.local_rank}') logger.info(f'Student loaded.') ## TEACHER ## teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) if args.n_gpu > 0: teacher.to(f'cuda:{args.local_rank}') logger.info(f'Teacher loaded from {args.teacher_name}.') ## FREEZING ## if args.freeze_pos_embs: freeze_pos_embeddings(student, args) if args.freeze_token_type_embds: freeze_token_type_embeddings(student, args) ## SANITY CHECKS ## assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size ## DISTILLER ## torch.cuda.empty_cache() distiller = Distiller(params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
13,598
45.731959
135
py
fat-albert
fat-albert-master/bert/examples/distillation/distiller.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import os import math import psutil import time from tqdm import trange, tqdm import numpy as np import psutil import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import AdamW from torch.utils.data.distributed import DistributedSampler from torch.utils.data import RandomSampler, BatchSampler, DataLoader try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from transformers import get_linear_schedule_with_warmup from utils import logger from lm_seqs_dataset import LmSeqsDataset from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups class Distiller: def __init__(self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module): logger.info('Initializing Distiller') self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0. 
self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info(f'Using MLM loss for LM step.') self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info(f'Using CLM loss for LM step.') self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.: self.last_loss_mse = 0 if self.alpha_cos > 0.: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean') self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1) if self.alpha_mse > 0.: self.mse_loss_fct = nn.MSELoss(reduction='sum') if self.alpha_cos > 0.: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean') logger.info('--- Initializing model optimizer') assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay}, {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0} ] logger.info("------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad])) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW(optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98)) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize(self.student, self.optimizer, opt_level=self.params.fp16_opt_level) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True) self.is_master = 
params.is_master if self.is_master: logger.info('--- Initializing Tensorboard') self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train')) self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -1 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None]) bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token']) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -1 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. 
There is a -1 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None]) clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -1 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids['pad_token'] else: pad_id = self.params.special_tok_ids['unk_token'] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info('Starting training') self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}') if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}', 'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'}) iter_bar.close() if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}') self.end_epoch() if self.is_master: logger.info(f'Save very last checkpoint as `pytorch_model.bin`.') self.save_checkpoint(checkpoint_name=f'pytorch_model.bin') logger.info('Training is finished') def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. 
lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). """ if self.mlm: s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size) else: s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) assert s_logits.size() == t_logits.size() #https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 #https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1), F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2 loss = self.alpha_ce*loss_ce if self.alpha_mlm > 0.: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.: self.last_loss_mse = loss_mse.item() if self.alpha_cos > 0.: self.last_loss_cos = loss_cos.item() 
self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error('NaN detected') exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter) self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter) if param.grad is None: continue self.tensorboard.add_scalar(tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter) if self.alpha_mlm > 0.: self.tensorboard.add_scalar(tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter) if self.alpha_clm > 0.: self.tensorboard.add_scalar(tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter) if self.alpha_mse > 0.: self.tensorboard.add_scalar(tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter) if self.alpha_cos > 0.: self.tensorboard.add_scalar(tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter) self.tensorboard.add_scalar(tag="global/speed", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). Do some tensorboard logging and checkpoint saving. 
""" logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.') if self.is_master: self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth') self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = 'checkpoint.pth'): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
25,933
46.848708
163
py
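A minimal sketch of the temperature-scaled soft-target loss that the Distiller above combines with the MLM/CLM terms; `s_logits` and `t_logits` are assumed to be same-shaped student and teacher logits, and the function name is illustrative, not the repository's.

import torch.nn as nn
import torch.nn.functional as F

def soft_target_loss(s_logits, t_logits, temperature=2.0):
    # KL divergence between temperature-softened student and teacher distributions,
    # scaled by T^2, mirroring ce_loss_fct in the Distiller record above.
    kl = nn.KLDivLoss(reduction='batchmean')
    return kl(F.log_softmax(s_logits / temperature, dim=-1),
              F.softmax(t_logits / temperature, dim=-1)) * temperature ** 2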
fat-albert
fat-albert-master/bert/examples/distillation/scripts/token_counts.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before training the distilled model.
"""
from collections import Counter
import argparse
import pickle
import logging

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)")
    parser.add_argument("--data_file", type=str, default="data/dump.bert-base-uncased.pickle",
                        help="The binarized dataset.")
    parser.add_argument("--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle",
                        help="The dump file.")
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
2,062
38.673077
129
py
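A short sketch, for illustration only, of how the counts dumped by the script above become smoothed MLM masking probabilities; the same `np.maximum(counts, 1) ** -mlm_smoothing` step appears in train.py earlier in this dump. `special_ids` is an assumed iterable of special-token ids.

import numpy as np
import torch

def masking_probs(counts, smoothing=0.7, special_ids=()):
    # Rarer tokens get proportionally higher masking weight (cf. XLM/word2vec smoothing).
    probs = np.maximum(counts, 1) ** -smoothing
    for idx in special_ids:
        probs[idx] = 0.0  # never select special tokens for masking
    return torch.from_numpy(probs)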
fat-albert
fat-albert-master/bert/examples/distillation/scripts/extract.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training the distilled model. Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2. """ from transformers import BertForMaskedLM, RobertaForMaskedLM, GPT2LMHeadModel import torch import argparse if __name__ == '__main__': parser = argparse.ArgumentParser(description="Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned Distillation") parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default='roberta-large', type=str) parser.add_argument("--dump_checkpoint", default='serialization_dir/tf_roberta_048131723.pth', type=str) parser.add_argument("--vocab_transform", action='store_true') args = parser.parse_args() if args.model_type == 'roberta': model = RobertaForMaskedLM.from_pretrained(args.model_name) prefix = 'roberta' elif args.model_type == 'gpt2': model = GPT2LMHeadModel.from_pretrained(args.model_name) prefix = 'transformer' state_dict = model.state_dict() compressed_sd = {} ### Embeddings ### if args.model_type == 'gpt2': for param_name in ['wte.weight', 'wpe.weight']: compressed_sd[f'{prefix}.{param_name}'] = state_dict[f'{prefix}.{param_name}'] else: for w in ['word_embeddings', 'position_embeddings', 'token_type_embeddings']: param_name = f'{prefix}.embeddings.{w}.weight' compressed_sd[param_name] = state_dict[param_name] for w in ['weight', 'bias']: param_name = f'{prefix}.embeddings.LayerNorm.{w}' compressed_sd[param_name] = state_dict[param_name] ### Transformer Blocks ### std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == 'gpt2': for layer in ['ln_1', 'attn.c_attn', 'attn.c_proj', 'ln_2', 'mlp.c_fc', 'mlp.c_proj']: for w in ['weight', 'bias']: compressed_sd[f'{prefix}.h.{std_idx}.{layer}.{w}'] = \ state_dict[f'{prefix}.h.{teacher_idx}.{layer}.{w}'] compressed_sd[f'{prefix}.h.{std_idx}.attn.bias'] = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias'] else: for layer in ['attention.self.query', 'attention.self.key', 'attention.self.value', 'attention.output.dense', 'attention.output.LayerNorm', 'intermediate.dense', 'output.dense', 'output.LayerNorm']: for w in ['weight', 'bias']: compressed_sd[f'{prefix}.encoder.layer.{std_idx}.{layer}.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'] std_idx += 1 ### Language Modeling Head ###s if args.model_type == 'roberta': for layer in ['lm_head.decoder.weight', 'lm_head.bias']: compressed_sd[f'{layer}'] = state_dict[f'{layer}'] if args.vocab_transform: for w in ['weight', 'bias']: compressed_sd[f'lm_head.dense.{w}'] = state_dict[f'lm_head.dense.{w}'] compressed_sd[f'lm_head.layer_norm.{w}'] = state_dict[f'lm_head.layer_norm.{w}'] elif args.model_type == 'gpt2': for w in ['weight', 'bias']: compressed_sd[f'{prefix}.ln_f.{w}'] = state_dict[f'{prefix}.ln_f.{w}'] compressed_sd[f'lm_head.weight'] = state_dict[f'lm_head.weight'] print(f'N layers selected for 
distillation: {std_idx}') print(f'Number of params transfered for distillation: {len(compressed_sd.keys())}') print(f'Save transfered checkpoint to {args.dump_checkpoint}.') torch.save(compressed_sd, args.dump_checkpoint)
4,326
47.077778
158
py
fat-albert
fat-albert-master/bert/examples/distillation/scripts/extract_distilbert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training DistilBERT. Specific to BERT -> DistilBERT. """ from transformers import BertForMaskedLM, RobertaForMaskedLM import torch import argparse if __name__ == '__main__': parser = argparse.ArgumentParser(description="Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned Distillation") parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default='bert-base-uncased', type=str) parser.add_argument("--dump_checkpoint", default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument("--vocab_transform", action='store_true') args = parser.parse_args() if args.model_type == 'bert': model = BertForMaskedLM.from_pretrained(args.model_name) prefix = 'bert' else: raise ValueError(f'args.model_type should be "bert".') state_dict = model.state_dict() compressed_sd = {} for w in ['word_embeddings', 'position_embeddings']: compressed_sd[f'distilbert.embeddings.{w}.weight'] = \ state_dict[f'{prefix}.embeddings.{w}.weight'] for w in ['weight', 'bias']: compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = \ state_dict[f'{prefix}.embeddings.LayerNorm.{w}'] std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ['weight', 'bias']: compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'] compressed_sd[f'distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'] = \ state_dict[f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'] std_idx += 1 compressed_sd[f'vocab_projector.weight'] = state_dict[f'cls.predictions.decoder.weight'] compressed_sd[f'vocab_projector.bias'] = state_dict[f'cls.predictions.bias'] if args.vocab_transform: for w in ['weight', 'bias']: compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}'] compressed_sd[f'vocab_layer_norm.{w}'] = 
state_dict[f'cls.predictions.transform.LayerNorm.{w}'] print(f'N layers selected for distillation: {std_idx}') print(f'Number of params transfered for distillation: {len(compressed_sd.keys())}') print(f'Save transfered checkpoint to {args.dump_checkpoint}.') torch.save(compressed_sd, args.dump_checkpoint)
4,255
50.277108
158
py
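Both extraction scripts above seed a 6-layer student from teacher layers [0, 2, 4, 7, 9, 11]. A tiny illustrative sketch of that index mapping (the names are not from the repository):

# Teacher-to-student layer selection used by the extraction scripts above.
TEACHER_LAYERS = [0, 2, 4, 7, 9, 11]
layer_map = {student_idx: teacher_idx for student_idx, teacher_idx in enumerate(TEACHER_LAYERS)}
# e.g. student layer 3 is initialized from teacher layer 7
assert layer_map[3] == 7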
fat-albert
fat-albert-master/bert/examples/distillation/scripts/binarized_data.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing script before distillation.
"""
import argparse
import pickle
import random
import time
import logging

import numpy as np

from transformers import BertTokenizer, RobertaTokenizer, GPT2Tokenizer

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument('--file_path', type=str, default='data/dump.txt',
                        help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased',
                        help="The tokenizer to use.")
    parser.add_argument('--dump_file', type=str, default='data/dump',
                        help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info(f'Start encoding')
    logger.info(f'{len(data)} examples to process.')

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start)/interval:.2f}s/expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')

    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    rslt_ = [np.uint16(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
3,630
38.043011
140
py
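A hedged sketch of reading the binarized dump produced by the script above back into memory, as train.py does; the file path below is an assumption.

import pickle

# Each element is a sequence of token ids stored as numpy uint16 by binarized_data.py.
with open('data/dump.bert-base-uncased.pickle', 'rb') as fp:
    sequences = pickle.load(fp)
print(f'{len(sequences)} sequences, first one has {len(sequences[0])} tokens')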
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/modeling_xxx.py
# coding=utf-8 # Copyright 2018 XXX Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XXX model. """ #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_xxx import XxxConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) #################################################### # This dict contrains shortcut names and associated url # for the pretrained weights provided with the models #################################################### XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-pytorch_model.bin", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-pytorch_model.bin", } #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_xxx(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (itself a sub-class of torch.nn.Module) #################################################### #################################################### # Here is an example of typical layer in a PyTorch model of the library # The classes are usually identical to the TF 2.0 ones without the 'TF' prefix. # # See the conversion methods in modeling_tf_pytorch_utils.py for more details #################################################### class XxxLayer(nn.Module): def __init__(self, config): super(XxxLayer, self).__init__() self.attention = XxxAttention(config) self.intermediate = XxxIntermediate(config) self.output = XxxOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None): attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs #################################################### # PreTrainedModel is a sub-class of torch.nn.Module # which take care of loading and saving pretrained weights # and various common utilities. 
# # Here you just need to specify a few (self-explanatory) # pointers for your model and the weights initialization # method if its not fully covered by PreTrainedModel's default method #################################################### class XxxPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = XxxConfig pretrained_model_archive_map = XXX_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_xxx base_model_prefix = "transformer" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XxxLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() XXX_START_DOCSTRING = r""" The XXX model was proposed in `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XXX_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.XxxTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare Xxx Model transformer outputting raw hidden-states without any specific head on top.", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxModel(XxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Xxx pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxModel.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(XxxModel, self).__init__(config) self.embeddings = XxxEmbeddings(config) self.encoder = XxxEncoder(config) self.pooler = XxxPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers ################################## # Replace this with your model code embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) sequence_output = encoder_outputs[0] outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForMaskedLM(XxxPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForMaskedLM.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(XxxForMaskedLM, self).__init__(config) self.transformer = XxxModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) self.init_weights() def get_output_embeddings(self): return self.lm_head def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForSequenceClassification(XxxPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForSequenceClassification.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(XxxForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForTokenClassification(XxxPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification loss. **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForTokenClassification.from_pretrained('xxx-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] """ def __init__(self, config): super(XxxForTokenClassification, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class XxxForQuestionAnswering(XxxPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = XxxForQuestionAnswering.from_pretrained('xxx-large-uncased-whole-word-masking-finetuned-squad') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" input_ids = tokenizer.encode(input_text) token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) all_tokens = tokenizer.convert_ids_to_tokens(input_ids) print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) # a nice puppet """ def __init__(self, config): super(XxxForQuestionAnswering, self).__init__(config) self.num_labels = config.num_labels self.transformer = XxxModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
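# --- Illustrative sketch, not part of the original template -----------------
# XxxModel.forward above turns a 2D padding mask into a 4D additive mask so it
# can be broadcast over heads and query positions and simply added to the raw
# attention scores before the softmax. A minimal standalone version of that
# trick, assuming the same -10000.0 constant; make_extended_attention_mask is
# a hypothetical helper name used only for illustration:
import torch

def make_extended_attention_mask(attention_mask, dtype=torch.float32):
    # attention_mask: (batch_size, seq_len); 1 for real tokens, 0 for padding
    extended = attention_mask[:, None, None, :].to(dtype)   # -> (batch_size, 1, 1, seq_len)
    return (1.0 - extended) * -10000.0                       # 0.0 where attended, -10000.0 where masked

mask = torch.tensor([[1, 1, 1, 0, 0]])           # one sequence with two padding positions
scores = torch.zeros(1, 1, 5, 5)                 # fake (batch, heads, q_len, k_len) attention scores
probs = torch.softmax(scores + make_extended_attention_mask(mask), dim=-1)
# probs[..., 3:] is numerically zero: the padded key positions receive no attention weight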
34,579
51.473445
151
py
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/tokenization_xxx.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model XXX.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import logging import os import unicodedata from io import open from .tokenization_utils import PreTrainedTokenizer logger = logging.getLogger(__name__) #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-vocab.txt", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-vocab.txt", } } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'xxx-base-uncased': 512, 'xxx-large-uncased': 512, } #################################################### # Mapping from model shortcut names to a dictionary of additional # keyword arguments for Tokenizer `__init__`. # To be used for checkpoint specific configurations. #################################################### PRETRAINED_INIT_CONFIGURATION = { 'xxx-base-uncased': {'do_lower_case': True}, 'xxx-large-uncased': {'do_lower_case': True}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab class XxxTokenizer(PreTrainedTokenizer): r""" Constructs a XxxTokenizer. :class:`~transformers.XxxTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs): """Constructs a XxxTokenizer. 
Args: **vocab_file**: Path to a one-wordpiece-per-line vocabulary file **do_lower_case**: (`optional`) boolean (default True) Whether to lower case the input Only has an effect when do_basic_tokenize=True """ super(XxxTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) @property def vocab_size(self): return len(self.vocab) def _tokenize(self, text): """ Take as input a string and return a list of strings (tokens) for words/sub-words """ split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str/unicode) in an id using the vocab. """ return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (string/unicode) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: single sequence: [CLS] X [SEP] pair of sequences: [CLS] A [SEP] B [SEP] """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: if token_ids_1 is not None: raise ValueError("You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model.") return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = vocab_path with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file)) index = token_index writer.write(token + u'\n') index += 1 return (vocab_file,)
9,578
42.739726
117
py
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/configuration_xxx.py
# coding=utf-8 # Copyright 2010, XXX authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XXX model configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys import six from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) XXX_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-config.json", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-config.json", } class XxxConfig(PretrainedConfig): r""" :class:`~transformers.XxxConfig` is the configuration class to store the configuration of a `XxxModel`. Arguments: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XxxModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `XxxModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. 
""" pretrained_config_archive_map = XXX_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size_or_config_json_file=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, num_labels=1, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs): super(XxxConfig, self).__init__(**kwargs) self.vocab_size = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, six.string_types) else -1 self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.num_labels = num_labels self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels if isinstance(vocab_size_or_config_json_file, six.string_types): with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif not isinstance(vocab_size_or_config_json_file, int): raise ValueError( "First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)" ) @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
5,273
39.259542
128
py
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/convert_xxx_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert XXX checkpoint.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import torch from transformers import XxxConfig, XxxForPreTraining, load_tf_weights_in_xxx import logging logging.basicConfig(level=logging.INFO) def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, xxx_config_file, pytorch_dump_path): # Initialise PyTorch model config = XxxConfig.from_json_file(xxx_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = XxxForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_xxx(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--tf_checkpoint_path", default = None, type = str, required = True, help = "Path to the TensorFlow checkpoint path.") parser.add_argument("--xxx_config_file", default = None, type = str, required = True, help = "The config json file corresponding to the pre-trained XXX model. \n" "This specifies the model architecture.") parser.add_argument("--pytorch_dump_path", default = None, type = str, required = True, help = "Path to the output PyTorch model.") args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.xxx_config_file, args.pytorch_dump_path)
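# --- Illustrative usage sketch, not part of the original template -----------
# The converter above is meant to be run from the command line; the three
# flags are the ones declared by argparse, while the paths themselves are
# hypothetical placeholders:
#
#   python convert_xxx_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xxx/model.ckpt \
#       --xxx_config_file /path/to/xxx/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin
#
# The same conversion can also be driven programmatically, since the argparse
# layer only forwards the three paths:
#
#   convert_tf_checkpoint_to_pytorch("/path/to/xxx/model.ckpt",
#                                    "/path/to/xxx/config.json",
#                                    "/path/to/output/pytorch_model.bin")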
2,565
37.878788
100
py
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/modeling_tf_xxx.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XXX model. """ #################################################### # In this template, replace all the XXX (various casings) with your model name #################################################### from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .configuration_xxx import XxxConfig from .modeling_tf_utils import TFPreTrainedModel, get_initializer from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) #################################################### # This dict contrains shortcut names and associated url # for the pretrained weights provided with the models #################################################### TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-tf_model.h5", 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-tf_model.h5", } #################################################### # TF 2.0 Models are constructed using Keras imperative API by sub-classing # - tf.keras.layers.Layer for the layers and # - TFPreTrainedModel for the models (itself a sub-class of tf.keras.Model) #################################################### #################################################### # Here is an example of typical layer in a TF 2.0 model of the library # The classes are usually identical to the PyTorch ones and prefixed with 'TF'. # # Note that class __init__ parameters includes **kwargs (send to 'super'). # This let us have a control on class scope and variable names: # More precisely, we set the names of the class attributes (lower level layers) to # to the equivalent attributes names in the PyTorch model so we can have equivalent # class and scope structure between PyTorch and TF 2.0 models and easily load one in the other. 
# # See the conversion methods in modeling_tf_pytorch_utils.py for more details #################################################### class TFXxxLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFXxxLayer, self).__init__(**kwargs) self.attention = TFXxxAttention(config, name='attention') self.intermediate = TFXxxIntermediate(config, name='intermediate') self.transformer_output = TFXxxOutput(config, name='output') def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.transformer_output([intermediate_output, attention_output], training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs #################################################### # The full model without a specific pretrained or finetuning head is # provided as a tf.keras.layers.Layer usually called "TFXxxMainLayer" #################################################### class TFXxxMainLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFXxxMainLayer, self).__init__(**kwargs) def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def _prune_heads(self, heads_to_prune): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): # We allow three types of multi-inputs: # - traditional keyword arguments in the call method # - all the arguments provided as a dict in the first positional argument of call # - all the arguments provided as a list/tuple (ordered) in the first positional argument of call # The last two options are useful to use the tf.keras fit() method. if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask assert len(inputs) <= 5, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) assert len(inputs) <= 5, "Too many inputs." else: input_ids = inputs if attention_mask is None: attention_mask = tf.fill(tf.shape(input_ids), 1) if token_type_ids is None: token_type_ids = tf.fill(tf.shape(input_ids), 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. 
# Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) ################################## # Replace this with your model code embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training) sequence_output = encoder_outputs[0] outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs # sequence_output, (hidden_states), (attentions) #################################################### # TFXxxPreTrainedModel is a sub-class of tf.keras.Model # which take care of loading and saving pretrained weights # and various common utilities. # Here you just need to specify a few (self-explanatory) # pointers for your model. #################################################### class TFXxxPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = XxxConfig pretrained_model_archive_map = TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" XXX_START_DOCSTRING = r""" The XXX model was proposed in `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pre-trained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia. This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: https://arxiv.org/abs/1810.04805 .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. 
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with input_ids only and nothing else: `model(inputs_ids) - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associaed to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XXX_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.XxxTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" @add_start_docstrings("The bare Xxx Model transformer outputing raw hidden-states without any specific head on top.", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxModel(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Xxx pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxModel tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxModel.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, *inputs, **kwargs): super(TFXxxModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFXxxMainLayer(config, name='transformer') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForMaskedLM(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForMaskedLM tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForMaskedLM.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForMaskedLM, self).__init__(config, *inputs, **kwargs) self.transformer = TFXxxMainLayer(config, name='transformer') self.mlm = TFXxxMLMHead(config, self.transformer.embeddings, name='mlm') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output, training=kwargs.get('training', False)) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here return outputs # prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForSequenceClassification(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **logits**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForSequenceClassification tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForSequenceClassification.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) logits = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForSequenceClassification, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here return outputs # logits, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForTokenClassification(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` Classification scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForTokenClassification tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForTokenClassification.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) scores = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForTokenClassification, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False)) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here return outputs # scores, (hidden_states), (attentions) @add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) class TFXxxForQuestionAnswering(TFXxxPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **start_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import XxxTokenizer, TFXxxForQuestionAnswering tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') model = TFXxxForQuestionAnswering.from_pretrained('xxx-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) start_scores, end_scores = outputs[:2] """ def __init__(self, config, *inputs, **kwargs): super(TFXxxForQuestionAnswering, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXxxMainLayer(config, name='transformer') self.qa_outputs = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) outputs = (start_logits, end_logits,) + outputs[2:] return outputs # start_logits, end_logits, (hidden_states), (attentions)
27,575
53.605941
193
py
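The TFXxxForSequenceClassification template above only adds a dropout layer and a dense classifier on top of the transformer's pooled output. Below is a minimal, self-contained sketch of that head pattern; the batch size, hidden size, label count and dropout rate are illustrative stand-ins, not the template's actual configuration.

import tensorflow as tf

# Stand-in for the transformer pooler output of shape (batch_size, hidden_size).
batch_size, hidden_size, num_labels = 8, 32, 3
pooled_output = tf.random.normal((batch_size, hidden_size))

# Same head structure as in the template: dropout, then a dense layer with num_labels units.
dropout = tf.keras.layers.Dropout(0.1)
classifier = tf.keras.layers.Dense(num_labels)

logits = classifier(dropout(pooled_output, training=True))
print(logits.shape)  # (8, 3)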
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/tests/tokenization_xxx_test.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import os import unittest from io import open from transformers.tokenization_bert import (XxxTokenizer, VOCAB_FILES_NAMES) from .tokenization_tests_commons import CommonTestCases class XxxTokenizationTest(CommonTestCases.CommonTokenizerTester): tokenizer_class = XxxTokenizer def setUp(self): super(XxxTokenizationTest, self).setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file, "w", encoding='utf-8') as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): return XxxTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self): input_text = u"UNwant\u00E9d,running" output_text = u"unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize(u"UNwant\u00E9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) if __name__ == '__main__': unittest.main()
2,092
35.086207
90
py
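The tokenizer test above boils down to three steps: write a toy WordPiece vocabulary to disk, load it, and check the produced sub-tokens and ids. A hedged, runnable sketch of that flow follows; BertTokenizer is used only as a concrete stand-in for the placeholder XxxTokenizer class, and the expected outputs are the ones asserted in the test.

import os
import tempfile
from transformers.tokenization_bert import BertTokenizer

# Toy vocabulary identical to the one built in the test's setUp().
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed",
                "wa", "un", "runn", "##ing", ",", "low", "lowest"]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as writer:
    writer.write("".join(token + "\n" for token in vocab_tokens))

tokenizer = BertTokenizer(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
print(tokens)                                   # ['un', '##want', '##ed', ',', 'runn', '##ing']
print(tokenizer.convert_tokens_to_ids(tokens))  # [7, 4, 5, 10, 8, 9]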
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/tests/modeling_tf_xxx_test.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import shutil import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester from transformers import XxxConfig, is_tf_available if is_tf_available(): import tensorflow as tf from transformers.modeling_tf_xxx import (TFXxxModel, TFXxxForMaskedLM, TFXxxForSequenceClassification, TFXxxForTokenClassification, TFXxxForQuestionAnswering, TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP) else: pytestmark = pytest.mark.skip("Require TensorFlow") class TFXxxModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFXxxModel, TFXxxForMaskedLM, TFXxxForQuestionAnswering, TFXxxForSequenceClassification, TFXxxForTokenClassification) if is_tf_available() else () class TFXxxModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XxxConfig( vocab_size_or_config_json_file=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxModel(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} sequence_output, pooled_output = model(inputs) inputs = [input_ids, input_mask] sequence_output, pooled_output = model(inputs) sequence_output, pooled_output = model(input_ids) result = { "sequence_output": sequence_output.numpy(), "pooled_output": pooled_output.numpy(), } self.parent.assertListEqual( list(result["sequence_output"].shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].shape), [self.batch_size, self.hidden_size]) def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxForMaskedLM(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} prediction_scores, = model(inputs) result = { "prediction_scores": prediction_scores.numpy(), } self.parent.assertListEqual( list(result["prediction_scores"].shape), [self.batch_size, self.seq_length, self.vocab_size]) def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = TFXxxForSequenceClassification(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} logits, = model(inputs) result = { "logits": logits.numpy(), } self.parent.assertListEqual( list(result["logits"].shape), [self.batch_size, self.num_labels]) def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = TFXxxForTokenClassification(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} logits, = model(inputs) result = { "logits": logits.numpy(), } self.parent.assertListEqual( list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]) def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFXxxForQuestionAnswering(config=config) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} start_logits, end_logits = model(inputs) result = { "start_logits": start_logits.numpy(), "end_logits": end_logits.numpy(), } self.parent.assertListEqual( list(result["start_logits"].shape), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["end_logits"].shape), [self.batch_size, self.seq_length]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def setUp(self): self.model_tester = TFXxxModelTest.TFXxxModelTester(self) self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_xxx_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in ['xxx-base-uncased']: model = TFXxxModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) if __name__ == "__main__": unittest.main()
11,411
43.404669
160
py
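The TF test above drives every model class with random integer tensors produced by the shared ids_tensor helper. The snippet below is a sketch of that idea with the tester's default shapes (batch_size=13, seq_length=7, vocab_size=99); it is not the test utility's own implementation.

import tensorflow as tf

def random_ids(shape, vocab_size):
    # Random token ids in [0, vocab_size), standing in for what ids_tensor gives the tester.
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

input_ids = random_ids([13, 7], vocab_size=99)
attention_mask = random_ids([13, 7], vocab_size=2)   # 0/1 mask
token_type_ids = random_ids([13, 7], vocab_size=16)  # segment ids
print(input_ids.shape, attention_mask.shape, token_type_ids.shape)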
fat-albert
fat-albert-master/bert/templates/adding_a_new_model/tests/modeling_xxx_test.py
# coding=utf-8 # Copyright 2018 XXX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import shutil import pytest from transformers import is_torch_available from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester if is_torch_available(): from transformers import (XxxConfig, XxxModel, XxxForMaskedLM, XxxForNextSentencePrediction, XxxForPreTraining, XxxForQuestionAnswering, XxxForSequenceClassification, XxxForTokenClassification, XxxForMultipleChoice) from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP else: pytestmark = pytest.mark.skip("Require Torch") class XxxModelTest(CommonTestCases.CommonModelTester): all_model_classes = (XxxModel, XxxForMaskedLM, XxxForQuestionAnswering, XxxForSequenceClassification, XxxForTokenClassification) if is_torch_available() else () class XxxModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XxxConfig( vocab_size_or_config_json_file=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def check_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), []) def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxModel(config=config) model.eval() sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids) result = { "sequence_output": sequence_output, "pooled_output": pooled_output, } self.parent.assertListEqual( list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxForMaskedLM(config=config) model.eval() loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) result = { "loss": loss, "prediction_scores": prediction_scores, } self.parent.assertListEqual( list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]) self.check_loss_output(result) def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = XxxForQuestionAnswering(config=config) model.eval() loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels) result = { "loss": loss, "start_logits": start_logits, "end_logits": end_logits, } self.parent.assertListEqual( list(result["start_logits"].size()), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["end_logits"].size()), [self.batch_size, self.seq_length]) self.check_loss_output(result) def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = XxxForSequenceClassification(config) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) result = { "loss": loss, "logits": logits, } self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.num_labels]) self.check_loss_output(result) def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = XxxForTokenClassification(config=config) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = { "loss": loss, "logits": logits, } 
self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels]) self.check_loss_output(result) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def setUp(self): self.model_tester = XxxModelTest.XxxModelTester(self) self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_xxx_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(XXX_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = XxxModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) if __name__ == "__main__": unittest.main()
11,628
44.425781
160
py
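The PyTorch test above follows the same pattern, packing random input_ids, attention_mask and token_type_ids into an inputs_dict for the common test cases. A hedged sketch of building such inputs with the tester's default sizes:

import torch

batch_size, seq_length, vocab_size, type_vocab_size = 13, 7, 99, 16
inputs_dict = {
    "input_ids": torch.randint(0, vocab_size, (batch_size, seq_length)),
    "attention_mask": torch.randint(0, 2, (batch_size, seq_length)),
    "token_type_ids": torch.randint(0, type_vocab_size, (batch_size, seq_length)),
}
print({name: tuple(t.shape) for name, t in inputs_dict.items()})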
fat-albert
fat-albert-master/bert/templates/adding_a_new_example_script/utils_xxx.py
# coding=utf-8 # Copyright 2018 XXX. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Load XXX dataset. """ from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize # Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, cls_index, p_mask, paragraph_len, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.cls_index = cls_index self.p_mask = p_mask self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False 
char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 # cnt_pos, cnt_neg = 0, 0 # max_N, max_M = 1024, 1024 # f = np.zeros((max_N, max_M), dtype=np.float32) features = [] for (example_index, example) in enumerate(examples): # if example_index % 100 == 0: # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. 
# To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implem also keep the classification token (set to 0) (not sure why...) p_mask = [] # CLS token at the beginning if not cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = 0 # Query for token in query_tokens: tokens.append(token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # SEP token tokens.append(sep_token) segment_ids.append(sequence_a_segment_id) p_mask.append(1) # Paragraph for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(sequence_b_segment_id) p_mask.append(0) paragraph_len = doc_span.length # SEP token tokens.append(sep_token) segment_ids.append(sequence_b_segment_id) p_mask.append(1) # CLS token at the end if cls_token_at_end: tokens.append(cls_token) segment_ids.append(cls_token_segment_id) p_mask.append(0) cls_index = len(tokens) - 1 # Index of classification token input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(pad_token) input_mask.append(0 if mask_padding_with_zero else 1) segment_ids.append(pad_token_segment_id) p_mask.append(1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 span_is_impossible = True else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = cls_index end_position = cls_index if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: logger.info("impossible example") if is_training and not span_is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. 
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions # For XLNet (and XLM which uses the same head) RawResultExtended = collections.namedtuple("RawResultExtended", ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"]) def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging): """ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. 
Requires utils_squad_evaluate.py """ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) logger.info("Writing predictions to: %s", output_prediction_file) # logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] cur_null_score = result.cls_logits # if we could have irrelevant answers, get the min score of irrelevant score_null = min(score_null, cur_null_score) for i in range(start_n_top): for j in range(end_n_top): start_log_prob = result.start_top_log_probs[i] start_index = result.start_top_index[i] j_index = i * end_n_top + j end_log_prob = result.end_top_log_probs[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] # XLNet un-tokenizer # Let's keep it simple for now and see if we need all this later. 
# # tok_start_to_orig_index = feature.tok_start_to_orig_index # tok_end_to_orig_index = feature.tok_end_to_orig_index # start_orig_pos = tok_start_to_orig_index[pred.start_index] # end_orig_pos = tok_end_to_orig_index[pred.end_index] # paragraph_text = example.paragraph_text # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() # Previously used Bert untokenizer tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") with open(orig_data_file, "r", encoding='utf-8') as reader: orig_data = json.load(reader)["data"] qid_to_has_ans = make_qid_to_has_ans(orig_data) has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) out_eval = {} find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) return out_eval def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. 
# # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
41,415
40.582329
112
py
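The least obvious piece of utils_xxx.py above is the sliding-window chunking inside convert_examples_to_features: documents longer than max_tokens_for_doc are cut into overlapping spans advanced by doc_stride. The standalone sketch below reproduces just that loop so it can be inspected in isolation; the numbers in the example call are illustrative.

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    # Same control flow as the _DocSpan loop above: fixed-size windows that overlap,
    # advancing by at most doc_stride tokens and stopping once the last token is covered.
    spans, start_offset = [], 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return spans

print(make_doc_spans(num_tokens=10, max_tokens_for_doc=4, doc_stride=2))
# [DocSpan(start=0, length=4), DocSpan(start=2, length=4),
#  DocSpan(start=4, length=4), DocSpan(start=6, length=4)]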
fat-albert
fat-albert-master/bert/templates/adding_a_new_example_script/run_xxx.py
# coding=utf-8 # Copyright 2018 XXX. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for task XXX.""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForQuestionAnswering, BertTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) from transformers import AdamW, get_linear_schedule_with_warmup from utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, RawResultExtended, write_predictions_extended) # The follwing import is the official SQuAD evaluation script (2.0). # You can remove it from the dependencies if you are using this script outside of the library # We've added it here for automated tests (see examples/test_examples.py file) from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad logger = logging.getLogger(__name__) ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if 
any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]} if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - 
logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1] } if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ['xlnet', 'xlm']: inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure result = RawResultExtended(unique_id = unique_id, start_top_log_probs = to_list(outputs[0][i]), start_top_index = to_list(outputs[1][i]), end_top_log_probs = to_list(outputs[2][i]), end_top_index = to_list(outputs[3][i]), cls_logits = to_list(outputs[4][i])) else: result = RawResult(unique_id = unique_id, start_logits = to_list(outputs[0][i]), end_logits = to_list(outputs[1][i])) all_results.append(result) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ['xlnet', 'xlm']: # XLNet uses a more complex post-processing procedure write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging) else: 
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) # Evaluate with the official SQuAD script evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file) results = evaluate_on_squad(evaluate_options) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length))) if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", input_file) examples = read_squad_examples(input_file=input_file, is_training=not evaluate, version_2_with_negative=args.version_2_with_negative) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) if evaluate: all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask) if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, required=True, help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
30,654
53.741071
151
py
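The fine-tuning template above derives its optimizer schedule from the dataloader length, the gradient-accumulation setting and the epoch count. The following is a minimal arithmetic sketch of that bookkeeping; the example sizes (number of cached training features, GPU count) are assumptions for illustration, not values taken from this repository.

# Illustrative schedule arithmetic mirroring train() above; all numbers are hypothetical.
num_train_features = 88641            # assumed size of the cached training feature set
per_gpu_train_batch_size = 8
n_gpu = 2
gradient_accumulation_steps = 4
num_train_epochs = 3

train_batch_size = per_gpu_train_batch_size * max(1, n_gpu)                    # 16 examples per forward pass
steps_per_epoch = num_train_features // train_batch_size                       # roughly len(train_dataloader)
t_total = steps_per_epoch // gradient_accumulation_steps * num_train_epochs    # optimizer steps fed to the LR schedule
effective_batch = train_batch_size * gradient_accumulation_steps               # 64 examples per optimizer step

print(steps_per_epoch, t_total, effective_batch)                               # 5540 4155 64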
fat-albert
fat-albert-master/bert/datasets/SWAG/pytorch_misc.py
""" Miscellaneous functions that might be useful for pytorch """ import h5py import numpy as np import torch from torch.autograd import Variable import os import dill as pkl from itertools import tee from torch import nn import time def optimistic_restore(network, state_dict): mismatch = False own_state = network.state_dict() for name, param in state_dict.items(): if name not in own_state: print("Unexpected key {} in state_dict with size {}".format(name, param.size())) mismatch = True elif param.size() == own_state[name].size(): own_state[name].copy_(param) else: print("Network has {} with size {}, ckpt has {}".format(name, own_state[name].size(), param.size())) mismatch = True missing = set(own_state.keys()) - set(state_dict.keys()) if len(missing) > 0: print("We couldn't find {}".format(','.join(missing))) mismatch = True return not mismatch def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = tee(iterable) next(b, None) return zip(a, b) def get_ranking(predictions, labels, num_guesses=5): """ Given a matrix of predictions and labels for the correct ones, get the number of guesses required to get the prediction right per example. :param predictions: [batch_size, range_size] predictions :param labels: [batch_size] array of labels :param num_guesses: Number of guesses to return :return: """ assert labels.size(0) == predictions.size(0) assert labels.dim() == 1 assert predictions.dim() == 2 values, full_guesses = predictions.topk(predictions.size(1), dim=1) _, ranking = full_guesses.topk(full_guesses.size(1), dim=1, largest=False) gt_ranks = torch.gather(ranking.data, 1, labels.data[:, None]).squeeze() guesses = full_guesses[:, :num_guesses] return gt_ranks, guesses def cache(f): """ Caches a computation """ def cache_wrapper(fn, *args, **kwargs): if os.path.exists(fn): with open(fn, 'rb') as file: data = pkl.load(file) else: print("file {} not found, so rebuilding".format(fn)) data = f(*args, **kwargs) with open(fn, 'wb') as file: pkl.dump(data, file) return data return cache_wrapper class Flattener(nn.Module): def __init__(self): """ Flattens last 3 dimensions to make it only batch size, -1 """ super(Flattener, self).__init__() def forward(self, x): return x.view(x.size(0), -1) def to_variable(f): """ Decorator that pushes all the outputs to a variable :param f: :return: """ def variable_wrapper(*args, **kwargs): rez = f(*args, **kwargs) if isinstance(rez, tuple): return tuple([Variable(x) for x in rez]) return Variable(rez) return variable_wrapper def arange(base_tensor, n=None): new_size = base_tensor.size(0) if n is None else n new_vec = base_tensor.new(new_size).long() torch.arange(0, new_size, out=new_vec) return new_vec def to_onehot(vec, num_classes, on_fill=1, off_fill=0): """ Creates a [size, num_classes] torch FloatTensor where one_hot[i, vec[i]] = on_fill and off_fill otherwise. 
:param vec: 1d torch LongTensor (not a variable) :param num_classes: int :param on_fill: what to fill the things that are on :param off_fill: fill things that are off :return: """ onehot_result = vec.new(vec.size(0), num_classes).float().fill_(off_fill) arange_inds = vec.new(vec.size(0)) torch.arange(0, vec.size(0), out=arange_inds) onehot_result[arange_inds, vec] = on_fill return onehot_result def save_net(fname, net): h5f = h5py.File(fname, mode='w') for k, v in list(net.state_dict().items()): h5f.create_dataset(k, data=v.cpu().numpy()) def load_net(fname, net): h5f = h5py.File(fname, mode='r') for k, v in list(net.state_dict().items()): param = torch.from_numpy(np.asarray(h5f[k])) if v.size() != param.size(): print("On k={} desired size is {} but supplied {}".format(k, v.size(), param.size())) else: v.copy_(param) def batch_index_iterator(len_l, batch_size, skip_end=True): """ Provides indices that iterate over a list :param len_l: int representing size of thing that we will iterate over :param batch_size: size of each batch :param skip_end: if true, don't iterate over the last batch :return: A generator that returns (start, end) tuples as it goes through all batches """ iterate_until = len_l if skip_end: iterate_until = (len_l // batch_size) * batch_size for b_start in range(0, iterate_until, batch_size): yield (b_start, min(b_start + batch_size, len_l)) def batch_map(f, a, batch_size): """ Maps f over the array a in chunks of batch_size. :param f: function to be applied. Must take in a block of (batch_size, dim_a) and map it to (batch_size, something). :param a: Array to be applied over of shape (num_rows, dim_a). :param batch_size: size of each array :return: Array of size (num_rows, something). """ rez = [] for s, e in batch_index_iterator(a.size(0), batch_size, skip_end=False): print("Calling on {}".format(a[s:e].size())) rez.append(f(a[s:e])) return torch.cat(rez) def const_row(fill, l, volatile=False): input_tok = Variable(torch.LongTensor([fill] * l), volatile=volatile) if torch.cuda.is_available(): input_tok = input_tok.cuda() return input_tok def print_para(model): """ Prints parameters of a model :param opt: :return: """ st = {} strings = [] total_params = 0 for p_name, p in model.named_parameters(): if not ('bias' in p_name.split('.')[-1] or 'bn' in p_name.split('.')[-1]): st[p_name] = ([str(x) for x in p.size()], np.prod(p.size()), p.requires_grad) total_params += np.prod(p.size()) for p_name, (size, prod, p_req_grad) in sorted(st.items(), key=lambda x: -x[1][1]): strings.append("{:<60s}: {:<16s}({:8d}) ({})".format( p_name, '[{}]'.format(','.join(size)), prod, 'grad' if p_req_grad else ' ' )) return '\n {:.1f}M total parameters \n ----- \n \n{}'.format(total_params / 1000000.0, '\n'.join(strings)) def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def nonintersecting_2d_inds(x): """ Returns np.array([(a,b) for a in range(x) for b in range(x) if a != b]) efficiently :param x: Size :return: a x*(x-1) array that is [(0,1), (0,2)... 
(0, x-1), (1,0), (1,2), ..., (x-1, x-2)] """ rs = 1 - np.diag(np.ones(x, dtype=np.int32)) relations = np.column_stack(np.where(rs)) return relations def intersect_2d(x1, x2): """ Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those rows match. :param x1: [m1, n] numpy array :param x2: [m2, n] numpy array :return: [m1, m2] bool array of the intersections """ if x1.shape[1] != x2.shape[1]: raise ValueError("Input arrays must have same #columns") # This performs a matrix multiplication-esque thing between the two arrays # Instead of summing, we want the equality, so we reduce in that way res = (x1[..., None] == x2.T[None, ...]).all(1) return res def np_to_variable(x, is_cuda=True, dtype=torch.FloatTensor): v = Variable(torch.from_numpy(x).type(dtype)) if is_cuda: v = v.cuda() return v def gather_nd(x, index): """ :param x: n dimensional tensor [x0, x1, x2, ... x{n-1}, dim] :param index: [num, n-1] where each row contains the indices we'll use :return: [num, dim] """ nd = x.dim() - 1 assert nd > 0 assert index.dim() == 2 assert index.size(1) == nd dim = x.size(-1) sel_inds = index[:, nd - 1].clone() mult_factor = x.size(nd - 1) for col in range(nd - 2, -1, -1): # [n-2, n-3, ..., 1, 0] sel_inds += index[:, col] * mult_factor mult_factor *= x.size(col) grouped = x.view(-1, dim)[sel_inds] return grouped def enumerate_by_image(im_inds): im_inds_np = im_inds.cpu().numpy() initial_ind = int(im_inds_np[0]) s = 0 for i, val in enumerate(im_inds_np): if val != initial_ind: yield initial_ind, s, i initial_ind = int(val) s = i yield initial_ind, s, len(im_inds_np) # num_im = im_inds[-1] + 1 # # print("Num im is {}".format(num_im)) # for i in range(num_im): # # print("On i={}".format(i)) # inds_i = (im_inds == i).nonzero() # if inds_i.dim() == 0: # continue # inds_i = inds_i.squeeze(1) # s = inds_i[0] # e = inds_i[-1] + 1 # # print("On i={} we have s={} e={}".format(i, s, e)) # yield i, s, e def diagonal_inds(tensor): """ Returns the indices required to go along first 2 dims of tensor in diag fashion :param tensor: thing :return: """ assert tensor.dim() >= 2 assert tensor.size(0) == tensor.size(1) size = tensor.size(0) arange_inds = tensor.new(size).long() torch.arange(0, tensor.size(0), out=arange_inds) return (size + 1) * arange_inds def enumerate_imsize(im_sizes): s = 0 for i, (h, w, scale, num_anchors) in enumerate(im_sizes): na = int(num_anchors) e = s + na yield i, s, e, h, w, scale, na s = e def argsort_desc(scores): """ Returns the indices that sort scores descending in a smart way :param scores: Numpy array of arbitrary size :return: an array of size [numel(scores), dim(scores)] where each row is the index you'd need to get the score. 
""" return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape)) def unravel_index(index, dims): unraveled = [] index_cp = index.clone() for d in dims[::-1]: unraveled.append(index_cp % d) index_cp /= d return torch.cat([x[:, None] for x in unraveled[::-1]], 1) def de_chunkize(tensor, chunks): s = 0 for c in chunks: yield tensor[s:(s + c)] s = s + c def random_choose(tensor, num): "randomly choose indices" num_choose = min(tensor.size(0), num) if num_choose == tensor.size(0): return tensor # Gotta do this in numpy because of https://github.com/pytorch/pytorch/issues/1868 rand_idx = np.random.choice(tensor.size(0), size=num, replace=False) rand_idx = torch.LongTensor(rand_idx).cuda(tensor.get_device()) chosen = tensor[rand_idx].contiguous() # rand_values = tensor.new(tensor.size(0)).float().normal_() # _, idx = torch.sort(rand_values) # # chosen = tensor[idx[:num]].contiguous() return chosen def transpose_packed_sequence_inds(lengths): """ Goes from a TxB packed sequence to a BxT or vice versa. Assumes that nothing is a variable :param ps: PackedSequence :return: """ new_inds = [] new_lens = [] cum_add = np.cumsum([0] + lengths) max_len = lengths[0] length_pointer = len(lengths) - 1 for i in range(max_len): while length_pointer > 0 and lengths[length_pointer] <= i: length_pointer -= 1 new_inds.append(cum_add[:(length_pointer + 1)].copy()) cum_add[:(length_pointer + 1)] += 1 new_lens.append(length_pointer + 1) new_inds = np.concatenate(new_inds, 0) return new_inds, new_lens def right_shift_packed_sequence_inds(lengths): """ :param lengths: e.g. [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1] :return: perm indices for the old stuff (TxB) to shift it right 1 slot so as to accomodate BOS toks visual example: of lengths = [4,3,1,1] before: a (0) b (4) c (7) d (8) a (1) b (5) a (2) b (6) a (3) after: bos a (0) b (4) c (7) bos a (1) bos a (2) bos """ cur_ind = 0 inds = [] for (l1, l2) in zip(lengths[:-1], lengths[1:]): for i in range(l2): inds.append(cur_ind + i) cur_ind += l1 return inds def clip_grad_norm(named_parameters, max_norm, clip=False, verbose=False): r"""Clips gradient norm of an iterable of parameters. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. Arguments: parameters (Iterable[Variable]): an iterable of Variables that will have gradients normalized max_norm (float or int): max norm of the gradients Returns: Total norm of the parameters (viewed as a single vector). """ max_norm = float(max_norm) total_norm = 0 param_to_norm = {} param_to_shape = {} for n, p in named_parameters: if p.grad is not None: param_norm = p.grad.data.norm(2) total_norm += param_norm ** 2 param_to_norm[n] = param_norm param_to_shape[n] = tuple(p.size()) total_norm = total_norm ** (1. 
/ 2) clip_coef = max_norm / (total_norm + 1e-6) if clip_coef < 1 and clip: for _, p in named_parameters: if p.grad is not None: p.grad.data.mul_(clip_coef) if verbose: print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef)) for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]): print("{:<60s}: {:.3f}, ({}: {})".format(name, norm, np.prod(param_to_shape[name]), param_to_shape[name])) print('-------------------------------', flush=True) return total_norm def time_batch(gen, reset_every=100): """ Gets timing info for a batch :param gen: :param reset_every: How often we'll reset :return: """ start = time.time() start_t = 0 for i, item in enumerate(gen): time_per_batch = (time.time() - start) / (i+1-start_t) yield time_per_batch, item if i % reset_every == 0: start = time.time() start_t = i def update_lr(optimizer, lr=1e-4): print("------ Learning rate -> {}".format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr def all_upper_triangular_pairs(gen): """ Iterates over all pairs from a generator where x < y """ hist = [] for g in gen: for h in hist: yield (h, g) hist.append(g) def pad_last_dim(tensor, new_size): """ Pads an n-dimensional tensor by 0's in the last dimension :param tensor: N-dimensional tensor :param new_size: how much the last dim should be :return: padded tensor """ assert tensor.size(-1) < new_size to_add = tensor.new(tensor.size()[:-1] + (new_size-tensor.size(-1),)).fill_(0) return torch.cat((tensor, to_add), -1)
15,621
29.216634
118
py
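pytorch_misc.py above is a grab-bag of tensor helpers; the snippet below is a small usage sketch for two of them (batch_index_iterator and to_onehot). It assumes the file is importable under the name pytorch_misc and that its module-level dependencies (h5py, dill) are installed; expected outputs are shown as comments.

import torch
from pytorch_misc import batch_index_iterator, to_onehot   # assumed module name / import path

# (start, end) slices over a sequence of length 10 in chunks of 4, keeping the ragged tail
print(list(batch_index_iterator(10, 4, skip_end=False)))    # [(0, 4), (4, 8), (8, 10)]

# class ids -> a [N, num_classes] float matrix with ones at the labelled positions
print(to_onehot(torch.tensor([0, 2, 1]), num_classes=4))
# tensor([[1., 0., 0., 0.],
#         [0., 0., 1., 0.],
#         [0., 1., 0., 0.]])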
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/unarylstm/lstm_swag.py
from typing import Dict, List, TextIO, Optional from overrides import overrides import torch from torch.nn.modules import Linear, Dropout import torch.nn.functional as F from allennlp.common import Params from allennlp.common.checks import check_dimensions_match from allennlp.data import Vocabulary from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.token_embedders import Embedding, ElmoTokenEmbedder from allennlp.models.model import Model from allennlp.nn import InitializerApplicator, RegularizerApplicator from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode from allennlp.training.metrics import SpanBasedF1Measure from allennlp.training.metrics import CategoricalAccuracy @Model.register("lstm_swag") class LstmSwag(Model): """ This model performs semantic role labeling using BIO tags using Propbank semantic roles. Specifically, it is an implmentation of `Deep Semantic Role Labeling - What works and what's next <https://homes.cs.washington.edu/~luheng/files/acl2017_hllz.pdf>`_ . This implementation is effectively a series of stacked interleaved LSTMs with highway connections, applied to embedded sequences of words concatenated with a binary indicator containing whether or not a word is the verbal predicate to generate predictions for in the sentence. Additionally, during inference, Viterbi decoding is applied to constrain the predictions to contain valid BIO sequences. Parameters ---------- vocab : ``Vocabulary``, required A Vocabulary, required in order to compute sizes for input/output projections. text_field_embedder : ``TextFieldEmbedder``, required Used to embed the ``tokens`` ``TextField`` we get as input to the model. encoder : ``Seq2SeqEncoder`` The encoder (with its own internal stacking) that we will use in between embedding tokens and predicting output tags. binary_feature_dim : int, required. The dimensionality of the embedding of the binary verb predicate features. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. label_smoothing : ``float``, optional (default = 0.0) Whether or not to use label smoothing on the labels when computing cross entropy loss. """ def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Seq2SeqEncoder, # binary_feature_dim: int, embedding_dropout: float = 0.0, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(LstmSwag, self).__init__(vocab, regularizer) self.text_field_embedder = text_field_embedder # For the span based evaluation, we don't want to consider labels # for verb, because the verb index is provided to the model. 
self.encoder = encoder self.embedding_dropout = Dropout(p=embedding_dropout) self.output_prediction = Linear(self.encoder.get_output_dim(), 1, bias=False) check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(), "text embedding dim", "eq encoder input dim") self._accuracy = CategoricalAccuracy() self._loss = torch.nn.CrossEntropyLoss() initializer(self) def forward(self, # type: ignore hypothesis0: Dict[str, torch.LongTensor], hypothesis1: Dict[str, torch.LongTensor], hypothesis2: Dict[str, torch.LongTensor], hypothesis3: Dict[str, torch.LongTensor], label: torch.IntTensor = None, ) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ """ Parameters ---------- Returns ------- An output dictionary consisting of: logits : torch.FloatTensor A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing unnormalised log probabilities of the tag classes. class_probabilities : torch.FloatTensor A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing a distribution of the tag classes per word. loss : torch.FloatTensor, optional A scalar loss to be optimised. """ logits = [] for tokens in [hypothesis0, hypothesis1, hypothesis2, hypothesis3]: if isinstance(self.text_field_embedder, ElmoTokenEmbedder): self.text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() embedded_text_input = self.embedding_dropout(self.text_field_embedder(tokens)) mask = get_text_field_mask(tokens) batch_size, sequence_length, _ = embedded_text_input.size() encoded_text = self.encoder(embedded_text_input, mask) logits.append(self.output_prediction(encoded_text.max(1)[0])) logits = torch.cat(logits, -1) class_probabilities = F.softmax(logits, dim=-1).view([batch_size, 4]) output_dict = {"label_logits": logits, "label_probs": class_probabilities} if label is not None: loss = self._loss(logits, label.long().view(-1)) self._accuracy(logits, label.squeeze(-1)) output_dict["loss"] = loss return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: return { 'accuracy': self._accuracy.get_metric(reset), } @classmethod def from_params(cls, vocab: Vocabulary, params: Params) -> 'LstmSwag': embedder_params = params.pop("text_field_embedder") text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params) encoder = Seq2SeqEncoder.from_params(params.pop("encoder")) initializer = InitializerApplicator.from_params(params.pop('initializer', [])) regularizer = RegularizerApplicator.from_params(params.pop('regularizer', [])) params.assert_empty(cls.__name__) return cls(vocab=vocab, text_field_embedder=text_field_embedder, encoder=encoder, initializer=initializer, regularizer=regularizer)
6,737
45.791667
97
py
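The AllenNLP model above scores each of the four SWAG endings with a shared LSTM encoder, max-pools over time, and projects to one logit per ending before a softmax across choices. The sketch below restates that scoring idea in bare PyTorch; it is not the repository's model, and the vocabulary and layer sizes are invented for illustration.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySwagScorer(nn.Module):
    """Shared encoder over the 4 candidate endings, one logit per ending."""
    def __init__(self, vocab_size=1000, emb_dim=64, hidden_dim=128):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden_dim, batch_first=True)
        self.score = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, endings):                               # endings: [batch, 4, seq_len] token ids
        b, k, t = endings.shape
        h, _ = self.lstm(self.emb(endings.view(b * k, t)))    # [b*4, t, hidden]
        pooled = h.max(dim=1)[0]                              # max-pool over time, as in LstmSwag above
        return self.score(pooled).view(b, k)                  # one logit per candidate ending

model = TinySwagScorer()
endings = torch.randint(0, 1000, (2, 4, 12))                  # fake token ids
labels = torch.tensor([0, 3])
loss = F.cross_entropy(model(endings), labels)                # softmax over the 4 endings + NLL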
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/unarylstm/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/unarylstm/predict.py
""" adapted from Allennlp because their version doesn't seem to work 😢😢😢 """ from typing import Dict, Any, Iterable import argparse import logging from allennlp.commands.subcommand import Subcommand from allennlp.common.util import prepare_environment, import_submodules from allennlp.common.tqdm import Tqdm from allennlp.data import Instance from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.iterators import DataIterator from allennlp.models.archival import load_archive from allennlp.models.model import Model import numpy as np import pandas as pd logger = logging.getLogger(__name__) # pylint: disable=invalid-name if __name__ == '__main__': parser = argparse.ArgumentParser(description="predict") parser.add_argument('archive_file', type=str, help='path to an archived trained model') parser.add_argument('input_file', type=str, help='path to input file') cuda_device = parser.add_mutually_exclusive_group(required=False) cuda_device.add_argument('--cuda-device', type=int, default=0, help='id of GPU to use (if any)') parser.add_argument('-o', '--overrides', type=str, default="", help='a HOCON structure used to override the experiment configuration') parser.add_argument('--include-package', type=str, action='append', default=[], help='additional packages to include') parser.add_argument('--output-file', type=str, help='path to output file') args = parser.parse_args() # Disable some of the more verbose logging statements logging.getLogger('allennlp.common.params').disabled = True logging.getLogger('allennlp.nn.initializers').disabled = True logging.getLogger('allennlp.modules.token_embedders.embedding').setLevel(logging.INFO) # Import any additional modules needed (to register custom classes) for package_name in args.include_package: import_submodules(package_name) # Load from archive import ipdb ipdb.set_trace() archive = load_archive(args.archive_file, args.cuda_device, args.overrides) config = archive.config prepare_environment(config) model = archive.model model.eval() # Load the evaluation data # Try to use the validation dataset reader if there is one - otherwise fall back # to the default dataset_reader used for both training and validation. validation_dataset_reader_params = config.pop('validation_dataset_reader', None) if validation_dataset_reader_params is not None: dataset_reader = DatasetReader.from_params(validation_dataset_reader_params) else: dataset_reader = DatasetReader.from_params(config.pop('dataset_reader')) evaluation_data_path = args.input_file logger.info("Reading evaluation data from %s", evaluation_data_path) instances = dataset_reader.read(evaluation_data_path) config['iterator']['type'] = 'basic' del config['iterator']['sorting_keys'] data_iterator = DataIterator.from_params(config.pop("iterator")) data_iterator.index_with(model.vocab) cuda_device = args.cuda_device #### EVALUATION AQUI model.eval() iterator = data_iterator(instances, num_epochs=1, shuffle=False, cuda_device=cuda_device, for_training=False) logger.info("Iterating over dataset") generator_tqdm = Tqdm.tqdm(iterator, total=data_iterator.get_num_batches(instances)) label_probs = [] for batch in generator_tqdm: lol = model(**batch) label_probs.append(model(**batch)['label_probs'].data.cpu().numpy()) label_probs = np.concatenate(label_probs) my_preds = pd.DataFrame(label_probs, columns=['ending0','ending1','ending2','ending3']) my_preds['pred'] = label_probs.argmax(1) my_preds.to_csv(args.output_file)
4,020
35.889908
113
py
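The prediction script above ends by stacking the per-batch label_probs into an [N, 4] array and writing a CSV with one probability column per ending plus an argmax column. A self-contained sketch of that output step, with made-up probabilities and a hypothetical output path:

import numpy as np
import pandas as pd

label_probs = np.array([[0.10, 0.70, 0.15, 0.05],
                        [0.40, 0.20, 0.25, 0.15]])
my_preds = pd.DataFrame(label_probs, columns=['ending0', 'ending1', 'ending2', 'ending3'])
my_preds['pred'] = label_probs.argmax(1)           # 1 and 0 for the two rows above
my_preds.to_csv('predictions.csv')                 # hypothetical output file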
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/unarylstm/dataset_reader.py
# slightly different from the other dataset reader from typing import Dict, List import json import logging from overrides import overrides from allennlp.common import Params from allennlp.common.file_utils import cached_path from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.fields import Field, TextField, LabelField from allennlp.data.instance import Instance from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer from allennlp.data.tokenizers import Tokenizer, WordTokenizer import pandas as pd import numpy as np logger = logging.getLogger(__name__) # pylint: disable=invalid-name @DatasetReader.register("swag") class SwagReader(DatasetReader): """ Reads a file from the Stanford Natural Language Inference (SNLI) dataset. This data is formatted as jsonl, one json-formatted instance per line. The keys in the data are "gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label", "premise" and "hypothesis". Parameters ---------- tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``) We use this ``Tokenizer`` for both the premise and the hypothesis. See :class:`Tokenizer`. token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``) We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`. """ def __init__(self, tokenizer: Tokenizer = None, token_indexers: Dict[str, TokenIndexer] = None, use_only_gold_examples: bool = False, only_end: bool = False) -> None: super().__init__(lazy=False) self._tokenizer = tokenizer or WordTokenizer() self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()} self.use_only_gold_examples = use_only_gold_examples self.only_end = only_end @overrides def _read(self, file_path: str): swag = pd.read_csv(file_path) if self.use_only_gold_examples and file_path.endswith('train.csv'): swag = swag[swag['gold-source'].str.startswith('gold')] for _, row in swag.iterrows(): premise = row['sent1'] endings = [row['ending{}'.format(i)] for i in range(4)] if self.only_end: # NOTE: we're JUST USING THE ENDING HERE, so hope that's what you intend hypos = endings else: hypos = ['{} {} {}'.format(row['sent1'], row['sent2'], end) for end in endings] yield self.text_to_instance(premise, hypos, label=row['label'] if hasattr(row, 'label') else None) @overrides def text_to_instance(self, # type: ignore premise: str, hypotheses: List[str], label: int = None) -> Instance: # pylint: disable=arguments-differ fields: Dict[str, Field] = {} # premise_tokens = self._tokenizer.tokenize(premise) # fields['premise'] = TextField(premise_tokens, self._token_indexers) # This could be another way to get randomness for i, hyp in enumerate(hypotheses): hypothesis_tokens = self._tokenizer.tokenize(hyp) fields['hypothesis{}'.format(i)] = TextField(hypothesis_tokens, self._token_indexers) if label is not None: fields['label'] = LabelField(label, skip_indexing=True) return Instance(fields) @classmethod def from_params(cls, params: Params) -> 'SwagReader': tokenizer = Tokenizer.from_params(params.pop('tokenizer', {})) token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {})) use_only_gold_examples = params.pop('use_only_gold_examples', False) only_end = params.pop('only_end', False) params.assert_empty(cls.__name__) return cls(tokenizer=tokenizer, token_indexers=token_indexers, use_only_gold_examples=use_only_gold_examples, only_end=only_end)
4,139
40.4
110
py
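The reader above turns each SWAG CSV row into four hypothesis strings of the form 'sent1 sent2 ending{i}' and attaches the integer label when present. A plain-pandas sketch of that expansion, using the SWAG column names from the code and an invented example row:

import pandas as pd

row = pd.Series({'sent1': 'He opens the fridge.', 'sent2': 'He',
                 'ending0': 'pours a glass of milk.', 'ending1': 'flies to the moon.',
                 'ending2': 'closes the laptop.', 'ending3': 'starts the car.',
                 'label': 0})
hypos = ['{} {} {}'.format(row['sent1'], row['sent2'], row['ending{}'.format(i)]) for i in range(4)]
print(hypos[row['label']])    # the gold continuation: 'He opens the fridge. He pours a glass of milk.'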
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/fasttext/prep_data.py
import pandas as pd from tqdm import tqdm from allennlp.common.util import get_spacy_model USE_ONLY_GOLD_EXAMPLES = False spacy_model = get_spacy_model("en_core_web_sm", pos_tags=False, parse=False, ner=False) def _tokenize(sent): return ' '.join([x.orth_.lower() for x in spacy_model(sent)]) for split in ('train', 'val', 'test'): df = pd.read_csv('../../data/{}.csv'.format(split)) df['distractor-3'].fillna('', inplace=True) if USE_ONLY_GOLD_EXAMPLES and split == 'train': oldsize = df.shape[0] df = df[df['gold-source'].str.startswith('gold')] print("Going from {} -> {} items in train".format(oldsize, df.shape[0])) with open('{}-{}.txt'.format(split, 'goldonly' if USE_ONLY_GOLD_EXAMPLES else 'genalso'), 'w') as f: for _, item in tqdm(df.iterrows()): # num_distractors = 4 if (len(item['distractor-3']) != 0 and split == 'train') else 3 num_distractors = 3 prefix_tokenized = '{} '.format(_tokenize(item['sent1'])) gold_ending_tokenized = _tokenize('{} {}'.format(item['sent2'], item['gold-ending'])) for i in range(num_distractors if split == 'train' else 1): f.write('__label__gold {}\n'.format(prefix_tokenized + gold_ending_tokenized)) for i in range(num_distractors): f.write('__label__fake {}\n'.format(prefix_tokenized + _tokenize('{} {}'.format(item['sent2'], item['distractor-{}'.format(i)])))) # let's just automate this... """ Unigrams ~/tools/fastText/fasttext supervised -input train-goldonly.txt -output model -lr 0.01 -wordNgrams 1 -epoch 5 ~/tools/fastText/fasttext predict-prob model.bin val-goldonly.txt 2 > valpreds-goldonly.txt python compute_performance.py valpreds-goldonly.txt ------------ accuracy 29.2% with 5-grams ~/tools/fastText/fasttext supervised -input train.txt -output model -lr 0.1 -wordNgrams 5 -epoch 50 ~/tools/fastText/fasttext predict-prob model.bin val.txt 2 > val_preds.txt python compute_performance.py rm val_preds.txt ~/tools/fastText/fasttext predict-prob model.bin test.txt 2 > val_preds.txt python compute_performance.py rm val_preds.txt dafuq ~/tools/fastText/fasttext supervised -input train-genalso.txt -output model -lr 0.921 -wordNgrams 1 -epoch 45 ~/tools/fastText/fasttext predict-prob model.bin val-goldonly.txt 2 > preds.txt python compute_performance.py preds.txt """
2,404
39.083333
146
py
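prep_data.py above serializes each question into fastText's supervised format: one lowercased, whitespace-tokenized line per candidate, prefixed with __label__gold or __label__fake. A minimal sketch of that line format (invented text, hypothetical file name):

examples = [('gold', 'he opens the fridge . he pours a glass of milk .'),
            ('fake', 'he opens the fridge . he flies to the moon .')]
with open('train-example.txt', 'w') as f:          # hypothetical output file
    for label, text in examples:
        f.write('__label__{} {}\n'.format(label, text))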
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/fasttext/compute_performance.py
import numpy as np
import argparse
import os

# neg probability, pos prob
parser = argparse.ArgumentParser(description='compute performance')
parser.add_argument('fn', metavar='fn', type=str, help='filename 2 use')
fn = parser.parse_args().fn

rez = []
with open(fn, 'r') as f:
    all_lines = f.read().splitlines()
for line in all_lines:
    linesplit = line.split(' ')
    if linesplit[0] == '__label__gold':
        rez.append((float(linesplit[3]), float(linesplit[1])))
    else:
        rez.append((float(linesplit[1]), float(linesplit[3])))

rez_np = np.array(rez)[:,1].reshape((len(rez) // 4, 4))
ranks = (-rez_np).argsort(1).argsort(1)[:,0]
print("accuracy is {:.3f}".format(np.mean(ranks == 0)))
print('deleting {}'.format(fn))
os.remove(fn)
772
27.62963
67
py
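The evaluation above relies on the prep script writing the gold line first in every block of four, so accuracy is the fraction of questions whose first row carries the highest gold probability. A worked numpy sketch of that ranking arithmetic with invented probabilities:

import numpy as np

gold_probs = np.array([0.9, 0.2, 0.1, 0.3,     # question 1: the gold line (first in its block) wins
                       0.4, 0.7, 0.1, 0.2])    # question 2: a distractor wins
per_question = gold_probs.reshape(-1, 4)
ranks = (-per_question).argsort(1).argsort(1)[:, 0]        # rank of the gold line within each block
print("accuracy is {:.3f}".format(np.mean(ranks == 0)))    # 0.500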
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/fasttext/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/esim/esim_swag.py
# TODO: projection dropout with ELMO # l2 reg with ELMO # multiple ELMO layers # doc from typing import Dict, Optional import torch from torch.autograd import Variable from allennlp.common import Params from allennlp.common.checks import check_dimensions_match from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, MatrixAttention from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder from allennlp.modules.token_embedders import Embedding, ElmoTokenEmbedder from allennlp.nn import InitializerApplicator, RegularizerApplicator from allennlp.nn.util import get_text_field_mask, last_dim_softmax, weighted_sum, replace_masked_values from allennlp.training.metrics import CategoricalAccuracy import logging logger = logging.getLogger(__name__) # pylint: disable=invalid-name class VariationalDropout(torch.nn.Dropout): def forward(self, input): """ input is shape (batch_size, timesteps, embedding_dim) Samples one mask of size (batch_size, embedding_dim) and applies it to every time step. """ # ones = Variable(torch.ones(input.shape[0], input.shape[-1])) ones = Variable(input.data.new(input.shape[0], input.shape[-1]).fill_(1)) dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False) if self.inplace: input *= dropout_mask.unsqueeze(1) return None else: return dropout_mask.unsqueeze(1) * input @Model.register("esim_swag") class ESIM(Model): """ This ``Model`` implements the ESIM sequence model described in `"Enhanced LSTM for Natural Language Inference" <https://www.semanticscholar.org/paper/Enhanced-LSTM-for-Natural-Language-Inference-Chen-Zhu/83e7654d545fbbaaf2328df365a781fb67b841b4>`_ by Chen et al., 2017. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the model. attend_feedforward : ``FeedForward`` This feedforward network is applied to the encoded sentence representations before the similarity matrix is computed between words in the premise and words in the hypothesis. similarity_function : ``SimilarityFunction`` This is the similarity function used when computing the similarity matrix between words in the premise and words in the hypothesis. compare_feedforward : ``FeedForward`` This feedforward network is applied to the aligned premise and hypothesis representations, individually. aggregate_feedforward : ``FeedForward`` This final feedforward network is applied to the concatenated, summed result of the ``compare_feedforward`` network, and its output is used as the entailment class logits. premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``) After embedding the premise, we can optionally apply an encoder. If this is ``None``, we will do nothing. hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``) After embedding the hypothesis, we can optionally apply an encoder. If this is ``None``, we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder`` is also ``None``). initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. 
""" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Seq2SeqEncoder, similarity_function: SimilarityFunction, projection_feedforward: FeedForward, inference_encoder: Seq2SeqEncoder, output_feedforward: FeedForward, output_logit: FeedForward, initializer: InitializerApplicator = InitializerApplicator(), dropout: float = 0.5, regularizer: Optional[RegularizerApplicator] = None) -> None: super().__init__(vocab, regularizer) self._text_field_embedder = text_field_embedder self._encoder = encoder self._matrix_attention = MatrixAttention(similarity_function) self._projection_feedforward = projection_feedforward self._inference_encoder = inference_encoder if dropout: self.dropout = torch.nn.Dropout(dropout) self.rnn_input_dropout = VariationalDropout(dropout) else: self.dropout = None self.rnn_input_dropout = None self._output_feedforward = output_feedforward self._output_logit = output_logit self._num_labels = vocab.get_vocab_size(namespace="labels") check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(), "text field embedding dim", "encoder input dim") check_dimensions_match(encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(), "encoder output dim", "projection feedforward input") check_dimensions_match(projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(), "proj feedforward output dim", "inference lstm input dim") self._accuracy = CategoricalAccuracy() self._loss = torch.nn.CrossEntropyLoss() initializer(self) def forward(self, # type: ignore premise: Dict[str, torch.LongTensor], hypothesis0: Dict[str, torch.LongTensor], hypothesis1: Dict[str, torch.LongTensor], hypothesis2: Dict[str, torch.LongTensor], hypothesis3: Dict[str, torch.LongTensor], label: torch.IntTensor = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ """ Parameters ---------- premise : Dict[str, torch.LongTensor] From a ``TextField`` hypothesis : Dict[str, torch.LongTensor] From a ``TextField`` label : torch.IntTensor, optional (default = None) From a ``LabelField`` Returns ------- An output dictionary consisting of: label_logits : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the entailment label. label_probs : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the entailment label. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
""" hyps = [hypothesis0, hypothesis1, hypothesis2, hypothesis3] if isinstance(self._text_field_embedder, ElmoTokenEmbedder): self._text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() embedded_premise = self._text_field_embedder(premise) embedded_hypotheses = [] for hypothesis in hyps: if isinstance(self._text_field_embedder, ElmoTokenEmbedder): self._text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() embedded_hypotheses.append(self._text_field_embedder(hypothesis)) premise_mask = get_text_field_mask(premise).float() hypothesis_masks = [get_text_field_mask(hypothesis).float() for hypothesis in hyps] # apply dropout for LSTM if self.rnn_input_dropout: embedded_premise = self.rnn_input_dropout(embedded_premise) embedded_hypotheses = [self.rnn_input_dropout(hyp) for hyp in embedded_hypotheses] # encode premise and hypothesis encoded_premise = self._encoder(embedded_premise, premise_mask) label_logits = [] for i, (embedded_hypothesis, hypothesis_mask) in enumerate(zip(embedded_hypotheses, hypothesis_masks)): encoded_hypothesis = self._encoder(embedded_hypothesis, hypothesis_mask) # Shape: (batch_size, premise_length, hypothesis_length) similarity_matrix = self._matrix_attention(encoded_premise, encoded_hypothesis) # Shape: (batch_size, premise_length, hypothesis_length) p2h_attention = last_dim_softmax(similarity_matrix, hypothesis_mask) # Shape: (batch_size, premise_length, embedding_dim) attended_hypothesis = weighted_sum(encoded_hypothesis, p2h_attention) # Shape: (batch_size, hypothesis_length, premise_length) h2p_attention = last_dim_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask) # Shape: (batch_size, hypothesis_length, embedding_dim) attended_premise = weighted_sum(encoded_premise, h2p_attention) # the "enhancement" layer premise_enhanced = torch.cat( [encoded_premise, attended_hypothesis, encoded_premise - attended_hypothesis, encoded_premise * attended_hypothesis], dim=-1 ) hypothesis_enhanced = torch.cat( [encoded_hypothesis, attended_premise, encoded_hypothesis - attended_premise, encoded_hypothesis * attended_premise], dim=-1 ) # embedding -> lstm w/ do -> enhanced attention -> dropout_proj, only if ELMO -> ff proj -> lstm w/ do -> dropout -> ff 300 -> dropout -> output # add dropout here with ELMO # the projection layer down to the model dimension # no dropout in projection projected_enhanced_premise = self._projection_feedforward(premise_enhanced) projected_enhanced_hypothesis = self._projection_feedforward(hypothesis_enhanced) # Run the inference layer if self.rnn_input_dropout: projected_enhanced_premise = self.rnn_input_dropout(projected_enhanced_premise) projected_enhanced_hypothesis = self.rnn_input_dropout(projected_enhanced_hypothesis) v_ai = self._inference_encoder(projected_enhanced_premise, premise_mask) v_bi = self._inference_encoder(projected_enhanced_hypothesis, hypothesis_mask) # The pooling layer -- max and avg pooling. 
# (batch_size, model_dim) v_a_max, _ = replace_masked_values( v_ai, premise_mask.unsqueeze(-1), -1e7 ).max(dim=1) v_b_max, _ = replace_masked_values( v_bi, hypothesis_mask.unsqueeze(-1), -1e7 ).max(dim=1) v_a_avg = torch.sum(v_ai * premise_mask.unsqueeze(-1), dim=1) / torch.sum(premise_mask, 1, keepdim=True) v_b_avg = torch.sum(v_bi * hypothesis_mask.unsqueeze(-1), dim=1) / torch.sum(hypothesis_mask, 1, keepdim=True) # Now concat # (batch_size, model_dim * 2 * 4) v = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1) # the final MLP -- apply dropout to input, and MLP applies to output & hidden if self.dropout: v = self.dropout(v) output_hidden = self._output_feedforward(v) logit = self._output_logit(output_hidden) assert logit.size(-1) == 1 label_logits.append(logit) label_logits = torch.cat(label_logits, -1) label_probs = torch.nn.functional.softmax(label_logits, dim=-1) output_dict = {"label_logits": label_logits, "label_probs": label_probs} if label is not None: loss = self._loss(label_logits, label.long().view(-1)) self._accuracy(label_logits, label.squeeze(-1)) output_dict["loss"] = loss return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: return { 'accuracy': self._accuracy.get_metric(reset), } @classmethod def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM': embedder_params = params.pop("text_field_embedder") text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params) encoder = Seq2SeqEncoder.from_params(params.pop("encoder")) similarity_function = SimilarityFunction.from_params(params.pop("similarity_function")) projection_feedforward = FeedForward.from_params(params.pop('projection_feedforward')) inference_encoder = Seq2SeqEncoder.from_params(params.pop("inference_encoder")) output_feedforward = FeedForward.from_params(params.pop('output_feedforward')) output_logit = FeedForward.from_params(params.pop('output_logit')) initializer = InitializerApplicator.from_params(params.pop('initializer', [])) regularizer = RegularizerApplicator.from_params(params.pop('regularizer', [])) dropout = params.pop("dropout", 0) params.assert_empty(cls.__name__) return cls(vocab=vocab, text_field_embedder=text_field_embedder, encoder=encoder, similarity_function=similarity_function, projection_feedforward=projection_feedforward, inference_encoder=inference_encoder, output_feedforward=output_feedforward, output_logit=output_logit, initializer=initializer, dropout=dropout, regularizer=regularizer)
13,868
45.69697
156
py
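The ESIM model above rolls its own VariationalDropout so that a single dropout mask is shared across every time step of a sequence. A minimal stand-alone sketch of that idea in current PyTorch follows; the function name and toy shapes are made up here and are not taken from the repo.

import torch


def variational_dropout(x: torch.Tensor, p: float, training: bool) -> torch.Tensor:
    """Drop the same embedding dimensions at every time step.

    x is assumed to be (batch_size, timesteps, embedding_dim).
    """
    if not training or p == 0.0:
        return x
    # Sample one mask of shape (batch_size, 1, embedding_dim) ...
    ones = x.new_ones(x.shape[0], 1, x.shape[-1])
    mask = torch.nn.functional.dropout(ones, p=p, training=True)
    # ... and broadcast it over the time dimension.
    return x * mask


if __name__ == '__main__':
    emb = torch.randn(2, 5, 8)
    out = variational_dropout(emb, p=0.5, training=True)
    # For each example, the same embedding dimensions are zeroed at every time step.
    print((out == 0).all(dim=1).any(dim=-1))

Sharing one mask across time steps is the point of variational (locked) dropout: the recurrent encoder sees a consistent corruption pattern over the whole sentence instead of fresh noise at every step.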
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/esim/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/esim/predict.py
""" adapted from Allennlp because their version doesn't seem to work 😢😢😢 """ from typing import Dict, Any, Iterable import argparse import logging from allennlp.commands.subcommand import Subcommand from allennlp.common.util import prepare_environment, import_submodules from allennlp.common.tqdm import Tqdm from allennlp.data import Instance from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.iterators import DataIterator from allennlp.models.archival import load_archive from allennlp.models.model import Model import numpy as np import pandas as pd logger = logging.getLogger(__name__) # pylint: disable=invalid-name if __name__ == '__main__': parser = argparse.ArgumentParser(description="predict") parser.add_argument('archive_file', type=str, help='path to an archived trained model') parser.add_argument('input_file', type=str, help='path to input file') cuda_device = parser.add_mutually_exclusive_group(required=False) cuda_device.add_argument('--cuda-device', type=int, default=0, help='id of GPU to use (if any)') parser.add_argument('-o', '--overrides', type=str, default="", help='a HOCON structure used to override the experiment configuration') parser.add_argument('--include-package', type=str, action='append', default=[], help='additional packages to include') parser.add_argument('--output-file', type=str, help='path to output file') args = parser.parse_args() # Disable some of the more verbose logging statements logging.getLogger('allennlp.common.params').disabled = True logging.getLogger('allennlp.nn.initializers').disabled = True logging.getLogger('allennlp.modules.token_embedders.embedding').setLevel(logging.INFO) # Import any additional modules needed (to register custom classes) for package_name in args.include_package: import_submodules(package_name) # Load from archive archive = load_archive(args.archive_file, args.cuda_device, args.overrides) config = archive.config prepare_environment(config) model = archive.model model.eval() # Load the evaluation data # Try to use the validation dataset reader if there is one - otherwise fall back # to the default dataset_reader used for both training and validation. validation_dataset_reader_params = config.pop('validation_dataset_reader', None) if validation_dataset_reader_params is not None: dataset_reader = DatasetReader.from_params(validation_dataset_reader_params) else: dataset_reader = DatasetReader.from_params(config.pop('dataset_reader')) evaluation_data_path = args.input_file logger.info("Reading evaluation data from %s", evaluation_data_path) instances = dataset_reader.read(evaluation_data_path) config['iterator']['type'] = 'basic' del config['iterator']['sorting_keys'] data_iterator = DataIterator.from_params(config.pop("iterator")) data_iterator.index_with(model.vocab) cuda_device = args.cuda_device #### EVALUATION AQUI model.eval() iterator = data_iterator(instances, num_epochs=1, shuffle=False, cuda_device=cuda_device, for_training=False) logger.info("Iterating over dataset") generator_tqdm = Tqdm.tqdm(iterator, total=data_iterator.get_num_batches(instances)) label_probs = [] for batch in generator_tqdm: lol = model(**batch) label_probs.append(model(**batch)['label_probs'].data.cpu().numpy()) label_probs = np.concatenate(label_probs) my_preds = pd.DataFrame(label_probs, columns=['ending0','ending1','ending2','ending3']) my_preds['pred'] = label_probs.argmax(1) my_preds.to_csv(args.output_file)
3,983
36.233645
113
py
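The prediction script above accumulates per-batch label probabilities, concatenates them, and writes a CSV with one column per ending plus an argmax prediction. A small numpy/pandas-only sketch of that final step, using fabricated probability arrays and a hypothetical output path:

import numpy as np
import pandas as pd

# Hypothetical stand-ins for per-batch model outputs and the output path.
probs_per_batch = [np.random.rand(4, 4), np.random.rand(3, 4)]
output_file = 'predictions.csv'

# Normalise each row so it looks like a softmax output.
probs_per_batch = [p / p.sum(axis=1, keepdims=True) for p in probs_per_batch]

# Concatenate batches into one (num_examples, 4) array of probabilities.
label_probs = np.concatenate(probs_per_batch, axis=0)

preds = pd.DataFrame(label_probs, columns=['ending0', 'ending1', 'ending2', 'ending3'])
preds['pred'] = label_probs.argmax(axis=1)  # index of the highest-probability ending
preds.to_csv(output_file)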
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/esim/dataset_reader.py
# Exactly the same as the other dataset reader from typing import Dict, List import json import logging from overrides import overrides from allennlp.common import Params from allennlp.common.file_utils import cached_path from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.fields import Field, TextField, LabelField from allennlp.data.instance import Instance from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer from allennlp.data.tokenizers import Tokenizer, WordTokenizer import pandas as pd import numpy as np logger = logging.getLogger(__name__) # pylint: disable=invalid-name USE_S1 = True @DatasetReader.register("swag") class SwagReader(DatasetReader): """ Reads a file from the Stanford Natural Language Inference (SNLI) dataset. This data is formatted as jsonl, one json-formatted instance per line. The keys in the data are "gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label", "premise" and "hypothesis". Parameters ---------- tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``) We use this ``Tokenizer`` for both the premise and the hypothesis. See :class:`Tokenizer`. token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``) We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`. """ def __init__(self, tokenizer: Tokenizer = None, token_indexers: Dict[str, TokenIndexer] = None, use_only_gold_examples: bool = False) -> None: super().__init__(lazy=False) self._tokenizer = tokenizer or WordTokenizer() self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()} self.use_only_gold_examples = use_only_gold_examples @overrides def _read(self, file_path: str): if not USE_S1: logger.warning("We're splitting the sentences up here!!!! WATCH OUT\n\n\n") swag = pd.read_csv(file_path) if self.use_only_gold_examples and file_path.endswith('train.csv'): swag = swag[swag['gold-source'].str.startswith('gold')] for _, row in swag.iterrows(): if USE_S1: premise = row['sent1'] endings = [row['ending{}'.format(i)] for i in range(4)] hypos = ['{} {}'.format(row['sent2'], end) for end in endings] else: premise = row['sent2'] hypos = [row['ending{}'.format(i)] for i in range(4)] yield self.text_to_instance(premise, hypos, label=row['label'] if hasattr(row, 'label') else None) @overrides def text_to_instance(self, # type: ignore premise: str, hypotheses: List[str], label: int = None) -> Instance: # pylint: disable=arguments-differ fields: Dict[str, Field] = {} premise_tokens = self._tokenizer.tokenize(premise) fields['premise'] = TextField(premise_tokens, self._token_indexers) # This could be another way to get randomness for i, hyp in enumerate(hypotheses): hypothesis_tokens = self._tokenizer.tokenize(hyp) fields['hypothesis{}'.format(i)] = TextField(hypothesis_tokens, self._token_indexers) if label is not None: fields['label'] = LabelField(label, skip_indexing=True) return Instance(fields) @classmethod def from_params(cls, params: Params) -> 'SwagReader': tokenizer = Tokenizer.from_params(params.pop('tokenizer', {})) token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {})) use_only_gold_examples = params.pop('use_only_gold_examples', False) params.assert_empty(cls.__name__) return cls(tokenizer=tokenizer, token_indexers=token_indexers, use_only_gold_examples=use_only_gold_examples)
4,065
40.917526
110
py
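The reader above joins sent1 with sent2-prefixed endings to form one premise and four hypotheses per SWAG row. A minimal pandas-only sketch of that pairing, assuming a CSV with the sent1/sent2/ending0..3/label columns used in the reader (the example row itself is invented):

import io

import pandas as pd

# A tiny stand-in for a SWAG-style CSV.
csv_text = """sent1,sent2,ending0,ending1,ending2,ending3,label
The chef stirs the pot.,She,tastes the soup.,paints a fence.,flies a kite.,reads a map.,0
"""

swag = pd.read_csv(io.StringIO(csv_text))

for _, row in swag.iterrows():
    premise = row['sent1']
    # Each hypothesis is the shared second-sentence stub plus one candidate ending.
    hypotheses = ['{} {}'.format(row['sent2'], row['ending{}'.format(i)]) for i in range(4)]
    label = int(row['label'])
    print(premise, hypotheses[label])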
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/decomposable_attention/decomposable_attention_swag.py
from typing import Dict, Optional import torch from allennlp.common import Params from allennlp.common.checks import check_dimensions_match from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, MatrixAttention from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder from allennlp.nn import InitializerApplicator, RegularizerApplicator from allennlp.nn.util import get_text_field_mask, last_dim_softmax, weighted_sum from allennlp.training.metrics import CategoricalAccuracy from allennlp.modules.token_embedders import Embedding, ElmoTokenEmbedder import logging logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register("decomposable_attention_swag") class DecomposableAttention(Model): """ This ``Model`` implements the Decomposable Attention model described in `"A Decomposable Attention Model for Natural Language Inference" <https://www.semanticscholar.org/paper/A-Decomposable-Attention-Model-for-Natural-Languag-Parikh-T%C3%A4ckstr%C3%B6m/07a9478e87a8304fc3267fa16e83e9f3bbd98b27>`_ by Parikh et al., 2016, with some optional enhancements before the decomposable attention actually happens. Parikh's original model allowed for computing an "intra-sentence" attention before doing the decomposable entailment step. We generalize this to any :class:`Seq2SeqEncoder` that can be applied to the premise and/or the hypothesis before computing entailment. The basic outline of this model is to get an embedded representation of each word in the premise and hypothesis, align words between the two, compare the aligned phrases, and make a final entailment decision based on this aggregated comparison. Each step in this process uses a feedforward network to modify the representation. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the model. attend_feedforward : ``FeedForward`` This feedforward network is applied to the encoded sentence representations before the similarity matrix is computed between words in the premise and words in the hypothesis. similarity_function : ``SimilarityFunction`` This is the similarity function used when computing the similarity matrix between words in the premise and words in the hypothesis. compare_feedforward : ``FeedForward`` This feedforward network is applied to the aligned premise and hypothesis representations, individually. aggregate_feedforward : ``FeedForward`` This final feedforward network is applied to the concatenated, summed result of the ``compare_feedforward`` network, and its output is used as the entailment class logits. premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``) After embedding the premise, we can optionally apply an encoder. If this is ``None``, we will do nothing. hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``) After embedding the hypothesis, we can optionally apply an encoder. If this is ``None``, we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder`` is also ``None``). initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. 
""" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, attend_feedforward: FeedForward, similarity_function: SimilarityFunction, compare_feedforward: FeedForward, aggregate_feedforward: FeedForward, premise_encoder: Optional[Seq2SeqEncoder] = None, hypothesis_encoder: Optional[Seq2SeqEncoder] = None, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None, preload_path: Optional[str] = None) -> None: super(DecomposableAttention, self).__init__(vocab, regularizer) self._text_field_embedder = text_field_embedder self._attend_feedforward = TimeDistributed(attend_feedforward) self._matrix_attention = MatrixAttention(similarity_function) self._compare_feedforward = TimeDistributed(compare_feedforward) self._aggregate_feedforward = aggregate_feedforward self._premise_encoder = premise_encoder self._hypothesis_encoder = hypothesis_encoder or premise_encoder # self._num_labels = vocab.get_vocab_size(namespace="labels") check_dimensions_match(text_field_embedder.get_output_dim(), attend_feedforward.get_input_dim(), "text field embedding dim", "attend feedforward input dim") # check_dimensions_match(aggregate_feedforward.get_output_dim(), self._num_labels, # "final output dimension", "number of labels") self._accuracy = CategoricalAccuracy() self._loss = torch.nn.CrossEntropyLoss() initializer(self) # Do we want to initialize with the SNLI stuff? let's say yes. # 'snli-decomposable-attention/weights.th' if preload_path is not None: logger.info("Preloading!") preload = torch.load(preload_path) own_state = self.state_dict() for name, param in preload.items(): if name not in own_state: logger.info("Unexpected key {} in state_dict with size {}".format(name, param.size())) elif param.size() == own_state[name].size(): own_state[name].copy_(param) else: logger.info("Network has {} with size {}, ckpt has {}".format(name, own_state[name].size(), param.size())) missing = set(own_state.keys()) - set(preload.keys()) if len(missing) > 0: logger.info("We couldn't find {}".format(','.join(missing))) def forward(self, # type: ignore premise: Dict[str, torch.LongTensor], hypothesis0: Dict[str, torch.LongTensor], hypothesis1: Dict[str, torch.LongTensor], hypothesis2: Dict[str, torch.LongTensor], hypothesis3: Dict[str, torch.LongTensor], label: torch.IntTensor = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ """ Parameters ---------- premise : Dict[str, torch.LongTensor] From a ``TextField`` hypothesis : Dict[str, torch.LongTensor] From a ``TextField`` label : torch.IntTensor, optional (default = None) From a ``LabelField`` Returns ------- An output dictionary consisting of: label_logits : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the entailment label. label_probs : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the entailment label. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
""" if isinstance(self._text_field_embedder, ElmoTokenEmbedder): self._text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() hyps = [hypothesis0, hypothesis1, hypothesis2, hypothesis3] embedded_premise = self._text_field_embedder(premise) if isinstance(self._text_field_embedder, ElmoTokenEmbedder): self._text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() embedded_hypotheses = [] for hypothesis in hyps: if isinstance(self._text_field_embedder, ElmoTokenEmbedder): self.text_field_embedder._elmo._elmo_lstm._elmo_lstm.reset_states() embedded_hypotheses.append(self._text_field_embedder(hypothesis)) premise_mask = get_text_field_mask(premise).float() hypothesis_masks = [get_text_field_mask(hypothesis).float() for hypothesis in hyps] if self._premise_encoder: embedded_premise = self._premise_encoder(embedded_premise, premise_mask) if self._hypothesis_encoder: embedded_hypotheses = [self._hypothesis_encoder(emb, mask) for emb, mask in zip(embedded_hypotheses, hypothesis_masks)] projected_premise = self._attend_feedforward(embedded_premise) label_logits = [] for i, (embedded_hypothesis, hypothesis_mask) in enumerate(zip(embedded_hypotheses, hypothesis_masks)): projected_hypothesis = self._attend_feedforward(embedded_hypothesis) # Shape: (batch_size, premise_length, hypothesis_length) similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis) # Shape: (batch_size, premise_length, hypothesis_length) p2h_attention = last_dim_softmax(similarity_matrix, hypothesis_mask) # Shape: (batch_size, premise_length, embedding_dim) attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention) # Shape: (batch_size, hypothesis_length, premise_length) h2p_attention = last_dim_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask) # Shape: (batch_size, hypothesis_length, embedding_dim) attended_premise = weighted_sum(embedded_premise, h2p_attention) premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1) hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1) compared_premise = self._compare_feedforward(premise_compare_input) compared_premise = compared_premise * premise_mask.unsqueeze(-1) # Shape: (batch_size, compare_dim) compared_premise = compared_premise.sum(dim=1) compared_hypothesis = self._compare_feedforward(hypothesis_compare_input) compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1) # Shape: (batch_size, compare_dim) compared_hypothesis = compared_hypothesis.sum(dim=1) aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1) logit = self._aggregate_feedforward(aggregate_input) assert logit.size(-1) == 1 label_logits.append(logit) label_logits = torch.cat(label_logits, -1) label_probs = torch.nn.functional.softmax(label_logits, dim=-1) output_dict = {"label_logits": label_logits, "label_probs": label_probs} if label is not None: loss = self._loss(label_logits, label.long().view(-1)) self._accuracy(label_logits, label.squeeze(-1)) output_dict["loss"] = loss return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: return { 'accuracy': self._accuracy.get_metric(reset), } @classmethod def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention': embedder_params = params.pop("text_field_embedder") text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params) premise_encoder_params = params.pop("premise_encoder", None) if premise_encoder_params is not None: premise_encoder = 
Seq2SeqEncoder.from_params(premise_encoder_params) else: premise_encoder = None hypothesis_encoder_params = params.pop("hypothesis_encoder", None) if hypothesis_encoder_params is not None: hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params) else: hypothesis_encoder = None attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward')) similarity_function = SimilarityFunction.from_params(params.pop("similarity_function")) compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward')) aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward')) initializer = InitializerApplicator.from_params(params.pop('initializer', [])) regularizer = RegularizerApplicator.from_params(params.pop('regularizer', [])) preload_path = params.pop('preload_path', None) params.assert_empty(cls.__name__) return cls(vocab=vocab, text_field_embedder=text_field_embedder, attend_feedforward=attend_feedforward, similarity_function=similarity_function, compare_feedforward=compare_feedforward, aggregate_feedforward=aggregate_feedforward, premise_encoder=premise_encoder, hypothesis_encoder=hypothesis_encoder, initializer=initializer, regularizer=regularizer, preload_path=preload_path)
13,628
50.430189
164
py
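Both SWAG baselines in this repo hinge on the same attention step: a premise/hypothesis similarity matrix, a softmax over its last dimension that ignores padding, and a weighted sum of the other sentence's vectors. A toy pure-PyTorch sketch of that step is below; it uses plain dot-product similarity and random tensors, whereas the models themselves take a configurable SimilarityFunction and AllenNLP helpers.

import torch
import torch.nn.functional as F

batch, p_len, h_len, dim = 2, 5, 7, 16
premise = torch.randn(batch, p_len, dim)
hypothesis = torch.randn(batch, h_len, dim)
# 1 where a hypothesis token is real, 0 where it is padding.
hypothesis_mask = torch.ones(batch, h_len)
hypothesis_mask[:, -2:] = 0

# (batch, p_len, h_len): similarity between every premise/hypothesis token pair.
similarity = torch.bmm(premise, hypothesis.transpose(1, 2))

# Masked softmax over the hypothesis dimension: padded tokens get ~zero weight.
masked_similarity = similarity.masked_fill(hypothesis_mask.unsqueeze(1) == 0, -1e9)
p2h_attention = F.softmax(masked_similarity, dim=-1)

# (batch, p_len, dim): for each premise token, a mixture of hypothesis vectors.
attended_hypothesis = torch.bmm(p2h_attention, hypothesis)
print(attended_hypothesis.shape)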
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/decomposable_attention/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/swag_baselines/decomposable_attention/dataset_reader.py
# Exactly the same as the other dataset reader from typing import Dict, List import json import logging from overrides import overrides from allennlp.common import Params from allennlp.common.file_utils import cached_path from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.fields import Field, TextField, LabelField from allennlp.data.instance import Instance from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer from allennlp.data.tokenizers import Tokenizer, WordTokenizer import pandas as pd import numpy as np logger = logging.getLogger(__name__) # pylint: disable=invalid-name USE_S1 = True @DatasetReader.register("swag") class SwagReader(DatasetReader): """ Reads a file from the Stanford Natural Language Inference (SNLI) dataset. This data is formatted as jsonl, one json-formatted instance per line. The keys in the data are "gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label", "premise" and "hypothesis". Parameters ---------- tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``) We use this ``Tokenizer`` for both the premise and the hypothesis. See :class:`Tokenizer`. token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``) We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`. """ def __init__(self, tokenizer: Tokenizer = None, token_indexers: Dict[str, TokenIndexer] = None, use_only_gold_examples: bool = False) -> None: super().__init__(lazy=False) self._tokenizer = tokenizer or WordTokenizer() self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()} self.use_only_gold_examples = use_only_gold_examples @overrides def _read(self, file_path: str): if not USE_S1: logger.warning("We're splitting the sentences up here!!!! WATCH OUT\n\n\n") swag = pd.read_csv(file_path) if self.use_only_gold_examples and file_path.endswith('train.csv'): swag = swag[swag['gold-source'].str.startswith('gold')] for _, row in swag.iterrows(): if USE_S1: premise = row['sent1'] endings = [row['ending{}'.format(i)] for i in range(4)] hypos = ['{} {}'.format(row['sent2'], end) for end in endings] else: premise = row['sent2'] hypos = [row['ending{}'.format(i)] for i in range(4)] yield self.text_to_instance(premise, hypos, label=row['label'] if hasattr(row, 'label') else None) @overrides def text_to_instance(self, # type: ignore premise: str, hypotheses: List[str], label: int = None) -> Instance: # pylint: disable=arguments-differ fields: Dict[str, Field] = {} premise_tokens = self._tokenizer.tokenize(premise) fields['premise'] = TextField(premise_tokens, self._token_indexers) # This could be another way to get randomness for i, hyp in enumerate(hypotheses): hypothesis_tokens = self._tokenizer.tokenize(hyp) fields['hypothesis{}'.format(i)] = TextField(hypothesis_tokens, self._token_indexers) if label is not None: fields['label'] = LabelField(label, skip_indexing=True) return Instance(fields) @classmethod def from_params(cls, params: Params) -> 'SwagReader': tokenizer = Tokenizer.from_params(params.pop('tokenizer', {})) token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {})) use_only_gold_examples = params.pop('use_only_gold_examples', False) params.assert_empty(cls.__name__) return cls(tokenizer=tokenizer, token_indexers=token_indexers, use_only_gold_examples=use_only_gold_examples)
4,065
40.917526
110
py
fat-albert
fat-albert-master/bert/datasets/SWAG/raw_data/events.py
""" Dataloader for event data. this includes - rocstories - didemo - MPII - activitynet captions """ import pandas as pd import json from collections import defaultdict import numpy as np import re import os from tqdm import tqdm from allennlp.common.util import get_spacy_model from tqdm import tqdm from unidecode import unidecode from num2words import num2words # TODO! change with where you're keeping the data raise ValueError("you should make sure the data you want is here. then you can delete this error") DATA_PATH = os.path.dirname(os.path.realpath(__file__)) def remove_allcaps(sent): """ Given a sentence, filter it so that it doesn't contain some words that are ALLcaps :param sent: string, like SOMEONE wheels SOMEONE on, mouthing silent words of earnest prayer. :return: Someone wheels someone on, mouthing silent words of earnest prayer. """ # Remove all caps def _sanitize(word, is_first): if word in ("I"): return word num_capitals = len([x for x in word if not x.islower()]) if num_capitals > len(word) // 2: # We have an all caps word here. if is_first: return word[0] + word[1:].lower() return word.lower() return word return ' '.join([_sanitize(word, i == 0) for i, word in enumerate(sent.split(' '))]) def load_rocstories(split): """ Load rocstories dataset :param split: in train, val, or test. note that training doesn't have the endings. for now we'll remove the endings :return: """ assert split in ('train', 'val', 'test') if split in 'train': df = pd.concat(( pd.read_csv(os.path.join(DATA_PATH, 'rocstories', 'ROCStories__spring2016 - ROCStories_spring2016.csv')), pd.read_csv(os.path.join(DATA_PATH, 'rocstories', 'ROCStories_winter2017 - ROCStories_winter2017.csv')), ), 0) else: df = pd.read_csv( os.path.join(DATA_PATH, 'rocstories', 'cloze_test_{}__spring2016 - cloze_test_ALL_{}.csv'.format( split, split))) # FOR NOW REMOVE THE ENDINGS AND PRETEND IT'S THE SAME AS TRAINING DATA df['InputSentence5'] = df.apply( lambda x: [x['RandomFifthSentenceQuiz1'], x['RandomFifthSentenceQuiz2']][x['AnswerRightEnding'] - 1], axis=1) df = df[['InputStoryid'] + ['InputSentence{}'.format(i + 1) for i in range(5)]] df.columns = ['storyid'] + ['sentence{}'.format(i + 1) for i in range(5)] rocstories = [] for i, item in df.iterrows(): rocstories.append( {'id': item['storyid'], 'sentences': [item['sentence{}'.format(i + 1)] for i in range(5)]}) return rocstories # def load_rocstories_nogender(split): # spacy = get_spacy_model("en_core_web_sm", pos_tags=False, parse=False, ner=True) # import gender_guesser.detector as gender # d = gender.Detector() # # def _replace(name, is_cap=False): # gend = d.get_gender(name) # if gend in ('male', 'mostly_male', 'andy'): # return 'The man' if is_cap else 'the man' # if gend in ('female', 'mostly_female'): # return 'The woman' if is_cap else 'the woman' # return name # # def _fix_sentence(sent): # doc = spacy(sent) # sentence = doc.text # for ent in doc.ents: # if ent.label_ == 'PERSON': # repl_text_cap = _replace(ent.text, is_cap=True) # if repl_text_cap != ent.text: # repl_text_lower = _replace(ent.text, is_cap=False) # sentence = re.sub('^' + ent.text, repl_text_cap, sentence) # sentence = re.sub(ent.text, repl_text_lower, sentence) # return sentence # # roc = load_rocstories(split) # for story in tqdm(roc): # for i in range(5): # story['sentences'][i] = _fix_sentence(story['sentences'][i]) # return roc def _to_time(pandas_col): offset = pd.to_datetime('00.00.00.000', format='%H.%M.%S.%f') return (pd.to_datetime(pandas_col, format='%H.%M.%S.%f') - 
offset).dt.total_seconds() def _lsmdc_to_list(lsmdc, lsmdc_window=20): """ :param lsmdc: Dataframe :param lmsdc_window: # We'll allow lsmdc_window seconds of a gap between chains :return: a list of annotations """ movie = '' t_end = 0 lsmdc_list = [] cur_chain = {'sentences': []} for (i, item) in lsmdc.iterrows(): # If new movie then push if movie != item['movie'] or item['start_aligned'] > t_end + lsmdc_window: if len(cur_chain['sentences']) > 1: lsmdc_list.append(cur_chain) cur_chain = {'sentences': [], 'timestamps': [], 'id': '{}-{}'.format(item['movie'], i)} movie = item['movie'] t_end = item['end_aligned'] cur_chain['sentences'].append(item['sentence']) cur_chain['timestamps'].append((item['start_aligned'], item['end_aligned'])) cur_chain['duration'] = item['end_aligned'] - cur_chain['timestamps'][0][0] lsmdc_list.append(cur_chain) return lsmdc_list def load_mpii(split): """ Load MPII dataset with the <person> </person> annotations :return: """ lsmdc = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'annotations-original-coreferences-ner.csv'), sep='\t', header=None, names=['id', 'sentence']) lsmdc['movie'] = lsmdc['id'].apply(lambda x: '_'.join(x.split('_')[:-1])) lsmdc['start_aligned'] = _to_time(lsmdc['id'].apply(lambda x: x.split('_')[-1].split('-')[0])) lsmdc['end_aligned'] = _to_time(lsmdc['id'].apply(lambda x: x.split('_')[-1].split('-')[1])) datasplit = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'dataSplit.txt'), sep='\t', header=None, names=['movie', 'split']) include_these = set([m['movie'] for _, m in datasplit.iterrows() if m['split'] == {'train': 'training', 'val': 'validation', 'test': 'test'}[split]]) lsmdc = lsmdc[lsmdc.movie.isin(include_these)] assert len(include_these) == len(set(lsmdc['movie'].values)) # This is a bit crude but whatever def _pop_tags(sent): sent_no_person = re.sub(r'</?PERSON>', '', sent) # Match everything of the form She<BLAH BLAH>. sometimes there is she<> so we dont want that sent_no_pronoun = re.sub(r'\w*<([^<>]+?)>', lambda x: x[1], sent_no_person) # Some have brackets in the original sentences so get rid of those sent_no_pronoun = re.sub(r'<>', '', sent_no_pronoun) return sent_no_pronoun lsmdc['sentence'] = lsmdc['sentence'].apply(_pop_tags) return _lsmdc_to_list(lsmdc) def load_mpii_depersonized(split): """ Loads the MPII dataset, but remove names. Instead we'll replace them with their genders. EX <PERSON>Helen</PERSON> gets replaced with the woman. 
:return: """ lsmdc = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'annotations-original-coreferences-ner.csv'), sep='\t', header=None, names=['id', 'sentence']) lsmdc['movie'] = lsmdc['id'].apply(lambda x: '_'.join(x.split('_')[:-1])) lsmdc['start_aligned'] = _to_time(lsmdc['id'].apply(lambda x: x.split('_')[-1].split('-')[0])) lsmdc['end_aligned'] = _to_time(lsmdc['id'].apply(lambda x: x.split('_')[-1].split('-')[1])) datasplit = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'dataSplit.txt'), sep='\t', header=None, names=['movie', 'split']) include_these = set([m['movie'] for _, m in datasplit.iterrows() if m['split'] == {'train': 'training', 'val': 'validation', 'test': 'test'}[split]]) lsmdc = lsmdc[lsmdc.movie.isin(include_these)] movies_depersonized = [] for movie_name, movie_df in tqdm(lsmdc.groupby('movie')): chars = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'coref_characters', '{}-characters.csv'.format(movie_name)), sep='\t', header=None, names=['longname', 'shortname', 'gender'], index_col='longname') char2gender = {char: gender for char, gender in chars['gender'].items()} def _replace(name, is_cap=False): if name not in char2gender: return 'Someone' if is_cap else 'someone' if char2gender[name] == 'W': return 'The woman' if is_cap else 'the woman' return 'The man' if is_cap else 'the man' def _pop_tags(sent): # Match the start of the string. sent0 = re.sub( r'^<PERSON>(.+?)</PERSON>', lambda match: _replace(match[1], is_cap=True), sent) # Match other parts of the string sent1 = re.sub( r'<PERSON>(.+?)</PERSON>', lambda match: _replace(match[1], is_cap=False), sent0) # Match everything of the form She<BLAH BLAH>. # sometimes there is she<> so we dont want that sent2 = re.sub(r'(\w*)<[^<>]+?>', lambda x: x[1], sent1) # Some have brackets in the original sentences so get rid of those sent3 = re.sub(r'<>', '', sent2) sent4 = remove_allcaps(sent3) # This is really minor but if it ends with " ." then change that. sent5 = sent4[:-1].rstrip() + '.' if sent4.endswith(' .') else sent4 # # Verbose # print("----\n{}->{}->{}->{}->{}\n".format(sent0, sent1, sent2, sent3, sent4)) return sent5 movie_df_copy = movie_df.copy() movie_df_copy['sentence'] = movie_df_copy['sentence'].apply(_pop_tags) movies_depersonized.append(movie_df_copy) return _lsmdc_to_list(pd.concat(movies_depersonized, 0)) def load_visual_madlibs(split): """ Loads the Visual Madlibs dataset, including captions from COCO as the premises :return: """ # Let's make sure each contains a verb. 
spacy = get_spacy_model("en_core_web_sm", pos_tags=True, parse=True, ner=False) from nltk.tokenize.moses import MosesDetokenizer # from pattern.en import conjugate, verbs, PRESENT detokenizer = MosesDetokenizer() def _sentence_contains_verb(sent): spacy_parse = spacy(sent) for tok in spacy_parse: if tok.pos_ == 'VERB' and tok.lemma_ not in ('is', 'has'): return True return False def order_sents(sent_list): sentence_has_verb = np.array([_sentence_contains_verb(s) for i, s in enumerate(sent_list)]) sentence_length = np.array([len(x) for x in sent_list]) sentence_score = sentence_has_verb.astype(np.float32) + sentence_length / sentence_length.max() best_to_worst = np.argsort(-sentence_score).tolist() return [sent_list[i] for i in best_to_worst] futures_fn = { 'train': 'tr_futures.json', 'val': 'val_easy_multichoice_futures.json', 'test': 'val_hard_multichoice_futures.json', }[split] key = {'train': 'tr_futures', 'val': "multichoice_futures", 'test': "multichoice_futures", }[split] id2futureandpast = defaultdict(lambda: {'captions': [], 'future': [], 'past': []}) # with open(os.path.join(DATA_PATH, 'visualmadlibs', 'tr_pasts.json'), 'r') as f: # for item in json.load(f)['tr_pasts']: # id2futureandpast[item['image_id']]['past'] = order_sents(item['fitbs']) with open(os.path.join(DATA_PATH, 'visualmadlibs', futures_fn), 'r') as f: for item in json.load(f)[key]: if split == 'train': id2futureandpast[item['image_id']]['future'] = order_sents(item['fitbs']) else: id2futureandpast[item['image_id']]['future'] = [item['pos']] with open(os.path.join(DATA_PATH, 'coco', 'dataset_coco.json'), 'r') as f: imgid2caps = {item['cocoid']: ([sent['raw'] for sent in item['sentences']], item['split']) for item in json.load(f)['images']} vml = [] for k in tqdm(id2futureandpast): for cap, future in zip(order_sents(imgid2caps[k][0]), id2futureandpast[k]['future']): # Spacy parse the future sentence, change to present tense spacy_parse = [(x.orth_, x.pos_, x.dep_) for x in spacy(future)] # If there's a ROOT that doesn't start with ing, parse that is_match = False for i, (word, pos, dep) in enumerate(spacy_parse): if pos == 'VERB' and dep == 'ROOT' and not word.endswith('ing'): spacy_parse[i] = (conjugate(word, tense=PRESENT), pos, dep) is_match = True # Else convert AUXes if not is_match: for i, (word, pos, dep) in enumerate(spacy_parse): if pos == 'VERB' and dep == 'aux': spacy_parse[i] = (conjugate(word, tense=PRESENT), pos, dep) future_fixed = detokenizer.detokenize([x[0] for x in spacy_parse], return_str=True) print("{} -> {}".format(future, future_fixed), flush=True) future_fixed = future_fixed[0].capitalize() + future_fixed[1:] vml.append({'id': k, 'sentences': [cap, future_fixed]}) # ABANDON THIS FOR NOW. 
# # id2futureandpast[k]['captions'] = (order_sents(imgid2caps[k][0]), imgid2caps[k][1]) # # # # Join everything # vml = [] # for id, val in id2futureandpast.items(): # vml.append({'id': id, 'sentences': ['{} Afterwards, {}.'.format(cap, future) # for cap, future in zip( # val['captions'][0], val['future'])]}) # import ipdb # ipdb.set_trace() return vml def load_vist(split): """ Loads the VIST dataset :return: """ id_to_annos = defaultdict(list) with open(os.path.join(DATA_PATH, 'vist', '{}.story-in-sequence.json'.format(split)), 'r') as f: for item in json.load(f)['annotations']: assert len(item) == 1 id_to_annos[int(item[0]['story_id'])].append(item[0]) for k in id_to_annos: id_to_annos[k] = sorted(id_to_annos[k], key=lambda x: x['story_id']) res = [{'id': id, 'sentences': [x['original_text'] for x in v]} for id, v in id_to_annos.items()] return res def load_lsmdc(split): """ Loads LSMDC with <someone> annotations #TODO: investigate filtering things. # 1: all sentences need verbs # 2: filter out all sentences that don't begin with a capital letter (these are often incomplete) # 3. all sentences need objects :return: """ lsmdc = pd.read_csv(os.path.join(DATA_PATH, 'movies', 'LSMDC16_annos_{}.csv'.format( {'train': 'training', 'val': 'val', 'test': 'test'}[split])), sep='\t', header=None, names=['movie', 'start_aligned', 'end_aligned', 'start_extracted', 'end_extracted', 'sentence']) lsmdc['movie'] = lsmdc['movie'].apply(lambda x: '_'.join(x.split('_')[:-1])) del lsmdc['start_extracted'] del lsmdc['end_extracted'] lsmdc['start_aligned'] = _to_time(lsmdc['start_aligned']) lsmdc['end_aligned'] = _to_time(lsmdc['end_aligned']) def _fix_sent(sent): sent1 = remove_allcaps(sent) # This is really minor but if it ends with " ." then change that. sent2 = sent1[:-1].rstrip() + '.' if sent1.endswith(' .') else sent1 return unidecode(sent2) from nltk.tokenize.moses import MosesDetokenizer detokenizer = MosesDetokenizer() spacy = get_spacy_model("en_core_web_sm", pos_tags=True, parse=True, ner=False) def check_if_sent_is_grammatical(sent): if sent[0].islower(): print("{} is not grammatical (lowercase start)".format(sent)) return '' # Sanitize the sentence sent_sanitized = remove_allcaps(sent) # Loop over subsentences to find a good one for sent_parsed in spacy(sent_sanitized).sents: root = sent_parsed.root if root.pos_ != 'VERB': print("{} is not grammatical (noverb)".format(sent)) pass elif sent_parsed[0].orth_ in ('and', 'where', 'when'): print("{} is not grammatical (and)".format(sent)) pass elif sent_parsed[-2].orth_ in ('and', 'then'): print("{} is not grammatical (and then)".format(sent)) pass elif not any([x.dep_ in ('nsubj', 'nsubjpass') for x in sent_parsed]): print("{} is not grammatical (no subj)".format(sent)) pass else: print('good! 
{}'.format(sent)) return unidecode(detokenizer.detokenize([x.orth_ for x in sent_parsed], return_str=True)) return '' lsmdc['sentence'] = lsmdc['sentence'].apply(check_if_sent_is_grammatical) lsmdc = lsmdc[lsmdc['sentence'].str.len() > 0] return _lsmdc_to_list(lsmdc) def load_didemo(split): with open(os.path.join(DATA_PATH, 'didemo', '{}_data.json'.format(split)), 'r') as f: didemo = json.load(f) all_didemo = defaultdict(list) for item in didemo: # Make didemo look the same as activitynet captions timestamp = np.round(np.array(item['times']).mean(0)) * 5 timestamp[1] += 5 # technically it can be less if we're at the last segment but whatever item['timestamp'] = timestamp.tolist() all_didemo[item['dl_link']].append(item) didemo_list = [] for k, vid in sorted(all_didemo.items(), key=lambda x: x[0]): clip_info = {'duration': max([item['timestamp'][1] for item in vid]), 'sentences': [], 'timestamps': [], 'id': k} for item in sorted(vid, key=lambda x: x['timestamp'][0]): clip_info['sentences'].append(item['description']) clip_info['timestamps'].append(item['timestamp']) didemo_list.append(clip_info) return didemo_list def load_anet(split): with open(os.path.join(DATA_PATH, 'activitynetcaptions', '{}.json'.format( {'train': 'train', 'val': 'val_1', 'test': 'val_2'}[split])), 'r') as f: anet = json.load(f) for k, v in anet.items(): v['id'] = k v['sentences'] = [remove_allcaps(unidecode(x.strip())) for x in v['sentences']] anet = [anet[k] for k in sorted(anet.keys())] return anet def load_ava(split): assert split in ('val', 'test') ava_annos = pd.read_csv(os.path.join(DATA_PATH, 'ava', 'ava_{}_v2.0.csv'.format(split)), sep=',', header=None, names=['video_id', 'middle_frame_timestamp', 'x1', 'y1', 'x2', 'y2', 'action_id']) key = pd.read_csv(os.path.join(DATA_PATH, 'ava', 'ava_action_list_v2.0.csv'), index_col='label_id') ava_annos['action'] = ava_annos['action_id'].replace({i: item['label_name'] for i, item in key.iterrows()}) ava_annos_grouped = ava_annos.groupby('video_id') for name, group in ava_annos_grouped: results_by_time = {} for timestep, subgroup in group.groupby('middle_frame_timestamp'): results_by_time[timestep] = set(subgroup['action'].unique().tolist()) def n2w_1k(x, use_ordinal=False): if x > 1000: return '' return num2words(x, to='ordinal' if use_ordinal else 'cardinal') def _postprocess(sentence): """ make sure punctuation is followed by a space :param sentence: :return: """ # Aggressively get rid of some punctuation markers sent0 = re.sub(r'^.*(\\|/|!!!|~|=|#|@|\*|¡|©|¿|«|»|¬|{|}|\||\(|\)|\+|\]|\[).*$', ' ', sentence, flags=re.MULTILINE|re.IGNORECASE) # Less aggressively get rid of quotes, apostrophes sent1 = re.sub(r'"', ' ', sent0) sent2 = re.sub(r'`', '\'', sent1) # match ordinals sent3 = re.sub(r'(\d+(?:rd|st|nd))', lambda x: n2w_1k(int(x.group(0)[:-2]), use_ordinal=True), sent2) #These things all need to be followed by spaces or else we'll run into problems sent4 = re.sub(r'[:;,\"\!\.\-\?](?! )', lambda x: x.group(0) + ' ', sent3) #These things all need to be preceded by spaces or else we'll run into problems sent5 = re.sub(r'(?! 
)[-]', lambda x: ' ' + x.group(0), sent4) # Several spaces sent6 = re.sub(r'\s\s+', ' ', sent5) sent7 = sent6.strip() return sent7 def load_everything(): def _stamp(l, stamp_name): for x in l: x['dataset'] = stamp_name return l everything = {} for split in ('train', 'val', 'test'): everything_this_split = [] # everything_this_split += _stamp(load_mpii_depersonized(split), 'mpii') # everything_this_split += _stamp(load_didemo(split), 'didemo') everything_this_split += _stamp(load_lsmdc(split), 'lsmdc') everything_this_split += _stamp(load_anet(split), 'anet') # if split == 'train': # everything_this_split += _stamp(load_visual_madlibs(split), 'vml') # everything_this_split += _stamp(load_vist(split), 'vist') # everything_this_split += _stamp(load_rocstories_nogender(split), 'rocstories') everything[split] = everything_this_split # Postprocessing for split in everything: for item in everything[split]: for i in range(len(item['sentences'])): item['sentences'][i] = _postprocess(item['sentences'][i]) with open('events-3.json', 'w') as f: json.dump(everything, f) return everything ##### # Get what portion is projective # non_projective = [x for x in anet if any(t0[1] > t1[0]+0.5 for t0, t1 in zip(x['timestamps'][:-1], # x['timestamps'][1:]))] if __name__ == '__main__': # mpii = load_mpii_depersonized('train') # lsmdc = load_lsmdc('train') # didemo = load_didemo('train') # anet = load_anet('train') # visualmadlibs = load_visual_madlibs('train') everything = load_everything() # # assert False # # Count how many sentences are there # mpii_num = sum([len(item['sentences']) for item in mpii]) # lsmdc_num = sum([len(item['sentences']) for item in lsmdc]) # didemo_num = sum([len(item['sentences']) for item in didemo]) # anet_num = sum([len(item['sentences']) for item in anet]) # roc_num = len(roc) * 5 # # spacy = get_spacy_model("en_core_web_sm", pos_tags=True, parse=True, ner=False) # # # def _count_verbs_in_dataset(dataset): # verb_counts = defaultdict(int) # for x in tqdm(dataset): # for sent in x['sentences']: # for tok in spacy(sent): # if tok.pos_ == 'VERB': # verb_counts[tok.lemma_.lower()] += 1 # return verb_counts # # # mpii_counts = _count_verbs_in_dataset(mpii) # anet_counts = _count_verbs_in_dataset(anet) # # # rocstories scrambled: # import random # # random.shuffle(roc) # # # def scrambled_stories(): # for story in roc: # perm = np.random.permutation(5) # unperm = np.argsort(perm) # story_permed = [story['sentences'][i] for i in perm] # yield story_permed, unperm
23,643
39.417094
148
py
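Several loaders above funnel movie-script text through a heuristic that lowercases ALL-CAPS words (speaker cues and the like) while leaving ordinary words and "I" alone. A compact stand-alone version of that heuristic, written fresh here rather than copied from the file, is:

def soften_allcaps(sentence: str) -> str:
    """Lowercase words that are mostly uppercase, keeping the leading capital of the first word."""
    def fix(word: str, is_first: bool) -> str:
        if word == 'I':
            return word
        num_caps = sum(1 for ch in word if ch.isupper())
        if num_caps > len(word) // 2:  # treat the word as a shouted cue name
            return word.capitalize() if is_first else word.lower()
        return word

    return ' '.join(fix(w, i == 0) for i, w in enumerate(sentence.split(' ')))


print(soften_allcaps('SOMEONE wheels SOMEONE on, mouthing silent words.'))
# -> 'Someone wheels someone on, mouthing silent words.'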
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/questions2mturk.py
import random
import pickle as pkl
import numpy as np
from tqdm import tqdm
from nltk.tokenize.moses import MosesDetokenizer
import pandas as pd
import re

detokenizer = MosesDetokenizer()

NUM_DISTRACTORS = 5


def _detokenize(sent):
    s0 = detokenizer.detokenize(sent, return_str=True)
    s1 = re.sub(r'\b(ca|do|wo)\sn\'t', r"\1n't", s0, flags=re.IGNORECASE)
    return s1


assignments = np.load('assignments-22.npy')
start_ind = 0
df = []
for fold_id in range(5):
    with open('../../generate_candidates/examples{}-of-5.pkl'.format(fold_id), 'rb') as f:
        ex_this_fold = pkl.load(f)
    assignments_this_fold = assignments[start_ind:start_ind + len(ex_this_fold)]
    start_ind += len(ex_this_fold)

    for i, (this_example, assignments_i) in enumerate(zip(tqdm(ex_this_fold), assignments_this_fold)):
        selected_gens = [this_example['generations'][i] for i in assignments_i.tolist()]

        # Find sent1 from the given sentences
        sent1 = _detokenize(this_example['sent1'])
        if sent1[0].islower():
            sent1 = sent1[0].upper() + sent1[1:]
        sent2 = _detokenize(this_example['startphrase'])

        # perm = np.random.permutation(NUM_DISTRACTORS+1)
        perm = np.arange(10)
        series_dict = {
            'item_ind': i,
            'fold_id': 0,
            'item_id': this_example['dataset'] + this_example['id'],
            'startphrase': '{} {}'.format(sent1, sent2),
            'sent1': sent1,
            'sent2': sent2,
            'gold': int(np.where(perm == 0)[0][0]),
        }
        for i, perm in enumerate(perm.tolist()):
            series_dict['completion-{:d}'.format(i)] = _detokenize(selected_gens[perm])
        df.append(pd.Series(series_dict))

random.seed(123456)
random.shuffle(df)
df = pd.DataFrame(df)

batch_size = 1
batch_df = []
for j in range(df.shape[0] // batch_size):
    batch_df.append(pd.Series({'{}-{}'.format(i, name): val
                               for i, (_, item) in enumerate(df[j * batch_size:(j + 1) * batch_size].iterrows())
                               for name, val in item.items()}))
batch_df = pd.DataFrame(batch_df)
batch_df.to_csv('batch_df_FULL.csv', index=False)
2,111
32.52381
167
py
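The last loop above packs batch_size questions into a single wide row (columns named like 0-sent1, 1-sent1, ...) so that one Mechanical Turk HIT can show several items at once. A toy sketch of that reshaping with a made-up two-column frame (column names and values are invented):

import pandas as pd

df = pd.DataFrame({'sent1': ['A', 'B', 'C', 'D'], 'gold': [0, 1, 2, 3]})
batch_size = 2

rows = []
for j in range(df.shape[0] // batch_size):
    chunk = df.iloc[j * batch_size:(j + 1) * batch_size]
    # Flatten the chunk into one wide row keyed by "<position in batch>-<column>".
    rows.append(pd.Series({'{}-{}'.format(i, col): val
                           for i, (_, item) in enumerate(chunk.iterrows())
                           for col, val in item.items()}))

print(pd.DataFrame(rows))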
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/rebalance_dataset_mlp.py
""" The big idea will be to add in the worst scoring one. But we want to use a MULTILAYER PERCEPTRON. Also not using word features for now """ import matplotlib as mpl mpl.use('Agg') import seaborn as sns import matplotlib.pyplot as plt from allennlp.data import Vocabulary from torch.nn import functional as F from torch import nn from torch.autograd import Variable import pickle as pkl import numpy as np from torch import optim import torch from tqdm import tqdm, trange from pytorch_misc import clip_grad_norm, time_batch import pandas as pd import os ######### PARAMETERS NUM_DISTRACTORS = 9 TRAIN_PERC = 0.8 vocab = Vocabulary.from_files('../lm/vocabulary') all_data = [] if os.path.exists('feats_cached.npy'): all_data = np.load('feats_cached.npy') else: print("loading data. this will take hella time probably!", flush=True) for fold in trange(5): print("tryna load {}".format(fold, flush=True)) with open('examples{}-of-5.pkl'.format(fold), 'rb') as f: examples = pkl.load(f) for this_ex in examples: feats_vals = this_ex['scores'].values if np.isinf(feats_vals).any(): feats_vals[np.isinf(feats_vals)] = 1e17 feats = np.column_stack(( np.log(feats_vals), np.array([len(gen) for gen in this_ex['generations']], dtype=np.float32), np.ones(feats_vals.shape[0], dtype=np.float32) * len(this_ex['startphrase']), np.ones(feats_vals.shape[0], dtype=np.float32) * len(this_ex['sent1']), )) all_data.append(feats) all_data = np.stack(all_data) np.save('feats_cached.npy', all_data) print("There are {} things".format(all_data.shape[0]), flush=True) assignments = np.arange(NUM_DISTRACTORS + 1, dtype=np.uint16)[None].repeat(all_data.shape[0], axis=0) class SimpleCudaLoader(object): """ silly cuda loader""" def __init__(self, indices, is_train=True, recompute_assignments=False, batch_size=512, ): self.indices = indices self.is_train = is_train self.recompute_assignments = recompute_assignments if self.recompute_assignments: self.feats = all_data[self.indices] else: self.feats = all_data[np.arange(all_data.shape[0])[:, None], assignments][self.indices] self.batch_size = batch_size def __iter__(self): """ Iterator for a cuda type application. :return: """ # First cuda-ize everything if self.is_train: perm_vec = np.random.permutation(self.feats.shape[0]) feats_to_use = self.feats[perm_vec] inds_to_use = self.indices[perm_vec] else: feats_to_use = self.feats inds_to_use = self.indices feats_cuda = torch.FloatTensor(feats_to_use).contiguous().cuda(async=True) for s_idx in range(len(self)): s_ind = s_idx * self.batch_size e_ind = min(s_ind + self.batch_size, self.feats.shape[0]) if e_ind < self.batch_size and self.is_train: # Skip small batch on training return yield Variable(feats_cuda[s_ind:e_ind], volatile=not self.is_train), inds_to_use[s_ind:e_ind] @classmethod def randomsplits(cls): """ Makes some random splits! 
But keeping in mind the (global) assignments info :return: """ idx = np.random.permutation(all_data.shape[0]) train_idx = idx[:int(TRAIN_PERC * idx.shape[0])] val_idx = np.sort(idx[int(TRAIN_PERC * idx.shape[0]):]) return cls(train_idx, is_train=True), cls(val_idx, is_train=False), cls(val_idx, recompute_assignments=True, is_train=False), def __len__(self): if self.is_train: return self.feats.shape[0] // self.batch_size else: return (self.feats.shape[0] + self.batch_size - 1) // self.batch_size class MLPModel(nn.Module): def __init__(self): super(MLPModel, self).__init__() # self.mapping = nn.Linear(train_data.feats.shape[2], 1, bias=False) self.mapping = nn.Sequential( nn.Linear(all_data.shape[-1], 2048, bias=True), nn.SELU(), nn.AlphaDropout(p=0.2), nn.Linear(2048, 2048, bias=True), nn.SELU(), nn.AlphaDropout(p=0.2), nn.Linear(2048, 1, bias=False), ) def forward(self, feats): # Contribution from embeddings # (batch, #ex, length, dim) -> (batch, #ex, dim) return self.mapping(feats).squeeze(-1) def fit(self, data, val_data=None, n_epoch=10): self.train() optimizer = optim.Adam(self.parameters(), weight_decay=1e-4, lr=1e-3) best_val = 0.0 for epoch_num in range(n_epoch): tr = [] for b, (time_per_batch, batch) in enumerate(time_batch(data, reset_every=100)): feats, inds_to_use = batch results = model(feats) loss = F.cross_entropy(results, Variable(results.data.new(results.size(0)).long().fill_(0))) summ_dict = {'loss': loss.data[0], 'acc': (results.max(1)[1] == 0).float().mean().data[0]} tr.append(pd.Series(summ_dict)) optimizer.zero_grad() loss.backward() clip_grad_norm( [(n, p) for n, p in model.named_parameters() if p.grad is not None], max_norm=1.0, verbose=False, clip=True) optimizer.step() mean_stats = pd.concat(tr, axis=1).mean(1) if val_data is not None: vp, val_acc = self.predict(val_data) print("e{:2d}: train loss {:.3f} train acc {:.3f} val acc {:.3f}".format(epoch_num, mean_stats['loss'], mean_stats['acc'], val_acc), flush=True) if val_acc < best_val or epoch_num == (n_epoch - 1): return best_val = val_acc def predict(self, data): self.eval() all_predictions = [] for b, (time_per_batch, batch) in enumerate(time_batch(data, reset_every=100)): feats, inds_to_use = batch all_predictions.append(model(feats).data.cpu().numpy()) all_predictions = np.concatenate(all_predictions, 0) if data.recompute_assignments: masked_predictions = all_predictions[np.arange(data.feats.shape[0])[:, None], assignments[data.indices]] else: masked_predictions = all_predictions acc = (masked_predictions.argmax(1) == 0).mean() mr = (-masked_predictions).argsort(1).argsort(1)[:, 0].mean() # print("acc is {:.3f}, mean rank is {:.3f}".format(acc, mr)) return all_predictions, acc accs = [] for iter in trange(100): train, val, test = SimpleCudaLoader.randomsplits() model = MLPModel() model.cuda() model.fit(train, val) predictions, acc = model.predict(test) accs.append(acc) # Now do some remapping n2chs = [] for pred, val_ind in zip(predictions, test.indices): high2low = (-pred).argsort() # Things at the beginning of this list seem real idx2rank = high2low.argsort() cur_assign = assignments[val_ind] adversarial_examples = high2low[:idx2rank[0]] adversarial_examples = adversarial_examples[ ~np.in1d(adversarial_examples, cur_assign)] # not currently assigned easy_idxs = high2low[idx2rank[0] + 1:][::-1] easy_idxs = easy_idxs[np.in1d(easy_idxs, cur_assign)] # Make the easy indices map according to their position in the assignments easy_inds = np.argmax(easy_idxs[:, None] == cur_assign[None], 1) assert 
np.allclose(cur_assign[easy_inds], easy_idxs) num2change = min(2, adversarial_examples.shape[0], easy_idxs.shape[0]) n2chs.append(num2change) # print("adversarial ex we can add {:4d} easy idxs {:4d} were changing {:4d}".format( # adversarial_examples.shape[0], easy_idxs.shape[0], num2change)) if num2change == 0: # print("Continuing, nothing we can change") pass else: # change a random index ind_loc = np.random.choice(easy_inds, replace=False, size=num2change) adv_loc = np.random.choice(adversarial_examples, replace=False, size=num2change) assignments[val_ind, ind_loc] = adv_loc # Change the first index over. # ind_loc = easy_inds[0] # assignments[val_ind, ind_loc] = adversarial_examples[0] print("{:.3f} val accuracy: {:.3f} n2chs".format(acc, np.mean(n2chs)), flush=True) assert np.all(assignments[:, 0] == 0) # Plot the accuracy as time goes by np.save('assignments-pretrained.npy', assignments) start_idx = 0 for fold in trange(5): with open('examples{}-of-5.pkl'.format(fold), 'rb') as f: examples = pkl.load(f) assignments_this_fold = assignments[start_idx:start_idx+len(examples)] np.save('assignments-pretrained-fold{}.npy'.format(fold), assignments_this_fold) start_idx += len(examples) plt.clf() accuracy = pd.Series(np.array(accs)) df = pd.DataFrame(pd.concat([accuracy, # accuracy.rolling(window=int(1/(1-TRAIN_PERC)), win_type='gaussian', min_periods=1, center=True).mean(std=2) accuracy.rolling(window=2 * int(1 / (1 - TRAIN_PERC)), win_type=None, min_periods=1, center=True).mean() ], 0), columns=['accuracy']) df['subject'] = 0 df['series'] = ['accuracy'] * accuracy.shape[0] + ['smoothed accuracy'] * accuracy.shape[0] df.index.rename('iteration', inplace=True) df.reset_index(inplace=True) df.to_csv('pretrain-rebalance-mlp.csv') sns.set(color_codes=True) fig = sns.tsplot(time='iteration', value='accuracy', data=df, unit='subject', condition='series').get_figure() fig.savefig('rebalancing-mlp-acc.pdf')
10,256
39.066406
138
py
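The rebalancing script that closes above keeps retraining its MLP and then swaps "easy" assigned distractors for higher-scoring unassigned candidates (adversarial filtering). A minimal sketch of that single swap step, written here as a standalone helper with an illustrative name and toy-scale defaults, not the exact pipeline code:

import numpy as np

def swap_in_adversarial(pred, cur_assign, max_swaps=2, rng=np.random):
    # pred[0] is the model's score for the gold ending, pred[i > 0] score candidate
    # distractors; cur_assign holds the distractor indices currently assigned to
    # this example (the gold ending stays fixed at position 0).
    high2low = (-pred).argsort()      # candidate indices, best-scoring first
    idx2rank = high2low.argsort()     # rank of every candidate (0 = highest score)

    # Unassigned candidates ranked above the gold ending are adversarial.
    adversarial = high2low[:idx2rank[0]]
    adversarial = adversarial[~np.in1d(adversarial, cur_assign)]

    # Assigned candidates ranked below the gold ending are "easy" and may be replaced.
    easy = high2low[idx2rank[0] + 1:][::-1]
    easy = easy[np.in1d(easy, cur_assign)]
    easy_slots = np.argmax(easy[:, None] == cur_assign[None], 1)  # positions inside cur_assign

    num2change = min(max_swaps, adversarial.shape[0], easy.shape[0])
    if num2change > 0:
        slots = rng.choice(easy_slots, replace=False, size=num2change)
        cur_assign[slots] = rng.choice(adversarial, replace=False, size=num2change)
    return cur_assign, num2change

Applied once per validation example, this keeps the gold ending at index 0 while the distractor set drifts toward endings the current model finds hardest.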
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/classifiers.py
""" The big idea will be to add in the worst scoring one. But we want to use a MULTILAYER PERCEPTRON. Also not using word features for now """ import torch from allennlp.common import Params from allennlp.modules.augmented_lstm import AugmentedLstm from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import PytorchSeq2SeqWrapper from allennlp.modules.token_embedders.embedding import Embedding from torch import nn from torch.nn import functional as F from torch.autograd import Variable import pandas as pd from model.pytorch_misc import clip_grad_norm, optimistic_restore, print_para, time_batch from torch import optim import numpy as np #################### Model types def reshape(f): def wrapper(self, *args, **kwargs): sizes = [x.size() for x in args] + [x.size() for x in kwargs.values()] batch_size, num_ex = sizes[0][:2] res = f(self, *[x.view((-1,) + x.size()[2:]) for x in args], **{k: v.view((-1,), + v.size()[2:]) for k, v in kwargs}) if isinstance(res, tuple): return tuple([x.view((batch_size, num_ex,) + x.size()[1:]) for x in res]) return res.view((batch_size, num_ex,) + res.size()[1:]) return wrapper class LMFeatsModel(nn.Module): def __init__(self, input_dim=5, hidden_dim=1024): """ Averaged embeddings of ending -> label :param embed_dim: dimension to use """ super(LMFeatsModel, self).__init__() self.mapping = nn.Sequential( nn.Linear(input_dim, hidden_dim, bias=True), nn.SELU(), nn.AlphaDropout(p=0.2), ) self.prediction = nn.Sequential( nn.Linear(hidden_dim, hidden_dim, bias=True), nn.SELU(), nn.AlphaDropout(p=0.2), nn.Linear(hidden_dim, 1, bias=False), ) @reshape def forward(self, feats): """ :param words: [batch, dim] indices :return: [batch] scores of real-ness. """ inter_feats = self.mapping(feats) preds = self.prediction(inter_feats).squeeze(1) return preds, inter_feats def fit(self, data, val_data=None, num_epoch=10): self.train() optimizer = optim.Adam(self.parameters(), weight_decay=1e-4, lr=1e-3) best_val = 0.0 for epoch_num in range(num_epoch): tr = [] for b, (time_per_batch, batch) in enumerate(time_batch(data, reset_every=100)): results = self(batch['lm_feats'].cuda(async=True))[0] loss = F.cross_entropy(results, Variable(results.data.new(results.size(0)).long().fill_(0))) summ_dict = {'loss': loss.data[0], 'acc': (results.max(1)[1] == 0).float().mean().data[0]} tr.append(pd.Series(summ_dict)) optimizer.zero_grad() loss.backward() clip_grad_norm( [(n, p) for n, p in self.named_parameters() if p.grad is not None], max_norm=1.0, verbose=False, clip=True) optimizer.step() mean_stats = pd.concat(tr, axis=1).mean(1) if val_data is not None: val_acc, val_results = self.validate(val_data) print("e{:2d}: train loss {:.3f} train acc {:.3f} val acc {:.3f}".format(epoch_num, mean_stats['loss'], mean_stats['acc'], val_acc), flush=True) if val_acc < best_val or epoch_num == (num_epoch - 1): return {'mlp': val_acc, 'fasttext': 0, 'cnn': 0, 'lstm_pos': 0, 'ensemble': 0} best_val = val_acc def validate(self, data): self.eval() all_predictions = [] for b, (time_per_batch, batch) in enumerate(time_batch(data, reset_every=100)): results = self(batch['lm_feats'].cuda(async=True))[0] all_predictions.append(results.data.cpu().numpy()) all_predictions = np.concatenate(all_predictions, 0) acc = (all_predictions.argmax(1) == 0).mean() return acc, {'ensemble': all_predictions} class BoWModel(nn.Module): def __init__(self, vocab, use_mean=True, embed_dim=100): """ Averaged embeddings of ending -> label :param embed_dim: dimension to use """ super(BoWModel, self).__init__() assert 
embed_dim == 100 self.embeds = Embedding.from_params( vocab, Params({'vocab_namespace': 'tokens', 'embedding_dim': embed_dim, 'trainable': True, 'padding_index': 0, 'pretrained_file': 'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz' })) self.embed_dim = embed_dim self.use_mean = use_mean self.embedding_to_label = nn.Linear(self.embed_dim, 1, bias=False) @reshape def forward(self, word_ids): """ :param word_ids: [batch, length] ids :return: [batch] scores of real-ness. """ embeds = self.embeds(word_ids) mask = (word_ids.data != 0).long() seq_lengths = mask.sum(-1, keepdim=True).float() seq_lengths[seq_lengths < 1] = 1.0 inter_feats = embeds.sum(1) / Variable(seq_lengths) if self.use_mean else embeds.max(1)[0] preds = self.embedding_to_label(inter_feats).squeeze(1) return preds, inter_feats class CNNModel(nn.Module): def __init__(self, vocab, embed_dim=100, window_sizes=(2, 3, 4, 5), num_filters=128): super(CNNModel, self).__init__() self.embeds = Embedding.from_params( vocab, Params({'vocab_namespace': 'tokens', 'embedding_dim': embed_dim, 'trainable': True, 'padding_index': 0, 'pretrained_file': 'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz' })) self.binary_feature_embedding = Embedding(2, embed_dim) self.convs = nn.ModuleList([ nn.Conv1d(embed_dim * 2, num_filters, kernel_size=window_size, padding=window_size - 1) for window_size in window_sizes ]) self.fc = nn.Linear(num_filters * len(window_sizes), 1, bias=False) @reshape def forward(self, word_ids, indicator_ids): """ :param word_ids: [batch, length] ids :param indicator_ids: [batch, length] ids """ embeds = torch.cat((self.embeds(word_ids), self.binary_feature_embedding(indicator_ids)), 2) # mask = (word_ids != 0).long() embeds_t = embeds.transpose(1, 2) # [B, D, L] conv_reps = [] for conv in self.convs: conv_reps.append(F.relu(conv(embeds_t)).max(2)[0]) # Now it's [B, D] inter_feats = torch.cat(conv_reps, 1) preds = self.fc(inter_feats).squeeze(1) return preds, inter_feats class BLSTMModel(nn.Module): def __init__(self, vocab, use_postags_only=True, embed_dim=100, hidden_size=200, recurrent_dropout_probability=0.3, use_highway=False, maxpool=True): super(BLSTMModel, self).__init__() self.embeds = Embedding.from_params( vocab, Params({'vocab_namespace': 'pos' if use_postags_only else 'tokens', 'embedding_dim': embed_dim, 'trainable': True, 'padding_index': 0, 'pretrained_file': None if use_postags_only else 'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz', })) self.binary_feature_embedding = Embedding(2, embed_dim) self.fwd_lstm = PytorchSeq2SeqWrapper(AugmentedLstm( input_size=embed_dim * 2, hidden_size=hidden_size, go_forward=True, recurrent_dropout_probability=recurrent_dropout_probability, use_input_projection_bias=False, use_highway=use_highway), stateful=False) self.bwd_lstm = PytorchSeq2SeqWrapper(AugmentedLstm( input_size=embed_dim * 2, hidden_size=hidden_size, go_forward=False, recurrent_dropout_probability=recurrent_dropout_probability, use_input_projection_bias=False, use_highway=use_highway), stateful=False) self.maxpool = maxpool self.fc = nn.Linear(hidden_size * 2, 1, bias=False) @reshape def forward(self, word_ids, indicator_ids): """ :param word_ids: [batch, length] ids :param indicator_ids: [batch, length] ids """ embeds = torch.cat((self.embeds(word_ids), self.binary_feature_embedding(indicator_ids)), 2) mask = (word_ids != 0).long() fwd_activation = self.fwd_lstm(embeds, mask) # [B, L, D] bwd_activation = 
self.bwd_lstm(embeds, mask) if self.maxpool: reps = torch.cat((fwd_activation.max(1)[0], bwd_activation.max(1)[0]), 1) # [B*N, 2D] else: # Forward and last. reps = torch.cat(( fwd_activation[torch.arange(0, mask.size(0), out=mask.data.new(mask.size(0))), mask.sum(1) - 1], bwd_activation[:, 0] ), 1) return self.fc(reps).squeeze(1), reps class Ensemble(nn.Module): def __init__(self, vocab): super(Ensemble, self).__init__() self.fasttext_model = BoWModel(vocab, use_mean=True, embed_dim=100) self.mlp_model = LMFeatsModel(input_dim=8, hidden_dim=1024) self.lstm_pos_model = BLSTMModel(vocab, use_postags_only=True, maxpool=True) # self.lstm_lex_model = BLSTMModel(vocab, use_postags_only=False, maxpool=True) self.cnn_model = CNNModel(vocab) self.mlp = nn.Sequential( nn.Linear(100 + 1024 + 400 + 4 * 128, 2048, bias=True), # nn.SELU(), # nn.AlphaDropout(p=0.2), # nn.Linear(2048, 2048, bias=True), nn.SELU(), nn.AlphaDropout(p=0.2), nn.Linear(2048, 1, bias=False), ) def forward(self, lm_feats, ending_word_ids, postags_word_ids, ctx_indicator, inds): """ :param lm_feats: [batch_size, #options, dim] :param ending_word_ids: [batch_size, #options, L] word ids :param postags_word_ids: [batch_size, #options, L] word ids :param ctx_indicator: [batch_size, #options, L] indicator :param inds: [batch_size] indices (not needed) :return: """ results = {} results['mlp'], mlp_feats = self.mlp_model(lm_feats) results['fasttext'], fasttext_feats = self.fasttext_model(ending_word_ids) results['cnn'], cnn_feats = self.cnn_model(ending_word_ids, ctx_indicator) results['lstm_pos'], lstm_feats = self.lstm_pos_model(postags_word_ids, ctx_indicator) # results['lstm_lex'], _ = self.lstm_lex_model(ending_word_ids, ctx_indicator) results['ensemble'] = self.mlp( torch.cat((mlp_feats, fasttext_feats, cnn_feats, lstm_feats), 2)).squeeze(2) return results def predict(self, lm_feats, ending_word_ids, postags_word_ids, ctx_indicator, inds): """ Predict a distribution of probabilities :return: Dict from model type -> prob dist """ results = self.forward(lm_feats, ending_word_ids, postags_word_ids, ctx_indicator, inds) results = {k: F.softmax(v, 1).data.cpu().numpy() for k, v in results.items()} return results def validate(self, val_dataloader): """ :param val_dataloader: Dataloader :return: Accuracies: dict from model -> accuracy All predictions: Dict from model -> [batch, #ex] distribution. 
""" # Compute the validation performance self.eval() all_predictions = {'mlp': [], 'fasttext': [], 'cnn': [], 'lstm_pos': [], #'lstm_lex': [], 'ensemble': []} for b, (time_per_batch, batch) in enumerate(time_batch(val_dataloader, reset_every=100)): batch = {k: v.cuda(async=True) if hasattr(v, 'cuda') else v for k, v in batch.items()} if b % 100 == 0 and b > 0: print("\nb{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format( b, len(val_dataloader), time_per_batch, len(val_dataloader) * time_per_batch / 60), flush=True) for k, v in self.predict(**batch).items(): all_predictions[k].append(v) all_predictions = {k: np.concatenate(v, 0) for k, v in all_predictions.items()} accuracies = {k: np.mean(v.argmax(1) == 0) for k, v in all_predictions.items()} return accuracies, all_predictions def fit(self, train_dataloader, val_dataloader, num_epoch=5): """ :param train_dataloader: Dataloader :param num_epoch number of epochs to use """ print_every = 100 optimizer = optim.Adam([p for p in self.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-3) best_val = 0.0 for epoch_num in range(num_epoch): tr = [] self.train() for b, (time_per_batch, batch) in enumerate(time_batch(train_dataloader, reset_every=print_every)): batch = {k: v.cuda(async=True) if hasattr(v, 'cuda') else v for k, v in batch.items()} results = self(**batch) losses = {'{}-loss'.format(k): F.cross_entropy( v, Variable(v.data.new(v.size(0)).long().fill_(0))) for k, v in results.items()} if any([np.isnan(x.data.cpu().numpy()) for x in losses.values()]): import ipdb ipdb.set_trace() loss = sum(losses.values()) summ_dict = {k: v.data[0] for k, v in losses.items()} summ_dict.update( {'{}-acc'.format(k): (v.max(1)[1] == 0).float().mean().data[0] for k, v in results.items()}) tr.append(pd.Series(summ_dict)) optimizer.zero_grad() loss.backward() if b % print_every == 0 and b > 0: print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format( epoch_num, b, len(train_dataloader), time_per_batch, len(train_dataloader) * time_per_batch / 60)) print(pd.concat(tr[-print_every:], axis=1).mean(1)) print('-----------', flush=True) # clip_grad_norm([(n, p) for n, p in self.named_parameters() if # p.grad is not None and n.startswith('lstm_lex_model')], max_norm=1.0, # verbose=b % 100 == 1, clip=True) clip_grad_norm([(n, p) for n, p in self.named_parameters() if p.grad is not None and not n.startswith('lstm_lex_model')], max_norm=1.0, verbose=b % 100 == 1, clip=True) optimizer.step() val_results, _ = self.validate(val_dataloader) val_acc = val_results['ensemble'] if val_acc < best_val or epoch_num == (num_epoch - 1): print("Stopping on epoch={} with\n{}".format(epoch_num, pd.Series(val_results)), flush=True) return val_results else: print("Continuing on epoch={} with\n{}".format(epoch_num, pd.Series(val_results)), flush=True) best_val = val_acc
15,626
43.019718
151
py
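classifiers.py wraps every sub-model's forward pass in a @reshape decorator so that a scorer written for single endings can be applied to all answer options at once. A minimal, positional-arguments-only sketch of that pattern (the name flatten_options is illustrative, and the original's keyword-argument branch is omitted):

import torch

def flatten_options(f):
    # Collapse the (batch, num_options, ...) leading dims so a per-ending scorer
    # runs once over every answer option, then restore them on the way out.
    def wrapper(self, *args):
        batch_size, num_options = args[0].size()[:2]
        flat_args = [x.contiguous().view((-1,) + tuple(x.size()[2:])) for x in args]
        res = f(self, *flat_args)
        if isinstance(res, tuple):
            return tuple(r.view((batch_size, num_options) + tuple(r.size()[1:])) for r in res)
        return res.view((batch_size, num_options) + tuple(res.size()[1:]))
    return wrapper

With options folded into the batch dimension, a single F.cross_entropy against an all-zeros target (option 0 is always the gold ending) trains every scorer in the file.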
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/sample_candidates.py
import pickle as pkl from argparse import ArgumentParser from copy import deepcopy from time import time import numpy as np import pandas as pd import torch from allennlp.commands.predict import Predictor from allennlp.data import Vocabulary from allennlp.models.archival import load_archive from tqdm import tqdm from create_swag.lm.config import NUM_FOLDS from create_swag.lm.simple_bilm import SimpleBiLM from pytorch_misc import optimistic_restore from spacy.tokens.doc import Doc from allennlp.common.util import get_spacy_model import spacy BATCH_SIZE = 1024 # ARGUMENTS parser = ArgumentParser(description='which fold to use') parser.add_argument('-fold', dest='fold', help='Which fold to use', type=int, default=0) fold = parser.parse_args().fold assert fold in set(range(NUM_FOLDS)) print("~~~~~~~~~USING SPLIT#{}~~~~~~~~~~~~~".format(fold)) # SETUP spacy_model = get_spacy_model("en_core_web_sm", pos_tags=True, parse=False, ner=False) spacy_model.tokenizer = lambda x: Doc(spacy_model.vocab, x) archive = load_archive('https://s3-us-west-2.amazonaws.com/allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz') constituency_predictor = Predictor.from_archive(archive, 'constituency-parser') # This is hella hacky! but it's tokenized already constituency_predictor._tokenizer.spacy.tokenizer = lambda x: Doc(constituency_predictor._tokenizer.spacy.vocab, x) vocab = Vocabulary.from_files('../lm/vocabulary') pos_vocab = Vocabulary(counter={'tokens': {name: i + 9000 for i, name in enumerate( [vocab.get_token_from_index(x) for x in range(100)] + [pos for pos in spacy.parts_of_speech.NAMES.values() if len(pos) > 0] )}}) model = SimpleBiLM(vocab=vocab, recurrent_dropout_probability=0.2, embedding_dropout_probability=0.2) optimistic_restore(model, torch.load('../lm/best-{}.tar'.format(fold))['state_dict']) # <- NEED TO DO THIS ON A FOLD LEVEL # include if not necessary model.register_buffer('invalid_tokens', torch.LongTensor([vocab.get_token_index(tok) for tok in ['@@UNKNOWN@@', '@@PADDING@@', '@@bos@@', '@@eos@@', '@@NEWLINE@@']])) model.cuda() model.eval() with open('../lm/lm-{}-of-{}.pkl'.format(fold, NUM_FOLDS), 'rb') as f: stories_tokenized = pkl.load(f) ######## # We want to recurse until we find verb phrases def find_VP(tree): """ Recurse on the tree until we find verb phrases :param tree: constituency parser result :return: """ # Recursion is annoying because we need to check whether each is a list or not def _recurse_on_children(): assert 'children' in tree result = [] for child in tree['children']: res = find_VP(child) if isinstance(res, tuple): result.append(res) else: result.extend(res) return result if 'VP' in tree['attributes']: # # Now we'll get greedy and see if we can find something better # if 'children' in tree and len(tree['children']) > 1: # recurse_result = _recurse_on_children() # if all([x[1] in ('VP', 'NP', 'CC') for x in recurse_result]): # return recurse_result return [(tree['word'], 'VP')] # base cases if 'NP' in tree['attributes']: return [(tree['word'], 'NP')] # No children if not 'children' in tree: return [(tree['word'], tree['attributes'][0])] # If a node only has 1 child then we'll have to stick with that if len(tree['children']) == 1: return _recurse_on_children() # try recursing on everything return _recurse_on_children() def split_on_final_vp(sentence): """ Splits a sentence on the final verb phrase""" try: res = constituency_predictor.predict_json({'sentence': sentence}) except: return None, None res_chunked = find_VP(res['hierplane_tree']['root']) is_vp = [i for i, 
(word, pos) in enumerate(res_chunked) if pos == 'VP'] if len(is_vp) == 0: return None, None vp_ind = max(is_vp) not_vp = [token for x in res_chunked[:vp_ind] for token in x[0].split(' ')] is_vp = [token for x in res_chunked[vp_ind:] for token in x[0].split(' ')] return not_vp, is_vp good_examples = [] for (instance, s1_toks, s2_toks, item) in tqdm(stories_tokenized): eos_bounds = [i + 1 for i, x in enumerate(s1_toks) if x in ('.', '?', '!')] if len(eos_bounds) == 0: s1_toks.append('.') # Just in case there's no EOS indicator. context_len = len(s1_toks) if context_len < 6 or context_len > 100: print("skipping on {} (too short or long)".format(' '.join(s1_toks + s2_toks))) continue # Something I should have done: make sure that there aren't multiple periods, etc. in s2 or in the middle eos_bounds_s2 = [i + 1 for i, x in enumerate(s2_toks) if x in ('.', '?', '!')] if len(eos_bounds_s2) > 1 or max(eos_bounds_s2) != len(s2_toks): continue elif len(eos_bounds_s2) == 0: s2_toks.append('.') # Now split on the VP startphrase, endphrase = split_on_final_vp(s2_toks) if startphrase is None or len(startphrase) == 0 or len(endphrase) < 5 or len(endphrase) > 25: print("skipping on {}->{},{}".format(' '.join(s1_toks + s2_toks), startphrase, endphrase), flush=True) continue # if endphrase contains unk then it's hopeless if any(vocab.get_token_index(tok.lower()) == vocab.get_token_index(vocab._oov_token) for tok in endphrase): print("skipping on {} (unk!)".format(' '.join(s1_toks + s2_toks))) continue context = s1_toks + startphrase tic = time() gens0, fwd_scores, ctx_scores = model.conditional_generation(context, gt_completion=endphrase, batch_size=2 * BATCH_SIZE, max_gen_length=25) if len(gens0) < BATCH_SIZE: print("Couldnt generate enough candidates so skipping") continue gens0 = gens0[:BATCH_SIZE] fwd_scores = fwd_scores[:BATCH_SIZE] # Now get the backward scores. 
full_sents = [context + gen for gen in gens0] # NOTE: #1 is GT result_dict = model(model.batch_to_ids(full_sents), use_forward=False, use_reverse=True, compute_logprobs=True) ending_lengths = (fwd_scores < 0).sum(1) ending_lengths_float = ending_lengths.astype(np.float32) rev_scores = result_dict['reverse_logprobs'].data.cpu().numpy() forward_logperp_ending = -fwd_scores.sum(1) / ending_lengths_float reverse_logperp_ending = -rev_scores[:, context_len:].sum(1) / ending_lengths_float forward_logperp_begin = -ctx_scores.mean() reverse_logperp_begin = -rev_scores[:, :context_len].mean(1) eos_logperp = -fwd_scores[np.arange(fwd_scores.shape[0]), ending_lengths - 1] print("Time elapsed {:.3f}".format(time() - tic), flush=True) scores = np.exp(np.column_stack(( forward_logperp_ending, reverse_logperp_ending, reverse_logperp_begin, eos_logperp, np.ones(forward_logperp_ending.shape[0], dtype=np.float32) * forward_logperp_begin, ))) # PRINTOUT low2high = scores[:, 2].argsort() print("\n\n Dataset={} ctx: {} (perp={:.3f})\n~~~\n".format(item['dataset'], ' '.join(context), np.exp(forward_logperp_begin)), flush=True) for i, ind in enumerate(low2high.tolist()): gen_i = ' '.join(gens0[ind]) if (ind == 0) or (i < 128): print("{:3d}/{:4d}) ({}, end|ctx:{:5.1f} end:{:5.1f} ctx|end:{:5.1f} EOS|(ctx, end):{:5.1f}) {}".format( i, len(gens0), 'GOLD' if ind == 0 else ' ', *scores[ind][:-1], gen_i), flush=True) gt_score = low2high.argsort()[0] item_full = deepcopy(item) item_full['sent1'] = s1_toks item_full['startphrase'] = startphrase item_full['context'] = context item_full['generations'] = gens0 item_full['postags'] = [ # parse real fast [x.orth_.lower() if pos_vocab.get_token_index(x.orth_.lower()) != 1 else x.pos_ for x in y] for y in spacy_model.pipe([startphrase + gen for gen in gens0], batch_size=BATCH_SIZE)] item_full['scores'] = pd.DataFrame(data=scores, index=np.arange(scores.shape[0]), columns=['end-from-ctx', 'end', 'ctx-from-end', 'eos-from-ctxend', 'ctx']) good_examples.append(item_full) with open('examples{}-of-{}.pkl'.format(fold, NUM_FOLDS), 'wb') as f: pkl.dump(good_examples, f)
8,718
40.918269
119
py
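sample_candidates.py ranks each generated ending by a handful of language-model perplexities before anything is written to disk. The feature construction can be read directly off the scores block above; here is a self-contained restatement (same column order, shapes as noted in the comments), intended as a sketch rather than a drop-in replacement:

import numpy as np

def perplexity_features(fwd_scores, rev_scores, ctx_scores, context_len):
    # fwd_scores:  [num_candidates, max_len] forward-LM token log-probs of each ending
    #              (zeros past the ending, so negative entries mark real tokens)
    # rev_scores:  [num_candidates, context_len + max_len] reverse-LM token log-probs
    # ctx_scores:  [context_len] forward-LM log-probs of the shared context
    ending_lengths = (fwd_scores < 0).sum(1)
    lengths_f = ending_lengths.astype(np.float32)

    fwd_end = -fwd_scores.sum(1) / lengths_f                    # ending | context
    rev_end = -rev_scores[:, context_len:].sum(1) / lengths_f   # ending, reverse LM
    rev_ctx = -rev_scores[:, :context_len].mean(1)              # context | ending
    fwd_ctx = -ctx_scores.mean()                                 # context alone (scalar)
    eos = -fwd_scores[np.arange(fwd_scores.shape[0]), ending_lengths - 1]  # EOS token

    return np.exp(np.column_stack((
        fwd_end, rev_end, rev_ctx, eos,
        np.full(fwd_end.shape[0], fwd_ctx, dtype=np.float32),
    )))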
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/generate_candidates/rebalance_dataset_ensemble.py
""" The big idea will be to add in the worst scoring one. But we want to use a MULTILAYER PERCEPTRON. Also not using word features for now """ import pickle as pkl from argparse import ArgumentParser from copy import deepcopy import numpy as np import pandas as pd import spacy import torch from allennlp.data import Instance from allennlp.data import Token from allennlp.data import Vocabulary from allennlp.data.dataset import Batch from allennlp.data.fields import TextField, SequenceLabelField from allennlp.data.token_indexers import SingleIdTokenIndexer from torch.autograd import Variable from torch.utils.data import Dataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm from create_swag.lm.config import NUM_FOLDS from create_swag.generate_candidates.classifiers import Ensemble, LMFeatsModel ######### PARAMETERS NUM_DISTRACTORS = 9 TRAIN_PERC = 0.8 BATCH_SIZE = 1024 vocab = Vocabulary.from_files('../lm/vocabulary') pos_vocab = Vocabulary(counter={'tokens': {name: i + 9000 for i, name in enumerate( [vocab.get_token_from_index(x) for x in range(100)] + [pos for pos in spacy.parts_of_speech.NAMES.values() if len(pos) > 0] )}}) vocab._token_to_index['pos'] = pos_vocab._token_to_index['tokens'] vocab._index_to_token['pos'] = pos_vocab._index_to_token['tokens'] parser = ArgumentParser(description='which fold to use') parser.add_argument('-fold', dest='fold', help='Which fold to use. If you say -1 we will use ALL OF THEM!', type=int, default=0) fold = parser.parse_args().fold assert fold in set(range(NUM_FOLDS)) or fold == -1 print("~~~~~~~~~USING SPLIT#{}~~~~~~~~~~~~~".format(fold)) if fold == -1: assignments = [] assignments = np.load('assignments-pretrained.npy') # for i in range(5): # assignments.append(np.load('assignments-fold-{}-19.npy'.format(i))) # assignments = np.concatenate(assignments) else: assignments = np.load('assignments-pretrained-fold{}.npy'.format(fold)) ######################################### # TODO can we do this in parallel? class AssignmentsDataLoader(Dataset): # TODO: we might need to load the dataset again on every iteration because memory is a big problem. 
def __init__(self, instances, inds, train=True, recompute_assignments=False): self.instances = instances self.inds = inds self.train = train self.recompute_assignments = recompute_assignments self.dataloader = DataLoader(dataset=self, batch_size=128 if not recompute_assignments else 16, shuffle=self.train, num_workers=0, collate_fn=self.collate, drop_last=self.train) def collate(self, items_l): # Assume all of these have the same length index_l, second_sentences_l, pos_tags_l, feats_l, context_len_l = zip(*items_l) feats = Variable(torch.FloatTensor(np.stack(feats_l))) inds = np.array(index_l) instances = [] for second_sentences, pos_tags, context_len in zip(second_sentences_l, pos_tags_l, context_len_l): for second_sent, pos_tag in zip(second_sentences, pos_tags): instance_d = { 'words': TextField([Token(token) for token in ['@@bos@@'] + second_sent + ['@@eos@@']], {'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)}), 'postags': TextField([Token(token) for token in ['@@bos@@'] + pos_tag + ['@@eos@@']], {'pos': SingleIdTokenIndexer(namespace='pos', lowercase_tokens=False)}), } instance_d['context_indicator'] = SequenceLabelField([1] * (context_len + 1) + [0] * (len(second_sent) - context_len + 1), instance_d['words']) instances.append(Instance(instance_d)) batch = Batch(instances) batch.index_instances(vocab) tensor_dict = batch.as_tensor_dict(for_training=self.train) # instances_mask = torch.LongTensor(np.stack([np.array([len(sub_g) > 0 for sub_g in g], dtype=np.int64) # for g in selected_gens])) return { 'lm_feats': feats, 'inds': inds, 'ending_word_ids': tensor_dict['words']['tokens'].view(inds.shape[0], -1, tensor_dict['words']['tokens'].size(1)), 'postags_word_ids': tensor_dict['postags']['pos'].view(inds.shape[0], -1, tensor_dict['postags']['pos'].size(1)), 'ctx_indicator': tensor_dict['context_indicator'].view(inds.shape[0], -1, tensor_dict['context_indicator'].size(1)), } def __len__(self): return len(self.instances) def __getitem__(self, index): """ :param index: index into the list of examples. ps: they are of the form sent1: List[str] of tokens for the first sentence startphrase: List[str] of tokens for the first part of the 2nd sentence generations: List[List[str]] of tokenized responses. The first one is GT. postags: List[List[str]] of POSTags and some lexicalization for startphrase+generations. 
They're all of the same size (1024) :return: index second_sentences List[List[str]] full s2's pos_tags List[List[str]] full PosTags of S2's feats [#ex, dim] np array of features context_len length of context size in second_sentences and pos_tags """ this_ex = self.instances[index] second_sentences = [this_ex['startphrase'] + gen for gen in this_ex['generations']] context_len = len(this_ex['startphrase']) feats_vals = this_ex['scores'].values if np.isinf(feats_vals).any(): feats_vals[np.isinf(feats_vals)] = 1e17 feats = np.column_stack(( np.log(feats_vals), np.array([len(gen) for gen in this_ex['generations']], dtype=np.float32), np.ones(feats_vals.shape[0], dtype=np.float32) * context_len, np.ones(feats_vals.shape[0], dtype=np.float32) * len(this_ex['sent1']), )) return index, second_sentences, this_ex['postags'], feats, context_len @classmethod def splits(cls, assignments): """ if assignments is none we initialize by looking at topN""" s_idx = 0 train_instances = [] val_instances = [] test_instances = [] train_indices = [] test_indices = [] print("loading the data!", flush=True) def _load_from_examples(example_list, offset): idx = np.random.permutation(len(example_list)) train_idx = np.sort(idx[:int(TRAIN_PERC * idx.shape[0])]) val_idx = np.sort(idx[int(TRAIN_PERC * idx.shape[0]):]) train_indices.append(offset + train_idx) test_indices.append(offset + val_idx) for i in tqdm(train_idx): item_copy = example_list[i] item_copy['generations'] = [example_list[i]['generations'][j] for j in assignments[i + offset]] item_copy['postags'] = [example_list[i]['postags'][j] for j in assignments[i + offset]] item_copy['scores'] = example_list[i]['scores'].iloc[assignments[i + offset]] train_instances.append(item_copy) for i in tqdm(val_idx): item_copy = deepcopy(example_list[i]) item_copy['generations'] = [example_list[i]['generations'][j] for j in assignments[i + offset]] item_copy['postags'] = [example_list[i]['postags'][j] for j in assignments[i + offset]] item_copy['scores'] = example_list[i]['scores'].iloc[assignments[i + offset]] val_instances.append(item_copy) test_instances.append(example_list[i]) return len(ex_this_fold) folds2use = range(5) if fold == -1 else [fold] for fold_no in folds2use: print("loading data from fold {}".format(fold_no), flush=True) with open('examples{}-of-5.pkl'.format(fold_no), 'rb') as f: ex_this_fold = pkl.load(f) s_idx += _load_from_examples(ex_this_fold, s_idx) train_indices = np.concatenate(train_indices, 0) test_indices = np.concatenate(test_indices, 0) return cls(train_instances, train_indices, train=True), cls(val_instances, test_indices, train=False), cls( test_instances, test_indices, train=False, recompute_assignments=True) def _iter(): train, val, test = AssignmentsDataLoader.splits(assignments) model = Ensemble(vocab) model.cuda() val_results = model.fit(train.dataloader, val.dataloader, num_epoch=10) # Now get predictions for the best thing best_scoring_model_name = pd.Series(val_results).argmax() print("We will rebalance with {}".format(best_scoring_model_name)) test_results, all_predictions = model.validate(test.dataloader) n2chs = [] for val_ind, pred in zip(test.inds, all_predictions[best_scoring_model_name]): high2low = (-pred).argsort() # Things at the beginning of this list seem real idx2rank = high2low.argsort() cur_assign = assignments[val_ind] adversarial_examples = high2low[:idx2rank[0]] adversarial_examples = adversarial_examples[ ~np.in1d(adversarial_examples, cur_assign)] # not currently assigned easy_idxs = high2low[idx2rank[0] + 1:][::-1] 
easy_idxs = easy_idxs[np.in1d(easy_idxs, cur_assign)] # Make the easy indices map according to their position in the assignments easy_inds = np.argmax(easy_idxs[:, None] == cur_assign[None], 1) assert np.allclose(cur_assign[easy_inds], easy_idxs) num2change = min(2, adversarial_examples.shape[0], easy_idxs.shape[0]) n2chs.append(num2change) # print("adversarial ex we can add {:4d} easy idxs {:4d} were changing {:4d}".format( # adversarial_examples.shape[0], easy_idxs.shape[0], num2change)) if num2change == 0: pass else: # change a random index ind_loc = np.random.choice(easy_inds, replace=False, size=num2change) adv_loc = np.random.choice(adversarial_examples, replace=False, size=num2change) assignments[val_ind, ind_loc] = adv_loc # Change the first index over. # ind_loc = easy_inds[0] # assignments[val_ind, ind_loc] = adversarial_examples[0] val_results['n2chs'] = np.mean(n2chs) return pd.Series(val_results) all_results = [] for i in range(50): all_results.append(_iter()) if fold == -1: pd.DataFrame(all_results).to_csv('ensemble-accs.csv', index=False) np.save('assignments-{}.npy'.format(i), assignments) else: pd.DataFrame(all_results).to_csv('ensemble-accs-fold-{}.csv'.format(fold), index=False) np.save('assignments-fold-{}-{}.npy'.format(fold, i), assignments) # # # To extract some things (maybe this is useful? idk) # from nltk.tokenize.moses import MosesDetokenizer # def _extract(): # detokenizer = MosesDetokenizer() # with open('examples0-of-5.pkl', 'rb') as f: # ex_this_fold = pkl.load(f) # assignments = np.load('assignments-4.npy') # # selected_examples = [] # for ind, (item, assign_i) in enumerate(zip(tqdm(ex_this_fold), assignments)): # context = pd.Series([detokenizer.detokenize(item['sent1'], return_str=True)] * len(assign_i)) # completions = pd.Series( # [detokenizer.detokenize(item['startphrase'] + item['generations'][i], return_str=True) for i in # assign_i.tolist()]) # dataset = pd.Series([item['dataset']] * len(assign_i)) # ids = pd.Series([item['id']] * len(assign_i)) # duration = pd.Series([item['duration']] * len(assign_i)) # inds = pd.Series([ind] * len(assign_i)) # # df_this_ex = pd.DataFrame( # data={'inds': inds, 'selections': assign_i, 'context': context, 'completions': completions, # 'is_gold': (assign_i == 0), # 'choice': np.arange(NUM_DISTRACTORS + 1), # 'dataset': dataset, 'ids': ids, 'duration': duration}, # columns=['inds', 'context', 'completions', 'selections', 'is_gold', 'choice', 'dataset', 'ids', 'duration']) # # df_with_extra_feats = pd.concat( # (df_this_ex, item['scores'].iloc[assign_i].reset_index(drop=True)), axis=1) # selected_examples.append(df_with_extra_feats) # return pd.concat(selected_examples, 0).reset_index(drop=True) # # # _extract().to_csv('dataset.csv', sep='\t', index=False)
13,160
44.539792
122
py
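rebalance_dataset_ensemble.py feeds its MLP a small per-candidate feature matrix built in __getitem__: the log of the five perplexity columns plus a few length features broadcast to every ending. A compact sketch of that construction, with an illustrative helper name and plain numpy inputs:

import numpy as np

def lm_feature_matrix(score_vals, generations, context_len, sent1_len):
    # score_vals: [num_candidates, 5] perplexity columns from the candidate file;
    # generations: list of token lists, one per candidate ending.
    vals = np.asarray(score_vals, dtype=np.float32).copy()
    vals[np.isinf(vals)] = 1e17          # clamp infinities before taking the log
    n = vals.shape[0]
    return np.column_stack((
        np.log(vals),
        np.array([len(gen) for gen in generations], dtype=np.float32),
        np.full(n, context_len, dtype=np.float32),
        np.full(n, sent1_len, dtype=np.float32),
    ))

The resulting eight columns line up with the input_dim=8 that Ensemble passes to its LMFeatsModel in classifiers.py.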
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/pretrain_lm.py
import os import pandas as pd import torch from allennlp.data import Instance from allennlp.data import Token from allennlp.data import Vocabulary from allennlp.data.dataset import Batch from allennlp.data.fields import TextField from allennlp.data.token_indexers import SingleIdTokenIndexer from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer from torch import optim from create_swag.lm.simple_bilm import SimpleBiLM from raw_data.events import _postprocess from pytorch_misc import clip_grad_norm, print_para, time_batch from create_swag.lm.config import PRETRAIN_TXT assert os.path.exists('../vocabulary') vocab = Vocabulary.from_files('../vocabulary') indexer = ELMoTokenCharactersIndexer() def batcher(inp_list): """ batches, asumming everything is padded and tokenized.""" instances = [Instance({'story': TextField([Token(x) for x in ['@@bos@@'] + subl + ['@@eos@@']], token_indexers={ 'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True), 'char_encoding': indexer}), }) for subl in inp_list] batch = Batch(instances) batch.index_instances(vocab) result_dict = batch.as_tensor_dict()['story'] result_dict['story'] = inp_list return result_dict def data_runner(start_point=0, minlength=4): print("starting at {}".format(start_point)) with open(PRETRAIN_TXT, 'r') as f: f.seek(start_point) f.readline() # Clear the partial line for i, line in enumerate(f): yield _postprocess(line) def _sample_a_good_pair(gen, seq_length, min_length=3): cur_status = [] eos_idxs = [i for i, x in enumerate(cur_status) if x in ('.', '!', '?')] while len(eos_idxs) < 2: cur_status.extend([x for x in next(gen).split(' ') if x is not '\n']) eos_idxs = [i for i, x in enumerate(cur_status) if x in ('.', '!', '?')] if eos_idxs[1] >= seq_length: return _sample_a_good_pair(gen, seq_length, min_length=min_length) elif (eos_idxs[0] < min_length) or (eos_idxs[1] - eos_idxs[0]) < min_length: # Too short return _sample_a_good_pair(gen, seq_length, min_length=min_length) return cur_status[:eos_idxs[1] + 1] def looped_data_runner(batch_size=128, seq_length=50): offset = 0 TOTAL_BYTES_TRAIN = 4343022454 generators = [data_runner(start_point=TOTAL_BYTES_TRAIN * i // batch_size + offset, minlength=0) for i in range(batch_size)] while True: for g_i, gen in enumerate(generators): yield _sample_a_good_pair(gen, seq_length=seq_length, min_length=5) def bucketed_data_runner(batch_size=64, seq_length=50): length2batch = [[] for i in range(seq_length + 1)] # Get diverse samples for batch in looped_data_runner(batch_size=128, seq_length=seq_length): length2batch[len(batch)].append(batch) if len(length2batch[len(batch)]) >= batch_size: # print("Yielding now of size {}".format(len(batch))) yield batcher(length2batch[len(batch)]) length2batch[len(batch)] = [] # Dataloader model = SimpleBiLM(vocab=vocab, recurrent_dropout_probability=0.2, embedding_dropout_probability=0.2) model.cuda() tr = [] model.train() for epoch_num in range(2): if epoch_num == 0: optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-3) else: optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-4) print(print_para(model)) for b, (time_per_batch, batch) in enumerate(time_batch(bucketed_data_runner())): batch['tokens'] = batch['tokens'].cuda(async=True) model_forward = model(batch['tokens']) losses = {key: model_forward[key] for key in ['forward_loss', 'reverse_loss']} tr.append(pd.Series({k: v.data[0] for k, v in losses.items()})) loss = 
sum(losses.values()) optimizer.zero_grad() loss.backward() if b % 100 == 0 and b > 0: df_cat = pd.concat(tr[-100:], axis=1).mean(1) print("b{:8d} {:.3f}s/batch, fwd loss {:.3f} rev loss {:.3f} ".format(b, time_per_batch, df_cat['forward_loss'], df_cat['reverse_loss']), flush=True) clip_grad_norm( [(n, p) for n, p in model.named_parameters() if p.grad is not None], max_norm=1.0, verbose=b % 1000 == 1, clip=True) optimizer.step() if b % 10000 == 0 and b > 0: torch.save({'state_dict': model.state_dict()}, 'e{}-tbooks-pretrained-ckpt-{}.tar'.format(epoch_num, b))
4,759
40.391304
118
py
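pretrain_lm.py avoids padding during pretraining by bucketing sampled sentence pairs by length and emitting a batch only once a bucket fills (bucketed_data_runner). A minimal sketch of that idea, with a hypothetical max_len guard added only so the snippet is self-contained (the script bounds length upstream when sampling pairs):

from collections import defaultdict

def bucketed_batches(sample_iter, batch_size=64, max_len=50):
    # Group sampled token sequences by exact length and emit a batch as soon as
    # one bucket fills, so every batch is uniform-length and needs no padding.
    buckets = defaultdict(list)
    for seq in sample_iter:
        if len(seq) > max_len:
            continue
        buckets[len(seq)].append(seq)
        if len(buckets[len(seq)]) >= batch_size:
            yield buckets[len(seq)]
            buckets[len(seq)] = []

Each yielded list can then go straight through batcher(), exactly as bucketed_data_runner does above.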
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/config.py
# Set this to how many LMs you want to train on diff splits of the data
NUM_FOLDS = 5

# what text to train on (right now it's toronto books)
PRETRAIN_TXT = '/home/mbforbes/repos/ari-holtzman/learning_to_write/data/corpora/tbooks/train.txt'
242
33.714286
98
py
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/train_lm.py
import os from argparse import ArgumentParser import numpy as np import pandas as pd import torch from torch import optim from torch.optim.lr_scheduler import StepLR from tqdm import tqdm from create_swag.lm.config import NUM_FOLDS from create_swag.lm.load_data import load_lm_data, RawPassages from create_swag.lm.simple_bilm import SimpleBiLM from pytorch_misc import clip_grad_norm, optimistic_restore, print_para, time_batch if not os.path.exists('vocabulary') or not all( [os.path.exists('lm-{}-of-{}.pkl'.format(i, NUM_FOLDS)) for i in range(NUM_FOLDS)]): print("MAKING THE VOCABULARY / DATA AGAIN", flush=True) _, vocab = load_lm_data(None) # ARGUMENTS parser = ArgumentParser(description='which fold to use') parser.add_argument('-fold', dest='fold', help='Which fold to use', type=int, default=0) fold = parser.parse_args().fold assert fold in set(range(NUM_FOLDS)) if not os.path.exists('checkpoints-{}'.format(fold)): os.mkdir('checkpoints-{}'.format(fold)) print("~~~~~~~~~USING SPLIT#{}~~~~~~~~~~~~~".format(fold)) train, val = RawPassages.splits(fold=fold) model = SimpleBiLM( vocab=train.vocab, recurrent_dropout_probability=0.2, embedding_dropout_probability=0.2, ) model.cuda() optimistic_restore(model, torch.load('e1-tbooks-pretrained-ckpt-370000.tar')['state_dict']) optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], weight_decay=1e-6, lr=1e-3) # scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3, factor=0.1, # verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1) scheduler = StepLR(optimizer, step_size=5, gamma=0.1) print(print_para(model)) for epoch_num in range(15): tr = [] model.train() for b, (time_per_batch, batch) in enumerate(time_batch(train.dataloader)): # batch['char_encoding'] = batch['char_encoding'].cuda(async=True) batch['story'] = batch['story'].cuda(async=True) model_forward = model(batch['story']) losses = {key: model_forward[key] for key in ['forward_loss', 'reverse_loss']} tr.append(pd.Series({k: v.data[0] for k, v in losses.items()})) optimizer.zero_grad() loss = sum(losses.values()) loss.backward() if b % 100 == 0 and b > 0: print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format( epoch_num, b, len(train.dataloader), time_per_batch, len(train.dataloader) * time_per_batch / 60)) print(pd.concat(tr[-100:], axis=1).mean(1)) print('-----------', flush=True) clip_grad_norm( [(n, p) for n, p in model.named_parameters() if p.grad is not None], max_norm=1, verbose=b % 1000 == 1, clip=True) optimizer.step() # Get the validation perplexity perplexity = [] model.eval() for batch in tqdm(val.dataloader): # batch['char_encoding'] = batch['char_encoding'].cuda(async=True) batch['story'] = batch['story'].cuda(async=True) model_forward = model(batch['story']) losses = {key: model_forward[key] for key in ['forward_loss', 'reverse_loss']} perplexity.append(pd.Series({k: v.data[0] for k, v in losses.items()})) df_cat = pd.DataFrame(perplexity).mean(0) print("Epoch {}, fwd loss {:.3f} perplexity {:.3f} bwd loss {:.3f} perplexity {:.3f}".format( epoch_num, df_cat['forward_loss'], np.exp(df_cat['forward_loss']), df_cat['reverse_loss'], np.exp(df_cat['reverse_loss'])), flush=True) scheduler.step(df_cat['forward_loss'] + df_cat['reverse_loss']) torch.save({'state_dict': model.state_dict()}, 'checkpoints-{}/ckpt-{}.tar'.format(fold, epoch_num))
3,709
41.159091
104
py
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/simple_bilm.py
""" A wrapper around ai2s elmo LM to allow for an lm objective... """ from typing import Optional, Tuple from typing import Union, List, Dict import numpy as np import torch from allennlp.common.checks import ConfigurationError from allennlp.data import Token, Vocabulary, Instance from allennlp.data.dataset import Batch from allennlp.data.fields import TextField from allennlp.data.token_indexers import SingleIdTokenIndexer from allennlp.modules.augmented_lstm import AugmentedLstm from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import PytorchSeq2SeqWrapper from allennlp.nn.util import sequence_cross_entropy_with_logits from torch.autograd import Variable from torch.nn import functional as F from torch.nn.utils.rnn import PackedSequence def _de_duplicate_generations(generations): """ Given a list of list of strings, filter out the ones that are duplicates. and return an idx corresponding to the good ones :param generations: :return: """ dup_set = set() unique_idx = [] for i, gen_i in enumerate(generations): gen_i_str = ' '.join(gen_i) if gen_i_str not in dup_set: unique_idx.append(i) dup_set.add(gen_i_str) return [generations[i] for i in unique_idx], np.array(unique_idx) class StackedLstm(torch.nn.Module): """ A stacked LSTM. Parameters ---------- input_size : int, required The dimension of the inputs to the LSTM. hidden_size : int, required The dimension of the outputs of the LSTM. num_layers : int, required The number of stacked LSTMs to use. recurrent_dropout_probability: float, optional (default = 0.0) The dropout probability to be used in a dropout scheme as stated in `A Theoretically Grounded Application of Dropout in Recurrent Neural Networks <https://arxiv.org/abs/1512.05287>`_ . use_input_projection_bias : bool, optional (default = True) Whether or not to use a bias on the input projection layer. This is mainly here for backwards compatibility reasons and will be removed (and set to False) in future releases. Returns ------- output_accumulator : PackedSequence The outputs of the interleaved LSTMs per timestep. A tensor of shape (batch_size, max_timesteps, hidden_size) where for a given batch element, all outputs past the sequence length for that batch are zero tensors. """ def __init__(self, input_size: int, hidden_size: int, num_layers: int, recurrent_dropout_probability: float = 0.0, use_highway: bool = True, use_input_projection_bias: bool = True, go_forward: bool = True) -> None: super(StackedLstm, self).__init__() # Required to be wrapped with a :class:`PytorchSeq2SeqWrapper`. self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers layers = [] lstm_input_size = input_size for layer_index in range(num_layers): layer = AugmentedLstm(lstm_input_size, hidden_size, go_forward, recurrent_dropout_probability=recurrent_dropout_probability, use_highway=use_highway, use_input_projection_bias=use_input_projection_bias) lstm_input_size = hidden_size self.add_module('layer_{}'.format(layer_index), layer) layers.append(layer) self.lstm_layers = layers def forward(self, # pylint: disable=arguments-differ inputs: PackedSequence, initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None): """ Parameters ---------- inputs : ``PackedSequence``, required. A batch first ``PackedSequence`` to run the stacked LSTM over. initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None) A tuple (state, memory) representing the initial hidden state and memory of the LSTM. Each tensor has shape (1, batch_size, output_dimension). 
Returns ------- output_sequence : PackedSequence The encoded sequence of shape (batch_size, sequence_length, hidden_size) final_states: torch.Tensor The per-layer final (state, memory) states of the LSTM, each with shape (num_layers, batch_size, hidden_size). """ if not initial_state: hidden_states = [None] * len(self.lstm_layers) elif initial_state[0].size()[0] != len(self.lstm_layers): raise ConfigurationError("Initial states were passed to forward() but the number of " "initial states does not match the number of layers.") else: hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0))) output_sequence = inputs final_states = [] for i, state in enumerate(hidden_states): layer = getattr(self, 'layer_{}'.format(i)) # The state is duplicated to mirror the Pytorch API for LSTMs. output_sequence, final_state = layer(output_sequence, state) final_states.append(final_state) final_state_tuple = tuple(torch.cat(state_list, 0) for state_list in zip(*final_states)) return output_sequence, final_state_tuple class SimpleBiLM(torch.nn.Module): def __init__(self, vocab: Vocabulary, recurrent_dropout_probability: float = 0.0, embedding_dropout_probability: float = 0.0, input_size=512, hidden_size=512) -> None: """ :param options_file: for initializing elmo BiLM :param weight_file: for initializing elmo BiLM :param requires_grad: Whether or not to finetune the LSTM layers :param recurrent_dropout_probability: recurrent dropout to add to LSTM layers """ super(SimpleBiLM, self).__init__() self.forward_lm = PytorchSeq2SeqWrapper(StackedLstm( input_size=input_size, hidden_size=hidden_size, num_layers=2, go_forward=True, recurrent_dropout_probability=recurrent_dropout_probability, use_input_projection_bias=False, use_highway=True), stateful=True) self.reverse_lm = PytorchSeq2SeqWrapper(StackedLstm( input_size=input_size, hidden_size=hidden_size, num_layers=2, go_forward=False, recurrent_dropout_probability=recurrent_dropout_probability, use_input_projection_bias=False, use_highway=True), stateful=True) # This will also be the encoder self.decoder = torch.nn.Linear(512, vocab.get_vocab_size(namespace='tokens')) self.vocab = vocab self.register_buffer('eos_tokens', torch.LongTensor([vocab.get_token_index(tok) for tok in ['.', '!', '?', '@@UNKNOWN@@', '@@PADDING@@', '@@bos@@', '@@eos@@']])) self.register_buffer('invalid_tokens', torch.LongTensor([vocab.get_token_index(tok) for tok in ['@@UNKNOWN@@', '@@PADDING@@', '@@bos@@', '@@eos@@', '@@NEWLINE@@']])) self.embedding_dropout_probability = embedding_dropout_probability def embed_words(self, words): assert words.dim() == 2 if not self.training: return F.embedding(words, self.decoder.weight) # Embedding dropout vocab_size = self.decoder.weight.size(0) mask = Variable( self.decoder.weight.data.new(vocab_size, 1).bernoulli_(1 - self.embedding_dropout_probability).expand_as( self.decoder.weight) / (1 - self.embedding_dropout_probability)) padding_idx = 0 embeds = self.decoder._backend.Embedding.apply(words, mask * self.decoder.weight, padding_idx, None, 2, False, False) return embeds def timestep_to_ids(self, timestep_tokenized: List[str]): """ Just a single timestep (so dont add BOS or EOS""" return Variable(torch.LongTensor([self.vocab.get_token_index(x) for x in timestep_tokenized])[:, None], volatile=not self.training).cuda(async=True) def batch_to_ids(self, stories_tokenized: List[List[str]]): """ Simple wrapper around _elmo_batch_to_ids :param batch: A list of tokenized sentences. :return: A tensor of padded character ids. 
""" batch = Batch([Instance( {'story': TextField([Token('@@bos@@')] + [Token(x) for x in story] + [Token('@@eos@@')], token_indexers={ 'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)})}) for story in stories_tokenized]) batch.index_instances(self.vocab) words = {k: v['tokens'] for k, v in batch.as_tensor_dict(for_training=self.training).items()}['story'].cuda( async=True) return words def conditional_generation(self, context, gt_completion, batch_size=128, max_gen_length=25, same_length_as_gt=False): """ Generate conditoned on the context. While we're at it we'll score the GT going forwards :param context: List of tokens to condition on. We'll add the BOS marker to it :param gt_completion: The GT completion :param batch_size: Number of sentences to generate :param max_gen_length: Max length for genertaed sentences (irrelvant if same_length_as_gt=True) :param same_length_as_gt: set to True if you want all the sents to have the same length as the gt_completion :return: """ # Forward condition on context, then repeat to be the right batch size: # (layer_index, batch_size, fwd hidden dim) forward_logprobs = self(self.batch_to_ids([context]), use_forward=True, use_reverse=False, compute_logprobs=True)['forward_logprobs'] self.forward_lm._states = tuple(x.repeat(1, batch_size, 1).contiguous() for x in self.forward_lm._states) # Each item will be (token, score) generations = [[(context[-1], 0.0)] for i in range(batch_size)] mask = Variable(forward_logprobs.data.new(batch_size).long().fill_(1)) gt_completion_padded = [self.vocab.get_token_index(gt_token) for gt_token in [x.lower() for x in gt_completion] + ['@@PADDING@@'] * ( max_gen_length - len(gt_completion))] for index, gt_token_ind in enumerate(gt_completion_padded): embeds = self.embed_words(self.timestep_to_ids([gen[-1][0] for gen in generations])) next_dists = F.softmax(self.decoder(self.forward_lm(embeds, mask[:, None]))[:, 0], 1).data # Perform hacky stuff on the distribution (disallowing BOS, EOS, that sorta thing sampling_probs = next_dists.clone() sampling_probs[:, self.invalid_tokens] = 0.0 # fix first row!!! 
sampling_probs[0].zero_() sampling_probs[0, gt_token_ind] = 1 if same_length_as_gt: if index == (len(gt_completion) - 1): sampling_probs.zero_() sampling_probs[:, gt_token_ind] = 1 else: sampling_probs[:, self.eos_tokens] = 0.0 sampling_probs = sampling_probs / sampling_probs.sum(1, keepdim=True) next_preds = torch.multinomial(sampling_probs, 1).squeeze(1) next_scores = np.log(next_dists[ torch.arange(0, next_dists.size(0), out=mask.data.new(next_dists.size(0))), next_preds, ].cpu().numpy()) for i, (gen_list, pred_id, score_i, mask_i) in enumerate( zip(generations, next_preds.cpu().numpy(), next_scores, mask.data.cpu().numpy())): if mask_i: gen_list.append((self.vocab.get_token_from_index(pred_id), score_i)) is_eos = (next_preds[:, None] == self.eos_tokens[None]).max(1)[0] mask[is_eos] = 0 if mask.sum().data[0] == 0: break generation_scores = np.zeros((len(generations), max([len(g) - 1 for g in generations])), dtype=np.float32) for i, gen in enumerate(generations): for j, (_, v) in enumerate(gen[1:]): generation_scores[i, j] = v generation_toks, idx = _de_duplicate_generations([[tok for (tok, score) in gen[1:]] for gen in generations]) return generation_toks, generation_scores[idx], forward_logprobs.data.cpu().numpy() def _chunked_logsoftmaxes(self, activation, word_targets, chunk_size=256): """ do the softmax in chunks so memory doesnt explode :param activation: [batch, T, dim] :param targets: [batch, T] indices :param chunk_size: you might need to tune this based on GPU specs :return: """ all_logprobs = [] num_chunks = (activation.size(0) - 1) // chunk_size + 1 for activation_chunk, target_chunk in zip(torch.chunk(activation, num_chunks, dim=0), torch.chunk(word_targets, num_chunks, dim=0)): assert activation_chunk.size()[:2] == target_chunk.size()[:2] targets_flat = target_chunk.view(-1) time_indexer = torch.arange(0, targets_flat.size(0), out=target_chunk.data.new(targets_flat.size(0))) % target_chunk.size(1) batch_indexer = torch.arange(0, targets_flat.size(0), out=target_chunk.data.new(targets_flat.size(0))) / target_chunk.size(1) all_logprobs.append(F.log_softmax(self.decoder(activation_chunk), 2)[ batch_indexer, time_indexer, targets_flat].view(*target_chunk.size())) return torch.cat(all_logprobs, 0) def forward(self, words: torch.Tensor, use_forward=True, use_reverse=True, compute_logprobs=False) -> Dict[ str, Union[torch.Tensor, List[torch.Tensor]]]: """ use this for training the LM :param words: [batch_size, N] words. 
assuming you're starting with BOS and ending with EOS here :return: """ encoded_inputs = self.embed_words(words) mask = (words != 0).long()[:, 2:] word_targets = words[:, 1:-1].contiguous() result_dict = { 'mask': mask, 'word_targets': word_targets, } # TODO: try to reduce duplicate code here if use_forward: self.forward_lm.reset_states() forward_activation = self.forward_lm(encoded_inputs[:, :-2], mask) if compute_logprobs: # being memory efficient here is critical if the input tensors are large result_dict['forward_logprobs'] = self._chunked_logsoftmaxes(forward_activation, word_targets) * mask.float() else: result_dict['forward_logits'] = self.decoder(forward_activation) result_dict['forward_loss'] = sequence_cross_entropy_with_logits(result_dict['forward_logits'], word_targets, mask) if use_reverse: self.reverse_lm.reset_states() reverse_activation = self.reverse_lm(encoded_inputs[:, 2:], mask) if compute_logprobs: result_dict['reverse_logprobs'] = self._chunked_logsoftmaxes(reverse_activation, word_targets) * mask.float() else: result_dict['reverse_logits'] = self.decoder(reverse_activation) result_dict['reverse_loss'] = sequence_cross_entropy_with_logits(result_dict['reverse_logits'], word_targets, mask) return result_dict
16,902
48.568915
117
py
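simple_bilm.py notes that the decoder softmax has to be chunked "so memory doesnt explode". The same trick can be written more compactly with gather; this is a simplified restatement of _chunked_logsoftmaxes, not the exact original (which builds manual batch/time indexers):

import torch
import torch.nn.functional as F

def chunked_target_logprobs(decoder, activation, targets, chunk_size=256):
    # activation: [batch, T, dim] hidden states; targets: [batch, T] token ids;
    # decoder: a module mapping dim -> vocab size (e.g. the Linear decoder above).
    out = []
    for act_chunk, tgt_chunk in zip(torch.split(activation, chunk_size, dim=0),
                                    torch.split(targets, chunk_size, dim=0)):
        logprobs = F.log_softmax(decoder(act_chunk), dim=2)                 # [b, T, vocab]
        out.append(logprobs.gather(2, tgt_chunk.unsqueeze(2)).squeeze(2))   # [b, T]
    return torch.cat(out, dim=0)

Only one [chunk, T, vocab] logits tensor is live at a time, which is what keeps the language-model loss affordable on long batches.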
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/load_data.py
# First make the vocabulary, etc. import os import pickle as pkl import random import simplejson as json from allennlp.common.util import get_spacy_model from allennlp.data import Instance from allennlp.data import Token from allennlp.data import Vocabulary from allennlp.data.dataset import Batch from allennlp.data.fields import TextField from allennlp.data.token_indexers import SingleIdTokenIndexer from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer from torch.utils.data import Dataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm from raw_data.events import DATA_PATH from pytorch_misc import pairwise from create_swag.lm.config import NUM_FOLDS def load_lm_data(fold=None, mode='train'): """ Turns the sequential data into instances. :param split: :return: """ # Get or make vocab spacy_model = get_spacy_model("en_core_web_sm", pos_tags=False, parse=False, ner=False) if os.path.exists('vocabulary'): print("Loading cached vocab. caution if you're building the dataset again!!!!", flush=True) vocab = Vocabulary.from_files('vocabulary') with open(os.path.join(DATA_PATH, 'events-3.json'), 'r') as f: lm_data = json.load(f) lm_data = [data_item for s in ('train', 'val', 'test') for data_item in lm_data[s]] else: assert fold is None with open(os.path.join(DATA_PATH, 'events-3.json'), 'r') as f: lm_data = json.load(f) lm_data = [data_item for s in ('train', 'val', 'test') for data_item in lm_data[s]] # Manually doing this because I don't want to double count things vocab = Vocabulary.from_instances( [Instance({'story': TextField( [Token(x) for x in ['@@bos@@'] + [x.orth_ for x in spacy_model(sent)] + ['@@eos@@']], token_indexers={ 'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)})}) for data_item in lm_data for sent in data_item['sentences']], min_count={'tokens': 3}) vocab.get_index_to_token_vocabulary('tokens') vocab.save_to_files('vocabulary') print("VOCABULARY HAS {} ITEMS".format(vocab.get_vocab_size(namespace='tokens'))) if all([os.path.exists('lm-{}-of-{}.pkl'.format(i, NUM_FOLDS)) for i in range(NUM_FOLDS)]): print("LOADING CACHED DATASET", flush=True) if mode == 'val': with open('lm-{}-of-{}.pkl'.format(fold, NUM_FOLDS), 'rb') as f: print("Loading split{} for {}".format(fold, mode)) instances = pkl.load(f) else: instances = [] for other_fold in range(NUM_FOLDS): if other_fold != fold: with open('lm-{}-of-{}.pkl'.format(other_fold, NUM_FOLDS), 'rb') as f: print("Loading split{} for {}".format(other_fold, mode)) instances += pkl.load(f) return instances, vocab print("MAKING THE DATASET", flush=True) assert fold is None for item in tqdm(lm_data): item['sentences_tokenized'] = [[st.orth_ for st in spacy_model(sent)] for sent in item['sentences']] def _to_instances(data): # flatten this instances = [] for item in data: for s1, s2 in pairwise(item['sentences_tokenized']): instances.append(( Instance({'story': TextField([Token(x) for x in ['@@bos@@'] + s1 + s2 + ['@@eos@@']], token_indexers={ 'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)})}), s1, s2, item, )) return instances random.seed(123456) random.shuffle(lm_data) all_sets = [] for fold_ in range(NUM_FOLDS): val_set = _to_instances(lm_data[len(lm_data) * fold_ // NUM_FOLDS:len(lm_data) * (fold_ + 1) // NUM_FOLDS]) with open('lm-{}-of-{}.pkl'.format(fold_, NUM_FOLDS), 'wb') as f: pkl.dump(val_set, f) all_sets.extend(val_set) return all_sets, vocab class RawPassages(Dataset): def __init__(self, fold, mode): self.mode = mode self.fold = 
fold self.instances, self.vocab = load_lm_data(fold=self.fold, mode=self.mode) self.dataloader = DataLoader(dataset=self, batch_size=32, shuffle=self.mode == 'train', num_workers=0, collate_fn=self.collate, drop_last=self.mode == 'train') self.indexer = ELMoTokenCharactersIndexer() def collate(self, instances_l): batch = Batch([x[0] for x in instances_l]) batch.index_instances(self.vocab) batch_dict = {k: v['tokens'] for k, v in batch.as_tensor_dict().items()} batch_dict['story_tokens'] = [instance[0].fields['story'].tokens for instance in instances_l] batch_dict['story_full'] = [x[1] + x[2] for x in instances_l] batch_dict['items'] = [x[3] for x in instances_l] return batch_dict def __len__(self): return len(self.instances) def __getitem__(self, index): """ :param index: :return: * raw rocstories * entities * entity IDs + sentences * Instance. to print use r3.fields['verb_phrase'].field_list[5].tokens """ return self.instances[index] @classmethod def splits(cls, fold): return cls(fold, mode='train'), cls(fold, mode='val') if __name__ == '__main__': instances, vocab = load_lm_data() # train, val = RawPassages.splits() # for item in train.dataloader: # for story in item['story_tokens']: # tok_text = [x.text.lower() for x in story] # remapped_text = [vocab.get_token_from_index(vocab.get_token_index(x)) for x in tok_text] # print('({}) {} -> {}'.format('D' if tok_text != remapped_text else ' ', # ' '.join(tok_text), ' '.join(remapped_text)), flush=True)
6,285
40.629139
118
py
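A minimal usage sketch for the loader record above: it iterates one validation batch and reads the fields that collate() builds. The import path is an assumption (the record's module name is not shown here), and it presumes the cached 'vocabulary/' directory and 'lm-*-of-*.pkl' folds already exist.

# Hypothetical driver for the RawPassages loader defined above.
from create_swag.lm.load_data import RawPassages  # assumed import path, not confirmed by the record

train, val = RawPassages.splits(fold=0)
for batch in val.dataloader:
    token_ids = batch['story']                       # padded token-id tensor from Batch.as_tensor_dict()
    first_story = ' '.join(batch['story_full'][0])   # raw s1 + s2 word list kept by collate()
    print(token_ids.shape, first_story[:80])
    break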
fat-albert
fat-albert-master/bert/datasets/SWAG/create_swag/lm/__init__.py
0
0
0
py
fat-albert
fat-albert-master/bert/datasets/MovieQA/semantic_alignment_clinical.py
import config as cfg
import story_loader
import data_loader
import csv
import numpy as np
from semantic_text_similarity.models import WebBertSimilarity
from semantic_text_similarity.models import ClinicalBertSimilarity

web_model = WebBertSimilarity(device='cuda', batch_size=10)  # defaults to GPU prediction
clinical_model = ClinicalBertSimilarity(device='cuda', batch_size=10)  # defaults to GPU prediction

num_sentences = 10
total_sentences = 5
mqa = data_loader.DataLoader()


def common_member(a, b):
    # Collect up to total_sentences indices that appear in both rankings.
    common_set = []
    counter = 0
    for i in a:
        for j in b:
            if i == j and counter < total_sentences:
                common_set.append(i)
                counter = counter + 1
    return common_set


def stringReplace(input):
    output = input
    if not output:
        output = 'None'
    return output


## LOAD THE TEST DATASET
movie_list = mqa.get_split_movies(split='val')
story, qa = mqa.get_story_qa_data('val', 'split_plot')

with open('clinical.csv', mode='w') as test_file:
    employee_writer = csv.writer(test_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        sen_mtx = []
        comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5
        sim_mtx_w = []
        sim_mtx_c = []
        # Score every plot sentence against the question+answers string with both models.
        for sen in story[movie_idx]:
            sen_mtx.append((sen, comparison_metric))
        sim_mtx_w = web_model.predict(sen_mtx)
        sim_mtx_c = clinical_model.predict(sen_mtx)
        # sim_mtx = sim_mtx_w + sim_mtx_c
        # print(sim_mtx_w)
        # print(sim_mtx_c)
        sim_mtx = sim_mtx_w
        # sim_mtx.append(sim_mtx_c)
        # Note: np.concatenate returns a new array and the result is never assigned,
        # so the next call has no effect; the ranking below uses the web-model scores only.
        np.concatenate((sim_mtx_w, sim_mtx_c), axis=None)
        print(len(sen_mtx))
        print("-----------")
        top_sentences_w = np.asarray(sim_mtx_w).argsort()[-num_sentences:][::-1]
        top_sentences_c = np.asarray(sim_mtx_c).argsort()[-num_sentences:][::-1]
        top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1]
        # top_sentences = common_member(top_sentences_w, top_sentences_c)
        print(counter, q[5], top_sentences)
        counter = counter + 1
2,519
34
100
py
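The script above computes both web and clinical similarity scores but ends up ranking on the web scores alone (its np.concatenate result is discarded). Below is a hedged sketch of one way to fuse the two arrays, mirroring the mean-normalised sum the companion sem_align_eval_* scripts use; fuse_scores and the score values are illustrative, not repo code.

import numpy as np

def fuse_scores(scores_a, scores_b):
    """Mean-normalise each score array and add them, so neither model dominates."""
    a = np.asarray(scores_a, dtype=float)
    b = np.asarray(scores_b, dtype=float)
    return a / a.mean() + b / b.mean()

# Made-up similarity scores for five plot sentences:
sim_mtx_w = [2.1, 3.5, 1.0, 4.2, 2.8]   # WebBertSimilarity scores (illustrative)
sim_mtx_c = [1.1, 0.9, 0.4, 1.6, 1.2]   # ClinicalBertSimilarity scores (illustrative)
fused = fuse_scores(sim_mtx_w, sim_mtx_c)
top_sentences = fused.argsort()[-3:][::-1]   # indices of the 3 best-aligned sentences
print(top_sentences)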
fat-albert
fat-albert-master/bert/datasets/MovieQA/sem_align_eval_train.py
import config as cfg import story_loader import data_loader import csv import numpy as np from semantic_text_similarity.models import WebBertSimilarity from semantic_text_similarity.models import ClinicalBertSimilarity web_model = WebBertSimilarity(device='cuda', batch_size=100) #defaults to GPU prediction clinical_model = ClinicalBertSimilarity(device='cuda', batch_size=100) #defaults to GPU prediction num_sentences = 6 mqa = data_loader.DataLoader() def stringReplace(input): output = input if not output: output = 'None' return output ## LOAD THE TRAIN DATASET and WEB movie_list = mqa.get_split_movies(split='train') story, qa = mqa.get_story_qa_data('train', 'split_plot') with open('sem_align_eval/train_com.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) ans_3 = stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) sim_mtx_w = web_model.predict(sen_mtx) sim_mtx_c = clinical_model.predict(sen_mtx) sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) print("COMB",counter) file_writer.writerow([counter, q[5], top_sentences]) correct_ans = q[3] counter = counter + 1 ## LOAD THE TRAIN DATASET and CLINICAL movie_list = mqa.get_split_movies(split='train') story, qa = mqa.get_story_qa_data('train', 'split_plot') with open('sem_align_eval/train_clin.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) ans_3 = stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) #sim_mtx_w = web_model.predict(sen_mtx) sim_mtx = clinical_model.predict(sen_mtx) #sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) print("CLIN",counter) file_writer.writerow([counter, q[5], top_sentences]) correct_ans = q[3] counter = counter + 1 ## LOAD THE TRAIN DATASET and WEB movie_list = mqa.get_split_movies(split='train') story, qa = mqa.get_story_qa_data('train', 'split_plot') with open('sem_align_eval/train_web.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) 
ans_3 = stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) sim_mtx = web_model.predict(sen_mtx) #sim_mtx_c = clinical_model.predict(sen_mtx) #sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) print("WEB",counter) file_writer.writerow([counter, q[5], top_sentences]) correct_ans = q[3] counter = counter + 1
5,033
39.596774
100
py
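The evaluation script above repeats an almost identical loop three times, once per scoring model. A hedged sketch of how that shared logic could be factored into a single function that takes the scorer as a parameter; write_alignment_csv and its argument names are illustrative, not existing repo code, and score_fn is assumed to follow the predict() interface of the similarity models used above.

import csv
import numpy as np

def write_alignment_csv(out_path, story, qa, score_fn, num_sentences=6):
    """Write one row per question with the indices of the top-scoring plot sentences.

    score_fn takes a list of (sentence, query) pairs and returns one score per pair.
    """
    with open(out_path, mode='w') as out_file:
        writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['', 'plot_align', 'similarity_align'])
        for counter, q in enumerate(qa):
            movie_idx = q[4]
            query = q[1] + ''.join(a or 'None' for a in q[2])
            scores = score_fn([(sen, query) for sen in story[movie_idx]])
            top = np.asarray(scores).argsort()[-num_sentences:][::-1]
            writer.writerow([counter, q[5], top])

# e.g. write_alignment_csv('sem_align_eval/train_web.csv', story, qa, web_model.predict)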
fat-albert
fat-albert-master/bert/datasets/MovieQA/semantic_alignment.py
import config as cfg
import story_loader
import data_loader
import csv
import numpy as np
from semantic_text_similarity.models import WebBertSimilarity

web_model = WebBertSimilarity(device='cuda', batch_size=10)  # defaults to GPU prediction

num_sentences = 6
mqa = data_loader.DataLoader()


def stringReplace(input):
    output = input
    if not output:
        output = 'None'
    return output


## LOAD THE TRAINING DATASET
movie_list = mqa.get_split_movies(split='train')
story, qa = mqa.get_story_qa_data('train', 'split_plot')

with open('data/sim-train.csv', mode='w') as training_file:
    employee_writer = csv.writer(training_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        sim_mtx = []
        comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5
        for sen in story[movie_idx]:
            sim_mtx.append(web_model.predict([(sen, comparison_metric)]).item())
        top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1]
        for a in top_sentences:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        correct_ans = q[3]
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5, correct_ans])
        counter = counter + 1


## LOAD THE VALIDATION DATASET
movie_list = mqa.get_split_movies(split='val')
story, qa = mqa.get_story_qa_data('val', 'split_plot')

with open('data/sim-val.csv', mode='w') as val_file:
    employee_writer = csv.writer(val_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        sim_mtx = []
        comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5
        for sen in story[movie_idx]:
            sim_mtx.append(web_model.predict([(sen, comparison_metric)]).item())
        top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1]
        for a in top_sentences:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        correct_ans = q[3]
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5, correct_ans])
        counter = counter + 1


## LOAD THE TEST DATASET
movie_list = mqa.get_split_movies(split='test')
story, qa = mqa.get_story_qa_data('test', 'split_plot')

with open('data/test.csv', mode='w') as test_file:
    employee_writer = csv.writer(test_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        index = 0
        movie_align = ''
        sim_mtx = []
        comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5
        for sen in story[movie_idx]:
            sim_mtx.append(web_model.predict([(sen, comparison_metric)]).item())
        top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1]
        for a in top_sentences:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5])
        counter = counter + 1
5,040
42.08547
170
py
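semantic_alignment.py above calls web_model.predict once per plot sentence, which issues one GPU batch per call; the semantic_alignment_fast.py record further down batches all pairs for a question into a single call. A hedged sketch of that batched pattern as a small helper; the helper name is illustrative, and the model is assumed to accept a list of (sentence, query) pairs as in the scripts above.

import numpy as np

def score_sentences(model, sentences, query):
    """Score every plot sentence against one question+answers string in a single predict() call."""
    return np.asarray(model.predict([(sen, query) for sen in sentences]))

# usage, reusing names from the loop above:
#   scores = score_sentences(web_model, story[movie_idx], comparison_metric)
#   top_sentences = scores.argsort()[-num_sentences:][::-1]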
fat-albert
fat-albert-master/bert/datasets/MovieQA/sem_align_eval_val.py
import config as cfg import story_loader import data_loader import csv import numpy as np from semantic_text_similarity.models import WebBertSimilarity from semantic_text_similarity.models import ClinicalBertSimilarity web_model = WebBertSimilarity(device='cuda', batch_size=100) #defaults to GPU prediction clinical_model = ClinicalBertSimilarity(device='cuda', batch_size=100) #defaults to GPU prediction num_sentences = 6 mqa = data_loader.DataLoader() def stringReplace(input): output = input if not output: output = 'None' return output ## LOAD THE VAL DATASET and WEB movie_list = mqa.get_split_movies(split='val') story, qa = mqa.get_story_qa_data('val', 'split_plot') with open('sem_align_eval/val_com.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) ans_3 = stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) sim_mtx_w = web_model.predict(sen_mtx) sim_mtx_c = clinical_model.predict(sen_mtx) sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) print("COMB",counter) file_writer.writerow([counter, q[5], top_sentences]) correct_ans = q[3] counter = counter + 1 ## LOAD THE VAL DATASET and CLINICAL movie_list = mqa.get_split_movies(split='val') story, qa = mqa.get_story_qa_data('val', 'split_plot') with open('sem_align_eval/val_clin.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) ans_3 = stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) #sim_mtx_w = web_model.predict(sen_mtx) sim_mtx = clinical_model.predict(sen_mtx) #sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) print("CLIN",counter) file_writer.writerow([counter, q[5], top_sentences]) correct_ans = q[3] counter = counter + 1 ## LOAD THE VAL DATASET and WEB movie_list = mqa.get_split_movies(split='val') story, qa = mqa.get_story_qa_data('val', 'split_plot') with open('sem_align_eval/val_web.csv', mode='w') as out_file: file_writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) file_writer.writerow(['', 'plot_align' , 'similarity_align']) counter = 0 for q in qa: movie_idx = q[4] question = q[1] ans_1 = stringReplace(q[2][0]) ans_2 = stringReplace(q[2][1]) ans_3 = 
stringReplace(q[2][2]) ans_4 = stringReplace(q[2][3]) ans_5 = stringReplace(q[2][4]) index = 0 movie_align = '' sim_mtx = [] comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5 sen_mtx = [] for sen in story[movie_idx]: sen_mtx.append((sen,comparison_metric)) sim_mtx = web_model.predict(sen_mtx) #sim_mtx_c = clinical_model.predict(sen_mtx) #sim_mtx = np.divide(sim_mtx_w,np.mean(sim_mtx_w)) + np.divide(sim_mtx_c,np.mean(sim_mtx_c)) top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1] for a in top_sentences: movie_align = movie_align + " ".join(story[movie_idx][a].split("\n")) combined = " ".join(movie_align.split("\n")) file_writer.writerow([counter, q[5], top_sentences]) print("WEB",counter) correct_ans = q[3] counter = counter + 1
5,009
39.403226
100
py
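The eval scripts above write the top_sentences numpy array straight into a CSV cell, so the cell holds its string repr (e.g. "[12  3 40 ...]"), which is awkward to parse later. A hedged, purely illustrative sketch of storing and recovering the indices as a comma-separated field instead; not how the repo actually stores them.

import csv
import numpy as np

top_sentences = np.array([12, 3, 40, 7, 25, 1])

# write: join the indices into one unambiguous cell
with open('alignment_demo.csv', 'w', newline='') as f:
    csv.writer(f).writerow([0, ','.join(str(i) for i in top_sentences)])

# read: split the cell back into ints
with open('alignment_demo.csv', newline='') as f:
    row = next(csv.reader(f))
    recovered = [int(i) for i in row[1].split(',')]
print(recovered)  # [12, 3, 40, 7, 25, 1]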
fat-albert
fat-albert-master/bert/datasets/MovieQA/semantic_alignment_fast.py
import config as cfg
import story_loader
import data_loader
import csv
import numpy as np
from semantic_text_similarity.models import WebBertSimilarity

web_model = WebBertSimilarity(device='cuda', batch_size=10)  # defaults to GPU prediction

num_sentences = 6
mqa = data_loader.DataLoader()


def stringReplace(input):
    output = input
    if not output:
        output = 'None'
    return output


## LOAD THE TEST DATASET
movie_list = mqa.get_split_movies(split='test')
story, qa = mqa.get_story_qa_data('test', 'split_plot')

with open('data/test.csv', mode='w') as test_file:
    employee_writer = csv.writer(test_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        sim_mtx = []
        comparison_metric = question + ans_1 + ans_2 + ans_3 + ans_4 + ans_5
        sen_mtx = []
        for sen in story[movie_idx]:
            sen_mtx.append((sen, comparison_metric))
            # sim_mtx.append(web_model.predict([(sen, comparison_metric)]).item())
        sim_mtx = web_model.predict(sen_mtx)
        top_sentences = np.asarray(sim_mtx).argsort()[-num_sentences:][::-1]
        print(top_sentences)
        for a in top_sentences:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        correct_ans = 0
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5, correct_ans])
        counter = counter + 1
2,116
37.490909
170
py
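A small hedged sketch of reading back the data/test.csv produced above with csv.DictReader to sanity-check one row; the column names come from the header the script writes, and the file path is the one used above.

import csv

with open('data/test.csv', newline='') as f:
    reader = csv.DictReader(f)
    row = next(reader)
    print(row['video-id'])                                  # imdb key of the movie
    print(row['sent2'])                                     # the question
    print([row['ending{}'.format(i)] for i in range(5)])    # the five candidate answers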
fat-albert
fat-albert-master/bert/datasets/MovieQA/plot_alignment.py
import config as cfg
import story_loader
import data_loader
import csv

mqa = data_loader.DataLoader()


def stringReplace(input):
    output = input
    if not output:
        output = 'None'
    return output


## LOAD THE TRAINING DATASET
movie_list = mqa.get_split_movies(split='train')
story, qa = mqa.get_story_qa_data('train', 'split_plot')

with open('data/aligned-train.csv', mode='w') as training_file:
    employee_writer = csv.writer(training_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        for a in q[5]:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        correct_ans = q[3]
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5, correct_ans])
        counter = counter + 1


## LOAD THE VALIDATION DATASET
movie_list = mqa.get_split_movies(split='val')
story, qa = mqa.get_story_qa_data('val', 'split_plot')

with open('data/aligned-val.csv', mode='w') as val_file:
    employee_writer = csv.writer(val_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    employee_writer.writerow(['', 'video-id', 'fold-ind', 'startphrase', 'sent1', 'sent2', 'gold-source',
                              'ending0', 'ending1', 'ending2', 'ending3', 'ending4', 'label'])
    counter = 0
    for q in qa:
        movie_idx = q[4]
        movie_story = ', '.join(story[movie_idx])
        question = q[1]
        index = 0
        # story_split = movie_story.splitlines()
        movie_align = ''
        for a in q[5]:
            movie_align = movie_align + " ".join(story[movie_idx][a].split("\n"))
        combined = " ".join(movie_align.split("\n")) + question
        ans_1 = stringReplace(q[2][0])
        ans_2 = stringReplace(q[2][1])
        ans_3 = stringReplace(q[2][2])
        ans_4 = stringReplace(q[2][3])
        ans_5 = stringReplace(q[2][4])
        correct_ans = q[3]
        employee_writer.writerow([counter, movie_idx, counter, combined, movie_align, question, movie_align,
                                  ans_1, ans_2, ans_3, ans_4, ans_5, correct_ans])
        counter = counter + 1
2,784
38.785714
170
py
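plot_alignment.py and the other MovieQA scripts above index each QA tuple positionally (q[1] question, q[2] answers, q[3] correct index, q[4] imdb key, q[5] aligned plot-sentence indices). A hedged sketch wrapping that layout in a named accessor so the intent is explicit; the field order, including q[0] as the question id, is inferred from the usage above and should be treated as an assumption.

from collections import namedtuple

# Field order inferred from how q[...] is indexed in the scripts above (assumption).
QAItem = namedtuple('QAItem', ['qid', 'question', 'answers', 'correct_index', 'imdb_key', 'plot_alignment'])

def as_qa_item(q):
    return QAItem(*q[:6])

# usage:
#   item = as_qa_item(qa[0])
#   print(item.question, item.answers[item.correct_index])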
fat-albert
fat-albert-master/bert/docs/source/conf.py
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = u'transformers' copyright = u'2019, huggingface' author = u'huggingface' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags release = u'2.2.0' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'recommonmark', 'sphinx.ext.viewcode', 'sphinx_markdown_tables' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'analytics_id': 'UA-83738774-2' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = 'transformersdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'transformers.tex', u'transformers Documentation', u'huggingface', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'transformers', u'transformers Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'transformers', u'transformers Documentation', author, 'transformers', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] def setup(app): app.add_stylesheet('css/huggingface.css') app.add_stylesheet('css/code-snippets.css') app.add_js_file('js/custom.js') # -- Extension configuration -------------------------------------------------
5,609
28.68254
79
py
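A hedged sketch of building the documentation that the conf.py record above configures, driven from Python rather than the command line; it assumes sphinx, sphinx_rtd_theme, recommonmark and sphinx_markdown_tables are installed, and the source/output paths are illustrative.

# Equivalent to: sphinx-build -b html docs/source docs/_build/html
from sphinx.cmd.build import build_main

exit_code = build_main(['-b', 'html', 'docs/source', 'docs/_build/html'])
print('sphinx-build exited with', exit_code)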
fat-albert
fat-albert-master/bert/transformers/modeling_encoder_decoder.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Encoder-Decoder architectures """ from __future__ import absolute_import, division, print_function, unicode_literals import logging import os import torch from torch import nn from .modeling_auto import AutoModel, AutoModelWithLMHead logger = logging.getLogger(__name__) class PreTrainedEncoderDecoder(nn.Module): r""" :class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and (optionally) another one as decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class method. """ def __init__(self, encoder, decoder): super(PreTrainedEncoderDecoder, self).__init__() self.encoder = encoder self.decoder = decoder @classmethod def from_pretrained( cls, encoder_pretrained_model_name_or_path=None, decoder_pretrained_model_name_or_path=None, *model_args, **kwargs ): r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you need to first set it back in training mode with `model.train()` Params: encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. 
model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments. Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. You can specify kwargs sepcific for the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively. (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both encoders and decoders. Examples:: model = PreTrainedEncoderDecoder.from_pretained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert """ # keyword arguments come in 3 flavors: encoder-specific (prefixed by # `encoder_`), decoder-specific (prefixed by `decoder_`) and those # that apply to the model as a whole. # We let the specific kwargs override the common ones in case of conflict. 
kwargs_common = { argument: value for argument, value in kwargs.items() if not argument.startswith("encoder_") and not argument.startswith("decoder_") } kwargs_decoder = kwargs_common.copy() kwargs_encoder = kwargs_common.copy() kwargs_encoder.update( { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } ) kwargs_decoder.update( { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } ) # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: encoder = AutoModel.from_pretrained( encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder ) encoder.config.is_decoder = False decoder = kwargs_decoder.pop("model", None) if decoder is None: decoder = AutoModelWithLMHead.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder ) decoder.config.is_decoder = True model = cls(encoder, decoder) return model def save_pretrained(self, save_directory): """ Save a Seq2Seq model and its configuration file in a format such that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained` We save the encoder' and decoder's parameters in two separate directories. """ self.encoder.save_pretrained(os.path.join(save_directory, "encoder")) self.decoder.save_pretrained(os.path.join(save_directory, "decoder")) def forward(self, encoder_input_ids, decoder_input_ids, **kwargs): """ The forward pass on a seq2eq depends what we are performing: - During training we perform one forward pass through both the encoder and decoder; - During prediction, we perform one forward pass through the encoder, and then perform several forward passes with the encoder's hidden state through the decoder to decode a full sequence. Therefore, we skip the forward pass on the encoder if an argument named `encoder_hidden_state` is passed to this function. Params: encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` Indices of encoder input sequence tokens in the vocabulary. decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` Indices of decoder input sequence tokens in the vocabulary. kwargs: (`optional`) Remaining dictionary of keyword arguments. """ # keyword arguments come in 3 flavors: encoder-specific (prefixed by # `encoder_`), decoder-specific (prefixed by `decoder_`) and those # that apply to the model as whole. # We let the specific kwargs override the common ones in case of conflict. 
kwargs_common = { argument: value for argument, value in kwargs.items() if not argument.startswith("encoder_") and not argument.startswith("decoder_") } kwargs_decoder = kwargs_common.copy() kwargs_encoder = kwargs_common.copy() kwargs_encoder.update( { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } ) kwargs_decoder.update( { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } ) # Encode if needed (training, first prediction pass) encoder_hidden_states = kwargs_encoder.pop("hidden_states", None) if encoder_hidden_states is None: encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder) encoder_hidden_states = encoder_outputs[ 0 ] # output the last layer hidden state else: encoder_outputs = () # Decode kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get( "attention_mask", None ) decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder) return decoder_outputs + encoder_outputs class Model2Model(PreTrainedEncoderDecoder): r""" :class:`~transformers.Model2Model` instantiates a Seq2Seq2 model where both of the encoder and decoder are of the same family. If the name of or that path to a pretrained model is specified the encoder and the decoder will be initialized with the pretrained weight (the cross-attention will be intialized randomly if its weights are not present). It is possible to override this behavior and initialize, say, the decoder randomly by creating it beforehand as follows config = BertConfig.from_pretrained() decoder = BertForMaskedLM(config) model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder) """ def __init__(self, *args, **kwargs): super(Model2Model, self).__init__(*args, **kwargs) self.tie_weights() def tie_weights(self): """ Tying the encoder and decoders' embeddings together. We need for each to get down to the embedding weights. However the different model classes are inconsistent to that respect: - BertModel: embeddings.word_embeddings - RoBERTa: embeddings.word_embeddings - XLMModel: embeddings - GPT2: wte - BertForMaskedLM: bert.embeddings.word_embeddings - RobertaForMaskedLM: roberta.embeddings.word_embeddings argument of the XEmbedding layer for each model, but it is "blocked" by a model-specific keyword (bert, )... 
""" # self._tie_or_clone_weights(self.encoder, self.decoder) pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs): if ( "bert" not in pretrained_model_name_or_path or "roberta" in pretrained_model_name_or_path or "distilbert" in pretrained_model_name_or_path ): raise ValueError("Only the Bert model is currently supported.") model = super(Model2Model, cls).from_pretrained( encoder_pretrained_model_name_or_path=pretrained_model_name_or_path, decoder_pretrained_model_name_or_path=pretrained_model_name_or_path, *args, **kwargs ) return model class Model2LSTM(PreTrainedEncoderDecoder): @classmethod def from_pretrained(cls, *args, **kwargs): if kwargs.get("decoder_model", None) is None: # We will create a randomly initilized LSTM model as decoder if "decoder_config" not in kwargs: raise ValueError( "To load an LSTM in Encoder-Decoder model, please supply either: " " - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or" " - a dictionary of configuration parameters that will be used to initialize a" " torch.nn.LSTM model as `decoder_config` keyword argument. " " E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`" ) kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config")) model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs) return model
15,827
49.893891
472
py
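A hedged usage sketch of the encoder-decoder wrapper above, following its from_pretrained docstring: instantiate a Bert2Bert model and run one forward pass. It assumes the vendored transformers version exports Model2Model and BertTokenizer at the top level and that the underlying BERT decoder accepts encoder hidden states as wired up in forward() above; the input sentences are dummies.

import torch
from transformers import BertTokenizer, Model2Model

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = Model2Model.from_pretrained('bert-base-uncased')  # Bert encoder + Bert decoder with cross-attention
model.eval()

encoder_input_ids = torch.tensor([tokenizer.encode("the movie opens on a hospital ward")])
decoder_input_ids = torch.tensor([tokenizer.encode("a patient wakes up")])

with torch.no_grad():
    outputs = model(encoder_input_ids, decoder_input_ids)
prediction_scores = outputs[0]  # decoder LM logits: (batch, target_len, vocab_size)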
fat-albert
fat-albert-master/bert/transformers/tokenization_roberta.py
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for RoBERTa.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import regex as re from io import open from .tokenization_gpt2 import GPT2Tokenizer try: from functools import lru_cache except ImportError: # Just a dummy decorator to get the checks to run on python2 # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now. def lru_cache(): return lambda func: func logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } PRETRAINED_VOCAB_FILES_MAP = { 'vocab_file': { 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-vocab.json", 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", }, 'merges_file': { 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-merges.txt", 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'roberta-base': 512, 'roberta-large': 512, 'roberta-large-mnli': 512, 'distilroberta-base': 512, 'roberta-base-openai-detector': 512, 'roberta-large-openai-detector': 512, } class RobertaTokenizer(GPT2Tokenizer): """ RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer. Peculiarities: - Byte-level Byte-Pair-Encoding - Requires a space to start the input string => the encoding methods should be called with the ``add_prefix_space`` flag set to ``True``. 
Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"` """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, merges_file, errors='replace', bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', **kwargs): super(RobertaTokenizer, self).__init__(vocab_file=vocab_file, merges_file=merges_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs) self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: single sequence: <s> X </s> pair of sequences: <s> A </s></s> B </s> """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError("You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model.") return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RoBERTa sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]
7,300
47.673333
120
py
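A small hedged check of the special-token layout implemented by the tokenizer above: a sequence pair becomes <s> A </s></s> B </s>, the special-tokens mask marks exactly those added ids, and the token-type ids split into the two segments. RobertaTokenizer is assumed importable from the same library; the sample sentences are arbitrary.

from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained('roberta-base')
a = tok.encode("Hello world", add_special_tokens=False)
b = tok.encode("How are you?", add_special_tokens=False)

pair = tok.build_inputs_with_special_tokens(a, b)
# layout: <s> A </s> </s> B </s>  ->  one cls at the front, three sep tokens in total
assert pair[0] == tok.cls_token_id and pair.count(tok.sep_token_id) == 3

mask = tok.get_special_tokens_mask(a, b)
types = tok.create_token_type_ids_from_sequences(a, b)
assert len(mask) == len(pair) == len(types)
print(sum(mask), 'special tokens added')  # 4: cls + 3 sep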
fat-albert
fat-albert-master/bert/transformers/modeling_tf_albert.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 ALBERT model. """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import math import os import sys from io import open import numpy as np import tensorflow as tf from .configuration_albert import AlbertConfig from .modeling_tf_utils import TFPreTrainedModel, get_initializer from .modeling_tf_bert import ACT2FN, TFBertSelfAttention from .file_utils import add_start_docstrings import logging logger = logging.getLogger(__name__) TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-tf_model.h5", 'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-tf_model.h5", 'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-tf_model.h5", 'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-tf_model.h5", 'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-tf_model.h5", 'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-tf_model.h5", 'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-tf_model.h5", 'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-tf_model.h5", } class TFAlbertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config, **kwargs): super(TFAlbertEmbeddings, self).__init__(**kwargs) self.config = config self.position_embeddings = tf.keras.layers.Embedding(config.max_position_embeddings, config.embedding_size, embeddings_initializer=get_initializer( self.config.initializer_range), name='position_embeddings') self.token_type_embeddings = tf.keras.layers.Embedding(config.type_vocab_size, config.embedding_size, embeddings_initializer=get_initializer( self.config.initializer_range), name='token_type_embeddings') # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.config.vocab_size, self.config.embedding_size], initializer=get_initializer(self.config.initializer_range)) super(TFAlbertEmbeddings, self).build(input_shape) def call(self, inputs, mode="embedding", training=False): """Get token embeddings of inputs. 
Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(inputs, training=training) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" input_ids, position_ids, token_type_ids, inputs_embeds = inputs if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [batch_size, length, embedding_size] Returns: float32 tensor with shape [batch_size, length, vocab_size]. 
""" batch_size = tf.shape(inputs)[0] length = tf.shape(inputs)[1] x = tf.reshape(inputs, [-1, self.config.embedding_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.config.vocab_size]) class TFAlbertSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFAlbertSelfAttention, self).__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int( config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer( config.initializer_range), name='query') self.key = tf.keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer( config.initializer_range), name='key') self.value = tf.keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer( config.initializer_range), name='value') self.dropout = tf.keras.layers.Dropout( config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): x = tf.reshape( x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs batch_size = tf.shape(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # scale attention_scores dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if self.output_attentions else ( context_layer,) return outputs class TFAlbertSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFAlbertSelfOutput, self).__init__(**kwargs) self.dense = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( config.initializer_range), name='dense') self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): hidden_states, input_tensor = inputs hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFAlbertAttention(TFBertSelfAttention): def __init__(self, config, **kwargs): super(TFAlbertAttention, self).__init__(config, **kwargs) self.hidden_size = config.hidden_size self.dense = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( config.initializer_range), name='dense') self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name='LayerNorm') self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError def call(self, inputs, training=False): input_tensor, attention_mask, head_mask = inputs batch_size = tf.shape(input_tensor)[0] mixed_query_layer = self.query(input_tensor) mixed_key_layer = self.key(input_tensor) mixed_value_layer = self.value(input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # scale attention_scores dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) self_outputs = (context_layer, attention_probs) if self.output_attentions else ( context_layer,) hidden_states = self_outputs[0] hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) attention_output = self.LayerNorm(hidden_states + input_tensor) # add attentions if we output them outputs = (attention_output,) + self_outputs[1:] return outputs class TFAlbertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFAlbertLayer, self).__init__(**kwargs) self.attention = TFAlbertAttention(config, name='attention') self.ffn = tf.keras.layers.Dense(config.intermediate_size, kernel_initializer=get_initializer( config.initializer_range), name='ffn') if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act self.ffn_output = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( config.initializer_range), name='ffn_output') self.full_layer_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name='full_layer_layer_norm') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs attention_outputs = self.attention( [hidden_states, attention_mask, head_mask], training=training) ffn_output = self.ffn(attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.full_layer_layer_norm( ffn_output + attention_outputs[0]) # add attentions if we output them outputs = (hidden_states,) + attention_outputs[1:] return outputs class TFAlbertLayerGroup(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFAlbertLayerGroup, self).__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.albert_layers = [TFAlbertLayer(config, name="albert_layers_._{}".format( i)) for i in range(config.inner_group_num)] def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs layer_hidden_states = () layer_attentions = () for layer_index, albert_layer in enumerate(self.albert_layers): layer_output = albert_layer( [hidden_states, attention_mask, head_mask[layer_index]], training=training) hidden_states = layer_output[0] if self.output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if self.output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (layer_hidden_states,) if self.output_attentions: outputs = outputs + (layer_attentions,) # last-layer hidden state, (layer hidden states), (layer attentions) return outputs class TFAlbertTransformer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super(TFAlbertTransformer, self).__init__(**kwargs) self.config = config self.output_attentions = 
config.output_attentions self.output_hidden_states = config.output_hidden_states self.embedding_hidden_mapping_in = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( config.initializer_range), name='embedding_hidden_mapping_in') self.albert_layer_groups = [TFAlbertLayerGroup( config, name="albert_layer_groups_._{}".format(i)) for i in range(config.num_hidden_groups)] def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_attentions = () if self.output_hidden_states: all_hidden_states = (hidden_states,) for i in range(self.config.num_hidden_layers): # Number of layers in a hidden group layers_per_group = int( self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int( i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( [hidden_states, attention_mask, head_mask[group_idx*layers_per_group:(group_idx+1)*layers_per_group]], training=training) hidden_states = layer_group_output[0] if self.output_attentions: all_attentions = all_attentions + layer_group_output[-1] if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) # last-layer hidden state, (all hidden states), (all attentions) return outputs class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = AlbertConfig pretrained_model_archive_map = TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "albert" class TFAlbertMLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super(TFAlbertMLMHead, self).__init__(**kwargs) self.vocab_size = config.vocab_size self.dense = tf.keras.layers.Dense(config.embedding_size, kernel_initializer=get_initializer( config.initializer_range), name='dense') if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name='LayerNorm') # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer='zeros', trainable=True, name='bias') self.decoder_bias = self.add_weight(shape=(self.vocab_size,), initializer='zeros', trainable=True, name='decoder/bias') super(TFAlbertMLMHead, self).build(input_shape) def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.decoder(hidden_states, mode="linear") + self.decoder_bias hidden_states = hidden_states + self.bias return hidden_states ALBERT_START_DOCSTRING = r""" The ALBERT model was proposed in `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`_ by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. 
It presents two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. This model is a `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`: https://arxiv.org/abs/1909.11942 .. _`tf.keras.Model`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model Note on the model inputs: TF 2.0 models accept two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using the `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with input_ids only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, ALBERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` Albert is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.AlbertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`_ for more details). 
**position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. """ @add_start_docstrings("The bare Albert Model transformer outputing raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) class TFAlbertModel(TFAlbertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Albert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import AlbertTokenizer, TFAlbertModel tokenizer = AlbertTokenizer.from_pretrained('bert-base-uncased') model = TFAlbertModel.from_pretrained('bert-base-uncased') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config, **kwargs): super(TFAlbertModel, self).__init__(config, **kwargs) self.num_hidden_layers = config.num_hidden_layers self.embeddings = TFAlbertEmbeddings(config, name="embeddings") self.encoder = TFAlbertTransformer(config, name="encoder") self.pooler = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( config.initializer_range), activation='tanh', name='pooler') def get_input_embeddings(self): return self.embeddings def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) assert len(inputs) <= 6, "Too many inputs." else: input_ids = inputs if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) elif inputs_embeds is not None: input_shape = inputs_embeds.shape[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if not head_mask is None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) embedding_output = self.embeddings( [input_ids, position_ids, token_type_ids, inputs_embeds], training=training) encoder_outputs = self.encoder( [embedding_output, extended_attention_mask, head_mask], training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output[:, 0]) # add hidden_states and attentions if they are here outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # sequence_output, pooled_output, (hidden_states), (attentions) return outputs @add_start_docstrings("""Albert Model with a `language modeling` head on top. 
""", ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **prediction_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: import tensorflow as tf from transformers import AlbertTokenizer, TFAlbertForMaskedLM tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForMaskedLM.from_pretrained('albert-base-v2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) prediction_scores = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFAlbertForMaskedLM, self).__init__(config, *inputs, **kwargs) self.albert = TFAlbertModel(config, name='albert') self.predictions = TFAlbertMLMHead( config, self.albert.embeddings, name='predictions') def get_output_embeddings(self): return self.albert.embeddings def call(self, inputs, **kwargs): outputs = self.albert(inputs, **kwargs) sequence_output = outputs[0] prediction_scores = self.predictions( sequence_output, training=kwargs.get('training', False)) # Add hidden states and attention if they are here outputs = (prediction_scores,) + outputs[2:] return outputs # prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **logits**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import tensorflow as tf from transformers import AlbertTokenizer, TFAlbertForSequenceClassification tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = TFAlbertForSequenceClassification.from_pretrained('albert-base-v2') input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 outputs = model(input_ids) logits = outputs[0] """ def __init__(self, config, *inputs, **kwargs): super(TFAlbertForSequenceClassification, self).__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertModel(config, name='albert') self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') def call(self, inputs, **kwargs): outputs = self.albert(inputs, **kwargs) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here return outputs # logits, (hidden_states), (attentions)
39,735
48.732165
193
py
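The TF 2.0 ALBERT classes in the record above can be exercised without downloading a checkpoint by building the model straight from a config. This is a minimal sketch, assuming the installed transformers release accepts the AlbertConfig size arguments shown; the tiny dimensions and token ids are illustrative only, not a released model.

import tensorflow as tf
from transformers import AlbertConfig, TFAlbertForSequenceClassification

# Tiny randomly initialised model; sizes are illustrative, not a pretrained checkpoint.
config = AlbertConfig(embedding_size=16, hidden_size=32, num_hidden_layers=2,
                      num_attention_heads=4, intermediate_size=64, num_labels=2)
model = TFAlbertForSequenceClassification(config)

input_ids = tf.constant([[2, 45, 178, 99, 3]])  # (batch_size=1, sequence_length=5), made-up ids
logits = model(input_ids)[0]                    # the output tuple starts with the logits
print(logits.shape)                             # (1, 2)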
fat-albert
fat-albert-master/bert/transformers/configuration_xlm.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLM configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-config.json", 'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-config.json", 'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-config.json", 'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-config.json", 'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-config.json", 'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-config.json", 'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-config.json", 'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-config.json", 'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-config.json", 'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-config.json", } class XLMConfig(PretrainedConfig): """Configuration class to store the configuration of a `XLMModel`. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XLMModel`. d_model: Size of the encoder layers and the pooler layer. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. d_inner: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. ff_activation: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. untie_r: untie relative position biases attn_type: 'bi' for XLM, 'uni' for Transformer-XL dropout: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. dropout: float, dropout rate. init: str, the initialization scheme, either "normal" or "uniform". init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range]. Only effective when init="uniform". init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std. Only effective when init="normal". 
mem_len: int, the number of tokens to cache. reuse_len: int, the number of tokens in the currect batch to be cached and reused in the future. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping. same_length: bool, whether to use the same attention length for each token. """ pretrained_config_archive_map = XLM_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size_or_config_json_file=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** -0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, finetuning_task=None, num_labels=2, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, **kwargs): """Constructs XLMConfig. """ super(XLMConfig, self).__init__(**kwargs) if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.n_words = vocab_size_or_config_json_file self.emb_dim = emb_dim self.n_layers = n_layers self.n_heads = n_heads self.dropout = dropout self.attention_dropout = attention_dropout self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.use_lang_emb = use_lang_emb self.layer_norm_eps = layer_norm_eps self.bos_index = bos_index self.eos_index = eos_index self.pad_index = pad_index self.unk_index = unk_index self.mask_index = mask_index self.is_encoder = is_encoder self.max_position_embeddings = max_position_embeddings self.embed_init_std = embed_init_std self.init_std = init_std self.finetuning_task = finetuning_task self.num_labels = num_labels self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_proj_to_labels = summary_proj_to_labels self.summary_first_dropout = summary_first_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top else: raise ValueError("First argument must be either a vocabulary size (int)" " or the path to a pretrained model config file (str)") @property def vocab_size(self): return self.n_words @vocab_size.setter def vocab_size(self, value): self.n_words = value @property def hidden_size(self): return self.emb_dim @property def num_attention_heads(self): return self.n_heads @property def num_hidden_layers(self): return self.n_layers
8,112
43.576923
121
py
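A short sketch of how the XLMConfig defined above is typically used; the keyword arguments mirror the constructor signature shown, and the properties at the end of the class alias the XLM-specific field names onto the generic ones used elsewhere in the library.

from transformers import XLMConfig

config = XLMConfig(vocab_size_or_config_json_file=30145, emb_dim=1024, n_layers=6, n_heads=8)

print(config.vocab_size)           # 30145, alias for config.n_words
print(config.hidden_size)          # 1024, alias for config.emb_dim
print(config.num_hidden_layers)    # 6, alias for config.n_layers
print(config.num_attention_heads)  # 8, alias for config.n_heads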
fat-albert
fat-albert-master/bert/transformers/optimization.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import logging import math import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR logger = logging.getLogger(__name__) def get_constant_schedule(optimizer, last_epoch=-1): """ Create a schedule with a constant learning rate. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1. return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=.5, last_epoch=-1): """ Create a schedule with a learning rate that decreases following the values of the cosine function between 0 and `pi * cycles` after a warmup period during which it increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0., 0.5 * (1. + math.cos(math.pi * float(num_cycles) * 2. * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1., last_epoch=-1): """ Create a schedule with a learning rate that decreases following the values of the cosine function with several hard restarts, after a warmup period during which it increases linearly between 0 and 1. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.: return 0. return max(0., 0.5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.)))) return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix. Parameters: lr (float): learning rate. Default 1e-3. 
betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999) eps (float): Adams epsilon. Default: 1e-6 weight_decay (float): Weight decay. Default: 0.0 correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True. """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super(AdamW, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(1.0 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad) denom = exp_avg_sq.sqrt().add_(group['eps']) step_size = group['lr'] if group['correct_bias']: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state['step'] bias_correction2 = 1.0 - beta2 ** state['step'] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(-step_size, exp_avg, denom) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group['weight_decay'] > 0.0: p.data.add_(-group['lr'] * group['weight_decay'], p.data) return loss
7,658
44.052941
134
py
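The pieces in the optimization module above are designed to be combined: AdamW applies decoupled weight decay, and the schedule factories return LambdaLR objects that rescale the base learning rate each step. A minimal sketch with a stand-in module, assuming these helpers are exported at the package top level; the hyper-parameters and dummy loss are illustrative.

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)  # stand-in for a fine-tuned transformer
optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

for step in range(100):
    loss = model(torch.randn(4, 10)).pow(2).mean()  # dummy objective
    loss.backward()
    optimizer.step()      # Adam update plus decoupled weight decay
    scheduler.step()      # then advance the warmup/decay schedule
    optimizer.zero_grad()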
fat-albert
fat-albert-master/bert/transformers/configuration_xlnet.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLNet configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from .configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'xlnet-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json", 'xlnet-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json", } class XLNetConfig(PretrainedConfig): """Configuration class to store the configuration of a ``XLNetModel``. Args: vocab_size_or_config_json_file: Vocabulary size of ``inputs_ids`` in ``XLNetModel``. d_model: Size of the encoder layers and the pooler layer. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. d_inner: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. ff_activation: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. untie_r: untie relative position biases attn_type: 'bi' for XLNet, 'uni' for Transformer-XL dropout: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. dropout: float, dropout rate. init: str, the initialization scheme, either "normal" or "uniform". init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range]. Only effective when init="uniform". init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std. Only effective when init="normal". mem_len: int, the number of tokens to cache. reuse_len: int, the number of tokens in the currect batch to be cached and reused in the future. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping. same_length: bool, whether to use the same attention length for each token. 
finetuning_task: name of the glue task on which the model was fine-tuned if any """ pretrained_config_archive_map = XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size_or_config_json_file=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, max_position_embeddings=512, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=None, reuse_len=None, bi_data=False, clamp_len=-1, same_length=False, finetuning_task=None, num_labels=2, summary_type='last', summary_use_proj=True, summary_activation='tanh', summary_last_dropout=0.1, start_n_top=5, end_n_top=5, **kwargs): """Constructs XLNetConfig. """ super(XLNetConfig, self).__init__(**kwargs) if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): setattr(self, key, value) elif isinstance(vocab_size_or_config_json_file, int): self.n_token = vocab_size_or_config_json_file self.d_model = d_model self.n_layer = n_layer self.n_head = n_head assert d_model % n_head == 0 self.d_head = d_model // n_head self.ff_activation = ff_activation self.d_inner = d_inner self.untie_r = untie_r self.attn_type = attn_type self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.mem_len = mem_len self.reuse_len = reuse_len self.bi_data = bi_data self.clamp_len = clamp_len self.same_length = same_length self.finetuning_task = finetuning_task self.num_labels = num_labels self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top else: raise ValueError("First argument must be either a vocabulary size (int)" " or the path to a pretrained model config file (str)") @property def max_position_embeddings(self): return -1 @property def vocab_size(self): return self.n_token @vocab_size.setter def vocab_size(self, value): self.n_token = value @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
6,805
38.80117
110
py
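As with the XLM configuration, a brief sketch of the XLNetConfig surface defined above, covering the derived head size and the aliasing properties.

from transformers import XLNetConfig

config = XLNetConfig(vocab_size_or_config_json_file=32000, d_model=1024, n_layer=24, n_head=16)

print(config.d_head)                   # 64, computed as d_model // n_head
print(config.hidden_size)              # 1024, alias for config.d_model
print(config.num_hidden_layers)        # 24, alias for config.n_layer
print(config.max_position_embeddings)  # -1, XLNet imposes no fixed position limit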
fat-albert
fat-albert-master/bert/transformers/__main__.py
# coding: utf8 def main(): import sys if (len(sys.argv) < 4 or len(sys.argv) > 6) or sys.argv[1] not in ["bert", "gpt", "transfo_xl", "gpt2", "xlnet", "xlm"]: print( "This command line utility let you convert original (author released) model checkpoint to pytorch.\n" "It should be used as one of: \n" ">> transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT, \n" ">> transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG], \n" ">> transformers transfo_xl TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG] or \n" ">> transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG] or \n" ">> transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME] or \n" ">> transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT") else: if sys.argv[1] == "bert": try: from .convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) != 5: # pylint: disable=line-too-long print("Should be used as `transformers bert TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`") else: PYTORCH_DUMP_OUTPUT = sys.argv.pop() TF_CONFIG = sys.argv.pop() TF_CHECKPOINT = sys.argv.pop() convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "gpt": from .convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers gpt OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`") else: OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: OPENAI_GPT_CONFIG = sys.argv[4] else: OPENAI_GPT_CONFIG = "" convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH, OPENAI_GPT_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "transfo_xl": try: from .convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers transfo_xl TF_CHECKPOINT/TF_DATASET_FILE PYTORCH_DUMP_OUTPUT [TF_CONFIG]`") else: if 'ckpt' in sys.argv[2].lower(): TF_CHECKPOINT = sys.argv[2] TF_DATASET_FILE = "" else: TF_DATASET_FILE = sys.argv[2] TF_CHECKPOINT = "" PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE) elif sys.argv[1] == "gpt2": try: from .convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 4 or len(sys.argv) > 5: # pylint: disable=line-too-long print("Should be used as `transformers gpt2 TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [TF_CONFIG]`") else: TF_CHECKPOINT = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "xlnet": try: from .convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch except ImportError: print("transformers can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) < 5 or len(sys.argv) > 6: # pylint: disable=line-too-long print("Should be used as `transformers xlnet TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT [FINETUNING_TASK_NAME]`") else: TF_CHECKPOINT = sys.argv[2] TF_CONFIG = sys.argv[3] PYTORCH_DUMP_OUTPUT = sys.argv[4] if len(sys.argv) == 6: FINETUNING_TASK = sys.argv[5] else: FINETUNING_TASK = None convert_xlnet_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, FINETUNING_TASK) elif sys.argv[1] == "xlm": from .convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch if len(sys.argv) != 4: # pylint: disable=line-too-long print("Should be used as `transformers xlm XLM_CHECKPOINT_PATH PYTORCH_DUMP_OUTPUT`") else: XLM_CHECKPOINT_PATH = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] convert_xlm_checkpoint_to_pytorch(XLM_CHECKPOINT_PATH, PYTORCH_DUMP_OUTPUT) if __name__ == '__main__': main()
7,085
53.507692
135
py
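A hedged sketch of how the conversion dispatcher above is meant to be driven. The checkpoint and output paths are placeholders, and the 'bert' branch additionally needs TensorFlow installed before the converter module can be imported; the shell form in the comment follows the usage text printed by main().

# Shell form (the module lives at transformers/__main__.py, so `-m transformers` runs main()):
#   python -m transformers bert /path/to/bert_model.ckpt /path/to/bert_config.json /path/to/pytorch_model.bin

import sys
from transformers.__main__ import main

sys.argv = ["transformers", "bert",
            "/path/to/bert_model.ckpt",    # TF_CHECKPOINT (placeholder)
            "/path/to/bert_config.json",   # TF_CONFIG (placeholder)
            "/path/to/pytorch_model.bin"]  # PYTORCH_DUMP_OUTPUT (placeholder)
main()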