diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..625d16dd19962842ac81aee1e123b4f60e937a6b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +hf_datas/llava_v1_5_mix665k.json filter=lfs diff=lfs merge=lfs -text +playground/data.z52 filter=lfs diff=lfs merge=lfs -text +playground/data.z40 filter=lfs diff=lfs merge=lfs -text +playground/data.z05 filter=lfs diff=lfs merge=lfs -text +playground/data.z74 filter=lfs diff=lfs merge=lfs -text +playground/data.z01 filter=lfs diff=lfs merge=lfs -text +playground/data.z60 filter=lfs diff=lfs merge=lfs -text +playground/data.z65 filter=lfs diff=lfs merge=lfs -text +playground/data.z37 filter=lfs diff=lfs merge=lfs -text +playground/data.z62 filter=lfs diff=lfs merge=lfs -text +playground/data.z63 filter=lfs diff=lfs merge=lfs -text +playground/data.z21 filter=lfs diff=lfs merge=lfs -text +playground/data.z53 filter=lfs diff=lfs merge=lfs -text +playground/data.z41 filter=lfs diff=lfs merge=lfs -text +playground/data.z07 filter=lfs diff=lfs merge=lfs -text +playground/data.z35 filter=lfs diff=lfs merge=lfs -text +playground/data.z06 filter=lfs diff=lfs merge=lfs -text +playground/data.z50 filter=lfs diff=lfs merge=lfs -text +playground/data.z12 filter=lfs diff=lfs merge=lfs -text +playground/data.z15 filter=lfs diff=lfs merge=lfs -text +playground/data.z18 filter=lfs diff=lfs merge=lfs -text +playground/data.z23 filter=lfs diff=lfs merge=lfs -text +playground/data.z09 filter=lfs diff=lfs merge=lfs -text +playground/data.z11 filter=lfs diff=lfs merge=lfs -text +playground/data.z38 filter=lfs diff=lfs merge=lfs -text +playground/data.z33 filter=lfs diff=lfs merge=lfs -text +playground/data.z70 filter=lfs diff=lfs merge=lfs -text +playground/data.z02 filter=lfs diff=lfs merge=lfs -text +playground/data.z51 filter=lfs diff=lfs merge=lfs -text +playground/data.z73 filter=lfs diff=lfs merge=lfs -text +playground/data.z42 filter=lfs diff=lfs merge=lfs -text +playground/data.z30 filter=lfs diff=lfs merge=lfs -text +playground/data.z64 filter=lfs diff=lfs merge=lfs -text +playground/data.z46 filter=lfs diff=lfs merge=lfs -text +playground/data.z27 filter=lfs diff=lfs merge=lfs -text +playground/data.z19 filter=lfs diff=lfs merge=lfs -text +playground/data.z04 filter=lfs diff=lfs merge=lfs -text +playground/data.z28 filter=lfs diff=lfs merge=lfs -text +playground/data.z48 filter=lfs diff=lfs merge=lfs -text +playground/data.z16 filter=lfs diff=lfs merge=lfs -text +playground/data.z71 filter=lfs diff=lfs merge=lfs -text +playground/data.z24 filter=lfs diff=lfs merge=lfs -text +playground/data.z69 filter=lfs diff=lfs merge=lfs -text +playground/data.z61 filter=lfs diff=lfs merge=lfs -text +playground/data.z68 filter=lfs diff=lfs merge=lfs -text +playground/data.z14 filter=lfs diff=lfs merge=lfs -text +playground/data.z58 filter=lfs diff=lfs merge=lfs -text +playground/data.z75 filter=lfs diff=lfs merge=lfs -text +playground/data.z44 filter=lfs diff=lfs merge=lfs -text +playground/data.z49 filter=lfs diff=lfs merge=lfs -text +playground/data.z10 filter=lfs diff=lfs merge=lfs -text +playground/data.z43 filter=lfs diff=lfs merge=lfs -text +playground/data.z39 filter=lfs diff=lfs merge=lfs -text +playground/data.z26 filter=lfs diff=lfs merge=lfs -text +playground/data.z55 filter=lfs diff=lfs merge=lfs -text +playground/data.z36 filter=lfs 
diff=lfs merge=lfs -text +playground/data.z08 filter=lfs diff=lfs merge=lfs -text +playground/data.z20 filter=lfs diff=lfs merge=lfs -text +playground/data.z67 filter=lfs diff=lfs merge=lfs -text +playground/data.z25 filter=lfs diff=lfs merge=lfs -text +playground/data.z31 filter=lfs diff=lfs merge=lfs -text +playground/data.z45 filter=lfs diff=lfs merge=lfs -text +playground/data.z47 filter=lfs diff=lfs merge=lfs -text +playground/data.z57 filter=lfs diff=lfs merge=lfs -text diff --git a/hf_datas/llava_v1_5_mix665k.json b/hf_datas/llava_v1_5_mix665k.json new file mode 100644 index 0000000000000000000000000000000000000000..06a946db8c25cce3207ec2194414e713b81d9c50 --- /dev/null +++ b/hf_datas/llava_v1_5_mix665k.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce959ce6e23073ee1cd1a8a2ef1c633768c10d4174327b8b2dc7113b91af6cf8 +size 1029887963 diff --git a/hf_models/clip-vit-large-patch14-336/pytorch_model.bin b/hf_models/clip-vit-large-patch14-336/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d599938f87909461098a7856a49c4842c84fc214 --- /dev/null +++ b/hf_models/clip-vit-large-patch14-336/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6032c2e0caae3dc2d4fba35535fa6307dbb49df59c7e182b1bc4b3329b81801 +size 1711974081 diff --git a/hf_models/clip-vit-large-patch14-336/tf_model.h5 b/hf_models/clip-vit-large-patch14-336/tf_model.h5 new file mode 100644 index 0000000000000000000000000000000000000000..b9b9abad1937d9d1b0aaaa53e647ad468bab050b --- /dev/null +++ b/hf_models/clip-vit-large-patch14-336/tf_model.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d12828ca8f0f3c92194f277b7d893da7f2fb7824d0b99dedb305eb48eb46bb7f +size 1712454232 diff --git a/hf_models/vicuna-7b-v1.5/pytorch_model-00001-of-00002.bin b/hf_models/vicuna-7b-v1.5/pytorch_model-00001-of-00002.bin new file mode 100644 index 0000000000000000000000000000000000000000..b79c622eee90796495a68c915d2919428bd14aea --- /dev/null +++ b/hf_models/vicuna-7b-v1.5/pytorch_model-00001-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4133d2fcc5f31286881ea50806d95b721d016b533036a99dedce3f8fe88520e6 +size 9976634558 diff --git a/hf_models/vicuna-7b-v1.5/pytorch_model-00002-of-00002.bin b/hf_models/vicuna-7b-v1.5/pytorch_model-00002-of-00002.bin new file mode 100644 index 0000000000000000000000000000000000000000..e563335b51cea5ebf21447b9557b7d3e2dc976b2 --- /dev/null +++ b/hf_models/vicuna-7b-v1.5/pytorch_model-00002-of-00002.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d261d3c35e92d3070d1e61ed821ebfca812a847d2a880757d82728acf005c5ac +size 3500315539 diff --git a/llava/__pycache__/__init__.cpython-310.pyc b/llava/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252c38c9975dc3283f447c958c3e1c3c2de24e4f Binary files /dev/null and b/llava/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava/__pycache__/constants.cpython-310.pyc b/llava/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fca26bab1c43798d7c79a772c69ed8d00b732f2 Binary files /dev/null and b/llava/__pycache__/constants.cpython-310.pyc differ diff --git a/llava/__pycache__/conversation.cpython-310.pyc b/llava/__pycache__/conversation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2a864c85f24e25bdf539732a055c4a8fa330b50 Binary files 
/dev/null and b/llava/__pycache__/conversation.cpython-310.pyc differ diff --git a/llava/__pycache__/mm_utils.cpython-310.pyc b/llava/__pycache__/mm_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88145b45946d5f9e507fcf6286de1df030512b01 Binary files /dev/null and b/llava/__pycache__/mm_utils.cpython-310.pyc differ diff --git a/llava/__pycache__/utils.cpython-310.pyc b/llava/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..636a866636dc57f9e7da34c33903054d35561676 Binary files /dev/null and b/llava/__pycache__/utils.cpython-310.pyc differ diff --git a/llava/eval/__pycache__/m4c_evaluator.cpython-310.pyc b/llava/eval/__pycache__/m4c_evaluator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6db488a68666591affeae9016b0fc5cb522f0845 Binary files /dev/null and b/llava/eval/__pycache__/m4c_evaluator.cpython-310.pyc differ diff --git a/llava/eval/__pycache__/model_vqa_loader.cpython-310.pyc b/llava/eval/__pycache__/model_vqa_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3f6c6bce2d48b5bdeebb0a9c16f92d48a42870a Binary files /dev/null and b/llava/eval/__pycache__/model_vqa_loader.cpython-310.pyc differ diff --git a/llava/eval/__pycache__/model_vqa_mmbench.cpython-310.pyc b/llava/eval/__pycache__/model_vqa_mmbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2d34053529bc4034f151d82714a927877938daf Binary files /dev/null and b/llava/eval/__pycache__/model_vqa_mmbench.cpython-310.pyc differ diff --git a/llava/eval/__pycache__/model_vqa_science.cpython-310.pyc b/llava/eval/__pycache__/model_vqa_science.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8751ba0a93213f209c349e5fb77253a5727f7b52 Binary files /dev/null and b/llava/eval/__pycache__/model_vqa_science.cpython-310.pyc differ diff --git a/llava/eval/eval_science_qa.py b/llava/eval/eval_science_qa.py new file mode 100644 index 0000000000000000000000000000000000000000..ccf206bbd7a5d6376eef82d61b3ef8bbe0f71c6c --- /dev/null +++ b/llava/eval/eval_science_qa.py @@ -0,0 +1,114 @@ +import argparse +import json +import os +import re +import random + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--base-dir', type=str) + parser.add_argument('--result-file', type=str) + parser.add_argument('--output-file', type=str) + parser.add_argument('--output-result', type=str) + parser.add_argument('--split', type=str, default='test') + parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"]) + return parser.parse_args() + + +def convert_caps(results): + fakecaps = [] + for result in results: + image_id = result['question_id'] + caption = result['text'] + fakecaps.append({"image_id": int(image_id), "caption": caption}) + return fakecaps + + +def get_pred_idx(prediction, choices, options): + """ + Get the index (e.g. 2) from the prediction (e.g. 
'C') + """ + if prediction in options[:len(choices)]: + return options.index(prediction) + else: + return -1 + return random.choice(range(len(choices))) + + +if __name__ == "__main__": + args = get_args() + + base_dir = args.base_dir + split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] + problems = json.load(open(os.path.join(base_dir, "problems.json"))) + predictions = [json.loads(line) for line in open(args.result_file)] + predictions = {pred['question_id']: pred for pred in predictions} + split_problems = {idx: problems[idx] for idx in split_indices} + + results = {'correct': [], 'incorrect': []} + sqa_results = {} + sqa_results['acc'] = None + sqa_results['correct'] = None + sqa_results['count'] = None + sqa_results['results'] = {} + sqa_results['outputs'] = {} + + for prob_id, prob in split_problems.items(): + if prob_id not in predictions: + pred = {'text': 'FAILED', 'prompt': 'Unknown'} + pred_text = 'FAILED' + else: + pred = predictions[prob_id] + pred_text = pred['text'] + + if pred_text in args.options: + answer = pred_text + elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ": + answer = pred_text[0] + else: + pattern = re.compile(r'The answer is ([A-Z]).') + res = pattern.findall(pred_text) + if len(res) == 1: + answer = res[0] # 'A', 'B', ... + else: + answer = "FAILED" + + pred_idx = get_pred_idx(answer, prob['choices'], args.options) + + analysis = { + 'question_id': prob_id, + 'parsed_ans': answer, + 'ground_truth': args.options[prob['answer']], + 'question': pred['prompt'], + 'pred': pred_text, + 'is_multimodal': '' in pred['prompt'], + } + + sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options) + sqa_results['outputs'][prob_id] = pred_text + + if pred_idx == prob['answer']: + results['correct'].append(analysis) + else: + results['incorrect'].append(analysis) + + correct = len(results['correct']) + total = len(results['correct']) + len(results['incorrect']) + + ###### IMG ###### + multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']]) + multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']]) + multimodal_total = multimodal_correct + multimodal_incorrect + ###### IMG ###### + + print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%') + + sqa_results['acc'] = correct / total * 100 + sqa_results['correct'] = correct + sqa_results['count'] = total + + with open(args.output_file, 'w') as f: + json.dump(results, f, indent=2) + with open(args.output_result, 'w') as f: + json.dump(sqa_results, f, indent=2) diff --git a/llava/eval/m4c_evaluator.py b/llava/eval/m4c_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..e30e958da061a4f0a0bfe34b12d2fcaeba7ff2f4 --- /dev/null +++ b/llava/eval/m4c_evaluator.py @@ -0,0 +1,334 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
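# Illustrative sketch, not part of the diff: the VQA-v2-style soft score that the
# TextVQAAccuracyEvaluator below implements. Each of the 10 human answers is held
# out in turn and a candidate earns min(1, matches_among_the_other_9 / 3); the
# final score is the mean over the 10 hold-outs. The answers here are made up.
def soft_score(candidate, human_answers):
    assert len(human_answers) == 10
    accs = []
    for i in range(len(human_answers)):
        others = human_answers[:i] + human_answers[i + 1:]
        matches = sum(1 for a in others if a == candidate)
        accs.append(min(1.0, matches / 3.0))
    return sum(accs) / len(accs)

toy_answers = ["cat"] * 8 + ["kitten"] * 2           # hypothetical annotations
print(round(soft_score("cat", toy_answers), 3))      # 1.0
print(round(soft_score("kitten", toy_answers), 3))   # 0.6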
+import re + +from tqdm import tqdm + + +class EvalAIAnswerProcessor: + """ + Processes an answer similar to Eval AI + copied from + https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897 + """ + + CONTRACTIONS = { + "aint": "ain't", + "arent": "aren't", + "cant": "can't", + "couldve": "could've", + "couldnt": "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + "didnt": "didn't", + "doesnt": "doesn't", + "dont": "don't", + "hadnt": "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + "hasnt": "hasn't", + "havent": "haven't", + "hed": "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + "hes": "he's", + "howd": "how'd", + "howll": "how'll", + "hows": "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + "Im": "I'm", + "Ive": "I've", + "isnt": "isn't", + "itd": "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + "itll": "it'll", + "let's": "let's", + "maam": "ma'am", + "mightnt": "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + "mightve": "might've", + "mustnt": "mustn't", + "mustve": "must've", + "neednt": "needn't", + "notve": "not've", + "oclock": "o'clock", + "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + "shant": "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + "shouldve": "should've", + "shouldnt": "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": "somebodyd", + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + "somebodyll": "somebody'll", + "somebodys": "somebody's", + "someoned": "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + "someonell": "someone'll", + "someones": "someone's", + "somethingd": "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + "somethingll": "something'll", + "thats": "that's", + "thered": "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + "therere": "there're", + "theres": "there's", + "theyd": "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + "theyll": "they'll", + "theyre": "they're", + "theyve": "they've", + "twas": "'twas", + "wasnt": "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + "weve": "we've", + "werent": "weren't", + "whatll": "what'll", + "whatre": "what're", + "whats": "what's", + "whatve": "what've", + "whens": "when's", + "whered": "where'd", + "wheres": "where's", + "whereve": "where've", + "whod": "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + "wholl": "who'll", + "whos": "who's", + "whove": "who've", + "whyll": "why'll", + "whyre": "why're", + "whys": "why's", + "wont": "won't", + "wouldve": "would've", + "wouldnt": "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + "yall": "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + "youd": "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + "youll": "you'll", + "youre": "you're", + "youve": "you've", + } + + NUMBER_MAP = { + "none": "0", + "zero": "0", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + } + ARTICLES = ["a", "an", "the"] + PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)") + COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)") + PUNCTUATIONS = [ + ";", + r"/", 
+ "[", + "]", + '"', + "{", + "}", + "(", + ")", + "=", + "+", + "\\", + "_", + "-", + ">", + "<", + "@", + "`", + ",", + "?", + "!", + ] + + def __init__(self, *args, **kwargs): + pass + + def word_tokenize(self, word): + word = word.lower() + word = word.replace(",", "").replace("?", "").replace("'s", " 's") + return word.strip() + + def process_punctuation(self, in_text): + out_text = in_text + for p in self.PUNCTUATIONS: + if (p + " " in in_text or " " + p in in_text) or ( + re.search(self.COMMA_STRIP, in_text) is not None + ): + out_text = out_text.replace(p, "") + else: + out_text = out_text.replace(p, " ") + out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE) + return out_text + + def process_digit_article(self, in_text): + out_text = [] + temp_text = in_text.lower().split() + for word in temp_text: + word = self.NUMBER_MAP.setdefault(word, word) + if word not in self.ARTICLES: + out_text.append(word) + else: + pass + for word_id, word in enumerate(out_text): + if word in self.CONTRACTIONS: + out_text[word_id] = self.CONTRACTIONS[word] + out_text = " ".join(out_text) + return out_text + + def __call__(self, item): + item = self.word_tokenize(item) + item = item.replace("\n", " ").replace("\t", " ").strip() + item = self.process_punctuation(item) + item = self.process_digit_article(item) + return item + + +class TextVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def _compute_answer_scores(self, raw_answers): + """ + compute the accuracy (soft score) of human answers + """ + answers = [self.answer_processor(a) for a in raw_answers] + assert len(answers) == 10 + gt_answers = list(enumerate(answers)) + unique_answers = set(answers) + unique_answer_scores = {} + + for unique_answer in unique_answers: + accs = [] + for gt_answer in gt_answers: + other_answers = [item for item in gt_answers if item != gt_answer] + matching_answers = [ + item for item in other_answers if item[1] == unique_answer + ] + acc = min(1, float(len(matching_answers)) / 3) + accs.append(acc) + unique_answer_scores[unique_answer] = sum(accs) / len(accs) + + return unique_answer_scores + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in tqdm(pred_list): + pred_answer = self.answer_processor(entry["pred_answer"]) + unique_answer_scores = self._compute_answer_scores(entry["gt_answers"]) + score = unique_answer_scores.get(pred_answer, 0.0) + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class STVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + pred_answer = self.answer_processor(entry["pred_answer"]) + gts = [self.answer_processor(a) for a in entry["gt_answers"]] + score = 1.0 if pred_answer in gts else 0.0 + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class STVQAANLSEvaluator: + def __init__(self): + import editdistance # install with `pip install editdistance` + + self.get_edit_distance = editdistance.eval + + def get_anls(self, s1, s2): + s1 = s1.lower().strip() + s2 = s2.lower().strip() + iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2)) + anls = iou if iou >= 0.5 else 0.0 + return anls + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + anls = max( + self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"] + ) + pred_scores.append(anls) 
+ + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class TextCapsBleu4Evaluator: + def __init__(self): + # The following script requires Java 1.8.0 and pycocotools installed. + # The pycocoevalcap can be installed with pip as + # pip install git+https://github.com/ronghanghu/coco-caption.git@python23 + # Original pycocoevalcap code is at https://github.com/tylin/coco-caption + # but has no python3 support yet. + try: + from pycocoevalcap.bleu.bleu import Bleu + from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer + except ModuleNotFoundError: + print( + "Please install pycocoevalcap module using " + "pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa + ) + raise + + self.tokenizer = PTBTokenizer() + self.scorer = Bleu(4) + + def eval_pred_list(self, pred_list): + # Create reference and hypotheses captions. + gts = {} + res = {} + for idx, entry in enumerate(pred_list): + gts[idx] = [{"caption": a} for a in entry["gt_answers"]] + res[idx] = [{"caption": entry["pred_answer"]}] + + gts = self.tokenizer.tokenize(gts) + res = self.tokenizer.tokenize(res) + score, _ = self.scorer.compute_score(gts, res) + + bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4) + return bleu4 diff --git a/llava/eval/model_vqa_loader.py b/llava/eval/model_vqa_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc8ccb58bd64051b648ae7c24973da85003c387 --- /dev/null +++ b/llava/eval/model_vqa_loader.py @@ -0,0 +1,158 @@ +import argparse +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava import conversation as conversation_lib +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path +from torch.utils.data import Dataset, DataLoader + +from PIL import Image +import math + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +# Custom dataset class +class CustomDataset(Dataset): + def __init__(self, questions, image_folder, tokenizer, image_processor, model_config, voco_num): + self.questions = questions + self.image_folder = image_folder + self.tokenizer = tokenizer + self.image_processor = image_processor + self.model_config = model_config + self.voco_num = voco_num + print("voco_num is ", voco_num) + + def __getitem__(self, index): + line = self.questions[index] + image_file = line["image"] + qs = line["text"] + if self.model_config.mm_use_im_start_end: + qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + else: + qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + + # conv = conv_templates[args.conv_mode].copy() + conv = conversation_lib.voco_default_conversation.copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + # print(prompt) + maybe_voco_str = "".join( + ["" for _ in range(self.voco_num)] + ) + prompt = f"\n{maybe_voco_str}\n" + prompt.replace("\n", '').replace("", '') + + image = Image.open(os.path.join(self.image_folder, 
image_file)).convert('RGB') + image_tensor = process_images([image], self.image_processor, self.model_config)[0] + + input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt') + + return input_ids, image_tensor, image.size + + def __len__(self): + return len(self.questions) + + +def collate_fn(batch): + input_ids, image_tensors, image_sizes = zip(*batch) + input_ids = torch.stack(input_ids, dim=0) + image_tensors = torch.stack(image_tensors, dim=0) + return input_ids, image_tensors, image_sizes + + +# DataLoader +def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, voco_num, batch_size=1, num_workers=4): + assert batch_size == 1, "batch_size must be 1" + dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config, voco_num) + data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False, collate_fn=collate_fn) + return data_loader + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, llava_model="initial") + + print("*************", len(tokenizer)) + + questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + voco_num = args.voco_num + + if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: + args.conv_mode = args.conv_mode + '_mmtag' + print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') + + data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config, voco_num) + + for (input_ids, image_tensor, image_sizes), line in tqdm(zip(data_loader, questions), total=len(questions)): + idx = line["question_id"] + cur_prompt = line["text"] + + input_ids = input_ids.to(device='cuda', non_blocking=True) + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), + image_sizes=image_sizes, + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + top_p=args.top_p, + num_beams=args.num_beams, + max_new_tokens=args.max_new_tokens, + use_cache=True) + + outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "prompt": cur_prompt, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v1") + parser.add_argument("--num-chunks", type=int, default=1) + 
parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--top_p", type=float, default=None) + parser.add_argument("--num_beams", type=int, default=1) + parser.add_argument("--max_new_tokens", type=int, default=128) + parser.add_argument("--voco_num", type=int, default=None) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/model_vqa_mmbench.py b/llava/eval/model_vqa_mmbench.py new file mode 100644 index 0000000000000000000000000000000000000000..009f2d2afc4734ab4835e0651e5cf0187f52138b --- /dev/null +++ b/llava/eval/model_vqa_mmbench.py @@ -0,0 +1,170 @@ +import argparse +import torch +import os +import json +import pandas as pd +from tqdm import tqdm +import shortuuid + +from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path + +from llava import conversation as conversation_lib +from PIL import Image +import math + + +all_options = ['A', 'B', 'C', 'D'] + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +def is_none(value): + if value is None: + return True + if type(value) is float and math.isnan(value): + return True + if type(value) is str and value.lower() == 'nan': + return True + if type(value) is str and value.lower() == 'none': + return True + return False + +def get_options(row, options): + parsed_options = [] + for option in options: + option_value = row[option] + if is_none(option_value): + break + parsed_options.append(option_value) + return parsed_options + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, llava_model="initial") + + print("*************", len(tokenizer)) + + questions = pd.read_table(os.path.expanduser(args.question_file)) + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + + if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: + args.conv_mode = args.conv_mode + '_mmtag' + print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') + + for index, row in tqdm(questions.iterrows(), total=len(questions)): + options = get_options(row, all_options) + cur_option_char = all_options[:len(options)] + + if args.all_rounds: + num_rounds = len(options) + else: + num_rounds = 1 + + for round_idx in range(num_rounds): + idx = row['index'] + question = row['question'] + hint = row['hint'] + image = load_image_from_base64(row['image']) + if not is_none(hint): + question = hint + '\n' + question + for option_char, option in zip(all_options[:len(options)], options): + question = question + '\n' + 
option_char + '. ' + option + qs = cur_prompt = question + if model.config.mm_use_im_start_end: + qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + else: + qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + + if args.single_pred_prompt: + if args.lang == 'cn': + qs = qs + '\n' + "请直接回答选项字母。" + else: + qs = qs + '\n' + "Answer with the option's letter from the given choices directly." + + # conv = conv_templates[args.conv_mode].copy() + conv = conversation_lib.voco_default_conversation.copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + maybe_voco_str = "".join( + ["" for _ in range(args.voco_num)] + ) + prompt = f"\n{maybe_voco_str}\n" + prompt.replace("\n", '').replace("", '') + + input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() + + image_tensor = process_images([image], image_processor, model.config)[0] + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=image_tensor.unsqueeze(0).half().cuda(), + image_sizes=[image.size], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + top_p=args.top_p, + num_beams=args.num_beams, + # no_repeat_ngram_size=3, + max_new_tokens=1024, + use_cache=True) + + outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "round_id": round_idx, + "prompt": cur_prompt, + "text": outputs, + "options": options, + "option_char": cur_option_char, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + + # rotate options + options = options[1:] + options[:1] + cur_option_char = cur_option_char[1:] + cur_option_char[:1] + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v1") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--top_p", type=float, default=None) + parser.add_argument("--num_beams", type=int, default=1) + parser.add_argument("--all-rounds", action="store_true") + parser.add_argument("--single-pred-prompt", action="store_true") + parser.add_argument("--lang", type=str, default="en") + parser.add_argument("--voco_num", type=int, default=None) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/model_vqa_science.py b/llava/eval/model_vqa_science.py new file mode 100644 index 0000000000000000000000000000000000000000..e580a376ec6928ea2f3a064597214f3e2bf42f8e --- /dev/null +++ b/llava/eval/model_vqa_science.py @@ -0,0 +1,122 @@ +import argparse +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava import 
conversation as conversation_lib +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path + +from PIL import Image +import math + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, llava_model="initial") + + print("*************", len(tokenizer)) + + questions = json.load(open(os.path.expanduser(args.question_file), "r")) + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + for i, line in enumerate(tqdm(questions)): + idx = line["id"] + question = line['conversations'][0] + qs = question['value'].replace('', '').strip() + cur_prompt = qs + + if 'image' in line: + image_file = line["image"] + image = Image.open(os.path.join(args.image_folder, image_file)) + image_tensor = process_images([image], image_processor, model.config)[0] + images = image_tensor.unsqueeze(0).half().cuda() + image_sizes = [image.size] + if getattr(model.config, 'mm_use_im_start_end', False): + qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + else: + qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + cur_prompt = '' + '\n' + cur_prompt + else: + continue + images = None + image_sizes = None + + if args.single_pred_prompt: + qs = qs + '\n' + "Answer with the option's letter from the given choices directly." + cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly." 
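# Illustrative sketch, not part of the diff: how the split_list / get_chunk helpers
# used by these eval scripts shard the question list across --num-chunks workers,
# so each process (selected by --chunk-idx) evaluates one slice. Toy data only.
import math

def shard(questions, num_chunks, chunk_idx):
    chunk_size = math.ceil(len(questions) / num_chunks)
    chunks = [questions[i:i + chunk_size] for i in range(0, len(questions), chunk_size)]
    return chunks[chunk_idx]

toy_questions = list(range(10))
print([shard(toy_questions, 3, k) for k in range(3)])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]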
+ + # conv = conv_templates[args.conv_mode].copy() + conv = conversation_lib.voco_default_conversation.copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + maybe_voco_str = "".join( + ["" for _ in range(args.voco_num)] + ) + prompt = f"\n{maybe_voco_str}\n" + prompt.replace("\n", '').replace("", '') + + input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=images, + image_sizes=image_sizes, + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + max_new_tokens=1024, + use_cache=True, + ) + + outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "prompt": cur_prompt, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.json") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v0") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--answer-prompter", action="store_true") + parser.add_argument("--single-pred-prompt", action="store_true") + parser.add_argument("--voco_num", type=int, default=None) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/train/__pycache__/llava_trainer.cpython-310.pyc b/llava/train/__pycache__/llava_trainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56464d49d93bb31b5f303b4f8ce048a83f853194 Binary files /dev/null and b/llava/train/__pycache__/llava_trainer.cpython-310.pyc differ diff --git a/llava/train/__pycache__/train_compress.cpython-310.pyc b/llava/train/__pycache__/train_compress.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dfbec1ee308b5c3e47d66bcf2006762f0a50195 Binary files /dev/null and b/llava/train/__pycache__/train_compress.cpython-310.pyc differ diff --git a/llava/train/llama_flash_attn_monkey_patch.py b/llava/train/llama_flash_attn_monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..31db2eff8d1c4b3ae645583dfc5e156e818b6f1c --- /dev/null +++ b/llava/train/llama_flash_attn_monkey_patch.py @@ -0,0 +1,115 @@ +from typing import Optional, Tuple +import warnings + +import torch + +import transformers +from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv + +try: + from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func +except ImportError: + from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func +from flash_attn.bert_padding import unpad_input, pad_input + + +def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + 
past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + warnings.warn( + "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead." + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = ( + self.q_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + key_states = ( + self.k_proj(hidden_states) + .view(bsz, q_len, self.num_key_value_heads, self.head_dim) + .transpose(1, 2) + ) + value_states = ( + self.v_proj(hidden_states) + .view(bsz, q_len, self.num_key_value_heads, self.head_dim) + .transpose(1, 2) + ) # shape: (b, num_heads, s, head_dim) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids + ) + + if past_key_value is not None: + # reuse k, v + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + # Transform the data into the format required by flash attention + qkv = torch.stack([query_states, key_states, value_states], dim=2) + qkv = qkv.transpose(1, 3) # shape: [b, s, 3, num_heads, head_dim] + key_padding_mask = attention_mask + + if key_padding_mask is None: + qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim) + cu_q_lens = torch.arange( + 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device + ) + max_s = q_len + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True + ) + output = output.view(bsz, q_len, -1) + else: + qkv = qkv.reshape(bsz, q_len, -1) + qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask) + qkv = qkv.view(-1, 3, self.num_heads, self.head_dim) + output_unpad = flash_attn_unpadded_qkvpacked_func( + qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True + ) + output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim) + output = pad_input(output_unpad, indices, bsz, q_len) + + return self.o_proj(output), None, past_key_value + + +# Disable the transformation of the attention mask in LlamaModel as the flash attention +# requires the attention mask to be the same as the key_padding_mask +def _prepare_decoder_attention_mask( + self, attention_mask, input_shape, inputs_embeds, past_key_values_length +): + # [bsz, seq_len] + return attention_mask + + +def replace_llama_attn_with_flash_attn(): + cuda_major, cuda_minor = torch.cuda.get_device_capability() + if cuda_major < 8: + warnings.warn( + "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward." 
+ "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593" + ) + transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = ( + _prepare_decoder_attention_mask + ) + transformers.models.llama.modeling_llama.LlamaAttention.forward = forward diff --git a/llava/train/llama_xformers_attn_monkey_patch.py b/llava/train/llama_xformers_attn_monkey_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..f8351e41ccd4a64dca237bd8f8be0702b23989dc --- /dev/null +++ b/llava/train/llama_xformers_attn_monkey_patch.py @@ -0,0 +1,129 @@ +""" +Directly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments +""" + +import logging +import math +from typing import Optional, Tuple + +import torch +import transformers.models.llama.modeling_llama +from torch import nn + +try: + import xformers.ops +except ImportError: + logging.error("xformers not found! Please install it before trying to use it.") + + +def replace_llama_attn_with_xformers_attn(): + transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward + + +def xformers_forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # pylint: disable=duplicate-code + bsz, q_len, _ = hidden_states.size() + + query_states = ( + self.q_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + key_states = ( + self.k_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + value_states = ( + self.v_proj(hidden_states) + .view(bsz, q_len, self.num_heads, self.head_dim) + .transpose(1, 2) + ) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + ( + query_states, + key_states, + ) = transformers.models.llama.modeling_llama.apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids + ) + # [bsz, nh, t, hd] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + # We only apply xformers optimizations if we don't need to output the whole attention matrix + if not output_attentions: + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros. + # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros. 
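# Illustrative sketch, not part of the diff: per the comment above, this patched
# forward assumes the additive mask handed to LlamaAttention is either all zeros
# or a causal (lower-triangular) mask whose upper triangle holds a large negative
# value, so a single upper-triangle element tells the two cases apart. Toy shapes.
import torch

bsz, q_len = 1, 4
neg = torch.finfo(torch.float32).min

all_zeros = torch.zeros(bsz, 1, q_len, q_len)                         # "no mask" case
causal = torch.triu(torch.full((q_len, q_len), neg), diagonal=1)[None, None]

assert all_zeros[0, 0, 0, 1] == 0   # -> memory_efficient_attention(attn_bias=None)
assert causal[0, 0, 0, 1] != 0      # -> attn_bias=xformers.ops.LowerTriangularMask()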
+ if attention_mask is None or attention_mask[0, 0, 0, 1] == 0: + # input and output should be of form (bsz, q_len, num_heads, head_dim) + attn_output = xformers.ops.memory_efficient_attention( + query_states, key_states, value_states, attn_bias=None + ) + else: + # input and output should be of form (bsz, q_len, num_heads, head_dim) + attn_output = xformers.ops.memory_efficient_attention( + query_states, + key_states, + value_states, + attn_bias=xformers.ops.LowerTriangularMask(), + ) + attn_weights = None + else: + attn_weights = torch.matmul( + query_states, key_states.transpose(2, 3) + ) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max( + attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) + ) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32 + ).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + attn_output = self.o_proj(attn_output) + return attn_output, attn_weights, past_key_value diff --git a/llava/train/train_compress.py b/llava/train/train_compress.py new file mode 100644 index 0000000000000000000000000000000000000000..d8347d9fa471da53560768ad922cea7e94ccf0f8 --- /dev/null +++ b/llava/train/train_compress.py @@ -0,0 +1,1014 @@ +# Adopted from https://github.com/haotian-liu/LLaVA. +# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: +# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: +# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
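# Illustrative sketch, not part of the diff: how an attention monkey patch like the
# two modules above is typically applied in a LLaVA-style training entry point.
# The patch has to run before the LLaMA model is instantiated so that the replaced
# forward is the one that gets bound. The checkpoint path is a placeholder.
def build_patched_model():
    from llava.train.llama_flash_attn_monkey_patch import (
        replace_llama_attn_with_flash_attn,
    )
    replace_llama_attn_with_flash_attn()          # patch LlamaAttention.forward first

    import transformers
    return transformers.LlamaForCausalLM.from_pretrained(
        "hf_models/vicuna-7b-v1.5",               # placeholder checkpoint directory
        torch_dtype="auto",
    )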
+ +import os +import copy +from dataclasses import dataclass, field +import json +import logging +import pathlib +from typing import Dict, Optional, Sequence, List + +import torch + +import transformers +import tokenizers + +from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +from torch.utils.data import Dataset +from llava.train.llava_trainer import LLaVATrainer + +from llava import conversation as conversation_lib +from llava.model import * +from llava.mm_utils import tokenizer_image_token + +from PIL import Image + + +local_rank = None + + +def rank0_print(*args): + if local_rank == 0: + print(*args) + + +from packaging import version +IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse('0.14') + + +@dataclass +class ModelArguments: + model_name_or_path: Optional[str] = field(default="facebook/opt-125m") + version: Optional[str] = field(default="v0") + freeze_backbone: bool = field(default=False) + tune_mm_mlp_adapter: bool = field(default=False) + vision_tower: Optional[str] = field(default=None) + mm_vision_select_layer: Optional[int] = field(default=-1) # default to the last layer + pretrain_mm_mlp_adapter: Optional[str] = field(default=None) + mm_projector_type: Optional[str] = field(default='linear') + mm_use_im_start_end: bool = field(default=False) + mm_use_im_patch_token: bool = field(default=True) + mm_patch_merge_type: Optional[str] = field(default='flat') + mm_vision_select_feature: Optional[str] = field(default="patch") + + +@dataclass +class DataArguments: + data_path: str = field(default=None, + metadata={"help": "Path to the training data."}) + lazy_preprocess: bool = False + is_multimodal: bool = False + image_folder: Optional[str] = field(default=None) + image_aspect_ratio: str = 'square' + + +@dataclass +class TrainingArguments(transformers.TrainingArguments): + cache_dir: Optional[str] = field(default=None) + optim: str = field(default="adamw_torch") + remove_unused_columns: bool = field(default=False) + freeze_mm_mlp_adapter: bool = field(default=False) + mpt_attn_impl: Optional[str] = field(default="triton") + model_max_length: int = field( + default=512, + metadata={ + "help": + "Maximum sequence length. Sequences will be right padded (and possibly truncated)." + }, + ) + double_quant: bool = field( + default=True, + metadata={"help": "Compress the quantization statistics through double quantization."} + ) + quant_type: str = field( + default="nf4", + metadata={"help": "Quantization data type to use. 
Should be one of `fp4` or `nf4`."} + ) + bits: int = field( + default=16, + metadata={"help": "How many bits to use."} + ) + lora_enable: bool = False + lora_r: int = 64 + lora_alpha: int = 16 + lora_dropout: float = 0.05 + lora_weight_path: str = "" + lora_bias: str = "none" + mm_projector_lr: Optional[float] = None + group_by_modality_length: bool = field(default=False) + + +def maybe_zero_3(param, ignore_status=False, name=None): + from deepspeed import zero + from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus + if hasattr(param, "ds_id"): + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if not ignore_status: + logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}") + with zero.GatheredParameters([param]): + param = param.data.detach().cpu().clone() + else: + param = param.detach().cpu().clone() + return param + + +# Borrowed from peft.utils.get_peft_model_state_dict +def get_peft_state_maybe_zero_3(named_params, bias): + if bias == "none": + to_return = {k: t for k, t in named_params if "lora_" in k} + elif bias == "all": + to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k} + elif bias == "lora_only": + to_return = {} + maybe_lora_bias = {} + lora_bias_names = set() + for k, t in named_params: + if "lora_" in k: + to_return[k] = t + bias_name = k.split("lora_")[0] + "bias" + lora_bias_names.add(bias_name) + elif "bias" in k: + maybe_lora_bias[k] = t + for k, t in maybe_lora_bias: + if bias_name in lora_bias_names: + to_return[bias_name] = t + else: + raise NotImplementedError + to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()} + return to_return + + +def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True): + to_return = {k: t for k, t in named_params if "lora_" not in k} + if require_grad_only: + to_return = {k: t for k, t in to_return.items() if t.requires_grad} + to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()} + return to_return + + +def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match): + to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)} + to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()} + return to_return + + +def find_all_linear_names(model): + cls = torch.nn.Linear + lora_module_names = set() + multimodal_keywords = ['mm_projector', 'vision_tower', 'vision_resampler'] + for name, module in model.named_modules(): + if any(mm_keyword in name for mm_keyword in multimodal_keywords): + continue + if isinstance(module, cls): + names = name.split('.') + lora_module_names.add(names[0] if len(names) == 1 else names[-1]) + + if 'lm_head' in lora_module_names: # needed for 16-bit + lora_module_names.remove('lm_head') + return list(lora_module_names) + + +def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, + output_dir: str): + """Collects the state dict and dump to disk.""" + + if getattr(trainer.args, "tune_mm_mlp_adapter", False): + # Only save Adapter + keys_to_match = ['mm_projector'] + if getattr(trainer.args, "use_im_start_end", False): + keys_to_match.extend(['embed_tokens', 'embed_in']) + + weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match) + trainer.model.config.save_pretrained(output_dir) + + current_folder = output_dir.split('/')[-1] + parent_folder = os.path.dirname(output_dir) + if trainer.args.local_rank == 0 or 
trainer.args.local_rank == -1: + if current_folder.startswith('checkpoint-'): + mm_projector_folder = os.path.join(parent_folder, "mm_projector") + os.makedirs(mm_projector_folder, exist_ok=True) + torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin')) + else: + torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin')) + return + + if trainer.deepspeed: + torch.cuda.synchronize() + trainer.save_model(output_dir) + return + + state_dict = trainer.model.state_dict() + if trainer.args.should_save: + cpu_state_dict = { + key: value.cpu() + for key, value in state_dict.items() + } + del state_dict + trainer._save(output_dir, state_dict=cpu_state_dict) # noqa + + +def smart_tokenizer_and_embedding_resize( + special_tokens_dict: Dict, + tokenizer: transformers.PreTrainedTokenizer, + model: transformers.PreTrainedModel, +): + """Resize tokenizer and embedding. + + Note: This is the unoptimized version that may make your embedding size not be divisible by 64. + """ + num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) + model.resize_token_embeddings(len(tokenizer)) + + if num_new_tokens > 0: + input_embeddings = model.get_input_embeddings().weight.data + output_embeddings = model.get_output_embeddings().weight.data + + input_embeddings_avg = input_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + output_embeddings_avg = output_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + + input_embeddings[-num_new_tokens:] = input_embeddings_avg + output_embeddings[-num_new_tokens:] = output_embeddings_avg + + +def _tokenize_fn(strings: Sequence[str], + tokenizer: transformers.PreTrainedTokenizer) -> Dict: + """Tokenize a list of strings.""" + tokenized_list = [ + tokenizer( + text, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ) for text in strings + ] + input_ids = labels = [ + tokenized.input_ids[0] for tokenized in tokenized_list + ] + input_ids_lens = labels_lens = [ + tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() + for tokenized in tokenized_list + ] + return dict( + input_ids=input_ids, + labels=labels, + input_ids_lens=input_ids_lens, + labels_lens=labels_lens, + ) + + +def _mask_targets(target, tokenized_lens, speakers): + # cur_idx = 0 + cur_idx = tokenized_lens[0] + tokenized_lens = tokenized_lens[1:] + target[:cur_idx] = IGNORE_INDEX + for tokenized_len, speaker in zip(tokenized_lens, speakers): + if speaker == "human": + target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX + cur_idx += tokenized_len + + +def _add_speaker_and_signal(header, source, get_conversation=True): + """Add speaker and start/end signal on each round.""" + BEGIN_SIGNAL = "### " + END_SIGNAL = "\n" + conversation = header + for sentence in source: + from_str = sentence["from"] + if from_str.lower() == "human": + from_str = conversation_lib.default_conversation.roles[0] + elif from_str.lower() == "gpt": + from_str = conversation_lib.default_conversation.roles[1] + else: + from_str = 'unknown' + sentence["value"] = (BEGIN_SIGNAL + from_str + ": " + + sentence["value"] + END_SIGNAL) + if get_conversation: + conversation += sentence["value"] + conversation += BEGIN_SIGNAL + return conversation + + +def preprocess_multimodal( + sources: Sequence[str], + data_args: DataArguments +) -> Dict: + is_multimodal = data_args.is_multimodal + if not is_multimodal: + return sources + + for source in sources: + for sentence in source: + if DEFAULT_IMAGE_TOKEN in sentence['value']: + 
sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip() + sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value'] + sentence['value'] = sentence['value'].strip() + if "mmtag" in conversation_lib.default_conversation.version: + sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '' + DEFAULT_IMAGE_TOKEN + '') + replace_token = DEFAULT_IMAGE_TOKEN + if data_args.mm_use_im_start_end: + replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN + sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token) + + return sources + + +def preprocess_llama_2( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False +) -> Dict: + conv = conversation_lib.default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + + # Tokenize conversations + + if has_image: + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() + + assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2 + + # Mask targets + sep = "[/INST] " + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep2) + cur_len = 1 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + target[cur_len : cur_len + instruction_len] = IGNORE_INDEX + + cur_len += round_len + target[cur_len:] = IGNORE_INDEX + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_INDEX + print( + f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." + f" (ignored)" + ) + + return dict( + input_ids=input_ids, + labels=targets, + ) + + +def preprocess_v1( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False +) -> Dict: + conv = conversation_lib.voco_default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + # The assistant gives helpful, detailed, and polite answers to the user's questions. + # + + USER: Q + ASSITENT: A + ... 
+ + # Tokenize conversations + # token num + if has_image: + maybe_voco_str = "".join( + ["" for _ in range(2)] + ) + # conversations = [f"\n{maybe_voco_str}\n" + conversations[0].replace("\n", '')] + conversations = [f"\n{maybe_voco_str}\n" + conversations[0].replace("", '').replace("\n", '')] + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() # [1, len] + + assert conv.sep_style == conversation_lib.SeparatorStyle.TWO + + # Mask targets + sep = conv.sep + conv.roles[1] + ": " + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep2) + cur_len = 1 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) # 1 + tokenize(rou), where this token is specially encoded as 200 + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 # question tokens + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + if i != 0 and not tokenizer.legacy and IS_TOKENIZER_GREATER_THAN_0_14: + round_len -= 1 + instruction_len -= 1 + + target[cur_len : cur_len + instruction_len] = IGNORE_INDEX + + cur_len += round_len + target[cur_len:] = IGNORE_INDEX + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_INDEX + print( + f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
+ f" (ignored)" + ) + + return dict( + input_ids=input_ids, + labels=targets, + ) + + +def preprocess_mpt( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False +) -> Dict: + conv = conversation_lib.default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + + # Tokenize conversations + + if has_image: + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() + assert conv.sep_style == conversation_lib.SeparatorStyle.MPT + + # Mask targets + sep = conv.sep + conv.roles[1] + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep) + re_rounds = [conv.sep.join(rounds[:3])] # system + user + gpt + for conv_idx in range(3, len(rounds), 2): + re_rounds.append(conv.sep.join(rounds[conv_idx:conv_idx+2])) # user + gpt + cur_len = 0 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(re_rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1 + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = len(tokenizer(parts[0]).input_ids) - 1 + + if i != 0 and getattr(tokenizer, 'legacy', False) and IS_TOKENIZER_GREATER_THAN_0_14: + round_len += 1 + instruction_len += 1 + + target[cur_len : cur_len + instruction_len] = IGNORE_INDEX + + cur_len += round_len + target[cur_len:] = IGNORE_INDEX + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_INDEX + print( + f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." 
+ f" (ignored)" + ) + + return dict( + input_ids=input_ids, + labels=targets, + ) + + +def preprocess_plain( + sources: Sequence[str], + tokenizer: transformers.PreTrainedTokenizer, +) -> Dict: + # add end signal and concatenate together + conversations = [] + for source in sources: + assert len(source) == 2 + assert DEFAULT_IMAGE_TOKEN in source[0]['value'] + source[0]['value'] = DEFAULT_IMAGE_TOKEN + conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep + conversations.append(conversation) + # tokenize conversations + input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations] + targets = copy.deepcopy(input_ids) + for target, source in zip(targets, sources): + tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer)) + target[:tokenized_len] = IGNORE_INDEX + + return dict(input_ids=input_ids, labels=targets) + + +def preprocess( + sources: Sequence[str], + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False +) -> Dict: + """ + Given a list of sources, each is a conversation list. This transform: + 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; + 2. Concatenate conversations together; + 3. Tokenize the concatenated conversation; + 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. + """ + if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN: + return preprocess_plain(sources, tokenizer) + if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.LLAMA_2: + return preprocess_llama_2(sources, tokenizer, has_image=has_image) + if conversation_lib.default_conversation.version.startswith("v1"): + return preprocess_v1(sources, tokenizer, has_image=has_image) + if conversation_lib.default_conversation.version == "mpt": + return preprocess_mpt(sources, tokenizer, has_image=has_image) + # add end signal and concatenate together + conversations = [] + for source in sources: + header = f"{conversation_lib.default_conversation.system}\n\n" + conversation = _add_speaker_and_signal(header, source) + conversations.append(conversation) + # tokenize conversations + def get_tokenize_len(prompts): + return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts] + + if has_image: + input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations] + else: + conversations_tokenized = _tokenize_fn(conversations, tokenizer) + input_ids = conversations_tokenized["input_ids"] + + targets = copy.deepcopy(input_ids) + for target, source in zip(targets, sources): + if has_image: + tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source]) + else: + tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"] + speakers = [sentence["from"] for sentence in source] + _mask_targets(target, tokenized_lens, speakers) + + return dict(input_ids=input_ids, labels=targets) + + +class LazySupervisedDataset(Dataset): + """Dataset for supervised fine-tuning.""" + + def __init__(self, data_path: str, + tokenizer: transformers.PreTrainedTokenizer, + data_args: DataArguments, + voco_token): + super(LazySupervisedDataset, self).__init__() + list_data_dict = json.load(open(data_path, "r")) + + rank0_print("Formatting inputs...Skip in lazy mode") + self.tokenizer = tokenizer + self.list_data_dict = list_data_dict + self.data_args = data_args + self.voco_token = voco_token + + def __len__(self): + 
return len(self.list_data_dict) + + @property + def lengths(self): + length_list = [] + for sample in self.list_data_dict: + img_tokens = 128 if 'image' in sample else 0 + length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens) + return length_list + + @property + def modality_lengths(self): + length_list = [] + for sample in self.list_data_dict: + cur_len = sum(len(conv['value'].split()) for conv in sample['conversations']) + cur_len = cur_len if 'image' in sample else -cur_len + length_list.append(cur_len) + return length_list + + def __getitem__(self, i) -> Dict[str, torch.Tensor]: + sources = self.list_data_dict[i] + if isinstance(i, int): + sources = [sources] + assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME + if 'image' in sources[0]: + image_file = self.list_data_dict[i]['image'] + image_folder = self.data_args.image_folder + processor = self.data_args.image_processor + # print(image_folder, image_file) + image = Image.open(os.path.join(image_folder, image_file)).convert('RGB') + if self.data_args.image_aspect_ratio == 'pad': + def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + image = expand2square(image, tuple(int(x*255) for x in processor.image_mean)) + image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0] # [3, 336, 336] + else: + image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + sources = preprocess_multimodal( + copy.deepcopy([e["conversations"] for e in sources]), + self.data_args) + else: + sources = copy.deepcopy([e["conversations"] for e in sources]) + data_dict = preprocess( + sources, + self.tokenizer, + has_image=('image' in self.list_data_dict[i])) + if isinstance(i, int): + data_dict = dict(input_ids=data_dict["input_ids"][0], + labels=data_dict["labels"][0]) + + # image exist in the data + if 'image' in self.list_data_dict[i]: + data_dict['image'] = image + elif self.data_args.is_multimodal: + # image does not exist in the data, but the model is multimodal + crop_size = self.data_args.image_processor.crop_size + data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width']) + return data_dict + +@dataclass +class DataCollatorForSupervisedDataset(object): + """Collate examples for supervised fine-tuning.""" + + tokenizer: transformers.PreTrainedTokenizer + + def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: + input_ids, labels = tuple([instance[key] for instance in instances] + for key in ("input_ids", "labels")) + input_ids = torch.nn.utils.rnn.pad_sequence( + input_ids, + batch_first=True, + padding_value=self.tokenizer.pad_token_id) + labels = torch.nn.utils.rnn.pad_sequence(labels, + batch_first=True, + padding_value=IGNORE_INDEX) + input_ids = input_ids[:, :self.tokenizer.model_max_length] + labels = labels[:, :self.tokenizer.model_max_length] + batch = dict( + input_ids=input_ids, + labels=labels, + attention_mask=input_ids.ne(self.tokenizer.pad_token_id), + ) + + if 'image' in instances[0]: + images = [instance['image'] for instance in instances] + if all(x is not None and x.shape == images[0].shape for x in images): + batch['images'] = 
torch.stack(images) + else: + batch['images'] = images + + return batch + + +def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, + data_args, voco_token) -> Dict: + """Make dataset and collator for supervised fine-tuning.""" + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + data_path=data_args.data_path, + data_args=data_args, + voco_token=voco_token) + data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) + return dict(train_dataset=train_dataset, + eval_dataset=None, + data_collator=data_collator) + + +def train(attn_implementation=None): + global local_rank + + parser = transformers.HfArgumentParser( + (ModelArguments, DataArguments, TrainingArguments)) + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + local_rank = training_args.local_rank + compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) + + bnb_model_from_pretrained_args = {} + if training_args.bits in [4, 8]: + from transformers import BitsAndBytesConfig + bnb_model_from_pretrained_args.update(dict( + device_map={"": training_args.device}, + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + quantization_config=BitsAndBytesConfig( + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + llm_int8_skip_modules=["mm_projector"], + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=compute_dtype, + bnb_4bit_use_double_quant=training_args.double_quant, + bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'} + ) + )) + + if model_args.vision_tower is not None: + if 'mpt' in model_args.model_name_or_path: + config = transformers.AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + config.attn_config['attn_impl'] = training_args.mpt_attn_impl + model = LlavaMptForCausalLM.from_pretrained( + model_args.model_name_or_path, + config=config, + cache_dir=training_args.cache_dir, + **bnb_model_from_pretrained_args + ) + else: + print("use LlavaLlamaForCausalLM!!!") + model = LlavaLlamaForCausalLM.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + attn_implementation=attn_implementation, + torch_dtype=(torch.bfloat16 if training_args.bf16 else None), + **bnb_model_from_pretrained_args + ) + else: + model = transformers.LlamaForCausalLM.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + attn_implementation=attn_implementation, + torch_dtype=(torch.bfloat16 if training_args.bf16 else None), + **bnb_model_from_pretrained_args + ) + model.config.use_cache = False + + if model_args.freeze_backbone: + model.model.requires_grad_(False) + + if training_args.bits in [4, 8]: + from peft import prepare_model_for_kbit_training + model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) + model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) + + if training_args.gradient_checkpointing: + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + else: + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + if training_args.lora_enable: + from peft import LoraConfig, get_peft_model + lora_config = LoraConfig( + r=training_args.lora_r, + 
lora_alpha=training_args.lora_alpha, + target_modules=find_all_linear_names(model), + lora_dropout=training_args.lora_dropout, + bias=training_args.lora_bias, + task_type="CAUSAL_LM", + ) + if training_args.bits == 16: + if training_args.bf16: + model.to(torch.bfloat16) + if training_args.fp16: + model.to(torch.float16) + rank0_print("Adding LoRA adapters...") + model = get_peft_model(model, lora_config) + + if 'mpt' in model_args.model_name_or_path: + tokenizer = transformers.AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + model_max_length=training_args.model_max_length, + padding_side="right" + ) + else: + tokenizer = transformers.AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + model_max_length=training_args.model_max_length, + padding_side="left", + use_fast=False, + ) + + if model_args.version == "v0": + if tokenizer.pad_token is None: + smart_tokenizer_and_embedding_resize( + special_tokens_dict=dict(pad_token="[PAD]"), + tokenizer=tokenizer, + model=model, + ) + elif model_args.version == "v0.5": + tokenizer.pad_token = tokenizer.unk_token + else: + tokenizer.pad_token = tokenizer.unk_token + if model_args.version in conversation_lib.conv_templates: + conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version] + else: + conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"] + + if len(tokenizer) == 32000 + 1: + assert ( + model.model.embed_tokens.weight.shape[0] + == 32000 + 1 + ) + assert model.lm_head.weight.shape[0] == 32000 + 1 + else: + print('add_voco_token 32001') + # Initialize voco token + tokenizer.add_special_tokens({"additional_special_tokens": [""]}) + model.resize_token_embeddings(len(tokenizer)) + voco_token = tokenizer.additional_special_tokens_ids[-1] + + if model_args.vision_tower is not None: + model.get_model().initialize_vision_modules( + model_args=model_args, + fsdp=training_args.fsdp + ) + + vision_tower = model.get_vision_tower() + vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device) + + data_args.image_processor = vision_tower.image_processor + data_args.is_multimodal = True + + model.config.image_aspect_ratio = data_args.image_aspect_ratio + model.config.tokenizer_padding_side = tokenizer.padding_side + model.config.tokenizer_model_max_length = tokenizer.model_max_length + + model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter + if model_args.tune_mm_mlp_adapter: + model.requires_grad_(False) + for p in model.get_model().mm_projector.parameters(): + p.requires_grad = True + + model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter + if training_args.freeze_mm_mlp_adapter: + for p in model.get_model().mm_projector.parameters(): + p.requires_grad = False + + if training_args.bits in [4, 8]: + model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device) + + model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end + model.config.mm_projector_lr = training_args.mm_projector_lr + training_args.use_im_start_end = model_args.mm_use_im_start_end + model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token + model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer) + + if training_args.bits in [4, 8]: + from peft.tuners.lora import LoraLayer + for name, module in model.named_modules(): + if 
isinstance(module, LoraLayer): + if training_args.bf16: + module = module.to(torch.bfloat16) + if 'norm' in name: + module = module.to(torch.float32) + if 'lm_head' in name or 'embed_tokens' in name: + if hasattr(module, 'weight'): + if training_args.bf16 and module.weight.dtype == torch.float32: + module = module.to(torch.bfloat16) + + data_module = make_supervised_data_module(tokenizer=tokenizer, + data_args=data_args, + voco_token=voco_token) + trainer = LLaVATrainer(model=model, + tokenizer=tokenizer, + args=training_args, + **data_module) + if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")): + trainer.train(resume_from_checkpoint=True) + else: + trainer.train() + trainer.save_state() + + model.config.use_cache = True + + if training_args.lora_enable: + state_dict = get_peft_state_maybe_zero_3( + model.named_parameters(), training_args.lora_bias + ) + non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3( + model.named_parameters() + ) + if training_args.local_rank == 0 or training_args.local_rank == -1: + model.config.save_pretrained(training_args.output_dir) + model.save_pretrained(training_args.output_dir, state_dict=state_dict) + torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, 'non_lora_trainables.bin')) + else: + safe_save_model_for_hf_trainer(trainer=trainer, + output_dir=training_args.output_dir) + + +if __name__ == "__main__": + train() diff --git a/playground/data.z01 b/playground/data.z01 new file mode 100644 index 0000000000000000000000000000000000000000..5907eea7a49a997f0ac02ee7a8e18bdb66592b2f --- /dev/null +++ b/playground/data.z01 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1ceb2163b22aa3902a15ce2437f706aa40fc1db138a7c4b716ed142198467cb +size 2147483648 diff --git a/playground/data.z02 b/playground/data.z02 new file mode 100644 index 0000000000000000000000000000000000000000..4b4c8432c6bd28336778e2b67b74098f2daca030 --- /dev/null +++ b/playground/data.z02 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e138f45d610b3d95a4fc0d80d58405dbc71ccde5aa40bd3f1cefd66732426cd +size 2147483648 diff --git a/playground/data.z04 b/playground/data.z04 new file mode 100644 index 0000000000000000000000000000000000000000..5ae7cc702f2cec2ef3e04b746c62360d73c199d3 --- /dev/null +++ b/playground/data.z04 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ed07a3fd033e030566485904b51dad83a7c7ba33f84b7f695234268e4bc88f3 +size 2147483648 diff --git a/playground/data.z05 b/playground/data.z05 new file mode 100644 index 0000000000000000000000000000000000000000..c3ee3c59cd6af0af26814f0b168c4e5fbac30152 --- /dev/null +++ b/playground/data.z05 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e05dd44b56489f31294ee72cd53584f8696989b28d004967e0f1a21346fcf0d8 +size 2147483648 diff --git a/playground/data.z06 b/playground/data.z06 new file mode 100644 index 0000000000000000000000000000000000000000..aa503f05201f7aa19fe282aa0e517ee4fbdb4aec --- /dev/null +++ b/playground/data.z06 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d46ed522f29fd1f5c9005802169aadf02f79f33a47a1fc4d92f7ae164729a72 +size 2147483648 diff --git a/playground/data.z07 b/playground/data.z07 new file mode 100644 index 0000000000000000000000000000000000000000..f2db9d0f6badcb2a845b13727f42091d8cbf4947 --- /dev/null +++ b/playground/data.z07 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f497e6b0591cd77b9c98f347ddaa30033fc68d7c11d7285e438e4e15bac4030 
+size 2147483648 diff --git a/playground/data.z08 b/playground/data.z08 new file mode 100644 index 0000000000000000000000000000000000000000..eb08cc78cbde75a4747533f9aabc3d013c2671ba --- /dev/null +++ b/playground/data.z08 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b86e4ec1984deb85e4328a651d577864d52a2d26b671bf0e14c24e6b0c7355a +size 2147483648 diff --git a/playground/data.z09 b/playground/data.z09 new file mode 100644 index 0000000000000000000000000000000000000000..18ae8368b1e5ec2dbcc989c95312f3d4b4a15321 --- /dev/null +++ b/playground/data.z09 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73ebf3eaf895f6b2887fd31715476d5c002aa35fd7a639d827d3c62f849c5fb4 +size 2147483648 diff --git a/playground/data.z10 b/playground/data.z10 new file mode 100644 index 0000000000000000000000000000000000000000..516417f1d527697cfc57145c7f000be41c71715d --- /dev/null +++ b/playground/data.z10 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3021d11b512ecef45411d98c050f4624c08e9b8ae9c1f92080193fba6ae2893 +size 2147483648 diff --git a/playground/data.z11 b/playground/data.z11 new file mode 100644 index 0000000000000000000000000000000000000000..f03254d7c5df3aa6ef41d7c8423c6de739e9626f --- /dev/null +++ b/playground/data.z11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9a561fe234189d270e848c9a20bb333b571d8d9be301478506098a696146748 +size 2147483648 diff --git a/playground/data.z12 b/playground/data.z12 new file mode 100644 index 0000000000000000000000000000000000000000..3a09a6996784a7e21f766481d55b180a05fffddb --- /dev/null +++ b/playground/data.z12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b7fc0a9d324138c93d23e9625c05e93f848ebb10ab97f69adcd3a31df1edf9d +size 2147483648 diff --git a/playground/data.z14 b/playground/data.z14 new file mode 100644 index 0000000000000000000000000000000000000000..e608ce27dde1bf39a57ecc73e28fb778a4e3b377 --- /dev/null +++ b/playground/data.z14 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a43c6a02297e630589bfdae80253abd36844e0a4bc10878f1605ac49111c14ce +size 2147483648 diff --git a/playground/data.z15 b/playground/data.z15 new file mode 100644 index 0000000000000000000000000000000000000000..1fa44e03fac814ef005e4fc3fa9fe5c95be4b2f8 --- /dev/null +++ b/playground/data.z15 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2e6ba5d89dc588bbc9961b487e9dc9f7eb0179b7d8b349b4135f5a279741ec1 +size 2147483648 diff --git a/playground/data.z16 b/playground/data.z16 new file mode 100644 index 0000000000000000000000000000000000000000..b751f5faa7e31177e3fe21f11ac52612b54e58a9 --- /dev/null +++ b/playground/data.z16 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:688f74622bf5d37b662328ffe3ef7633832239e85e59dad53dd05755e6aae80f +size 2147483648 diff --git a/playground/data.z18 b/playground/data.z18 new file mode 100644 index 0000000000000000000000000000000000000000..bf48dd4f552ad5d4758ad3d0504740420621203c --- /dev/null +++ b/playground/data.z18 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:734c4ddaf5495e9d46a46f94f5e5c66ca5ce99726dbfb07f910343066d7a2a5e +size 2147483648 diff --git a/playground/data.z19 b/playground/data.z19 new file mode 100644 index 0000000000000000000000000000000000000000..7c1024477da2228351dbeeefe1d79d08ceb516b8 --- /dev/null +++ b/playground/data.z19 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d4dab9b7cd28541bce4280501891d0f416dc5fae6c752f6cef953b319df3d2b5 +size 2147483648 diff --git a/playground/data.z20 b/playground/data.z20 new file mode 100644 index 0000000000000000000000000000000000000000..e314ab641705f0a0307969da035f6a000527d4dc --- /dev/null +++ b/playground/data.z20 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85e2c888eaae0ea32b2d7165df9396741f5de2e60538147b096ed8de1272761b +size 2147483648 diff --git a/playground/data.z21 b/playground/data.z21 new file mode 100644 index 0000000000000000000000000000000000000000..0c7f281faf51a2b4ea578d8d0933374368c60f0e --- /dev/null +++ b/playground/data.z21 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d0a980e330cb5c3ba0b0b11c18e6f423e2e6e9a6dd471a3cc9a2964f07f49d +size 2147483648 diff --git a/playground/data.z23 b/playground/data.z23 new file mode 100644 index 0000000000000000000000000000000000000000..5350936462873520e5b88466bcd9942ccc715548 --- /dev/null +++ b/playground/data.z23 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b73e57e422c0b13a245bffae386710156395896275be0f1a79f2d82f960cd9a +size 2147483648 diff --git a/playground/data.z24 b/playground/data.z24 new file mode 100644 index 0000000000000000000000000000000000000000..3f5a32cdfbbf50018d4805d4d716ec032837464c --- /dev/null +++ b/playground/data.z24 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78c020681da40773468f8acb64367b4a34434d387dd98e19d2bc92fb459547c7 +size 2147483648 diff --git a/playground/data.z25 b/playground/data.z25 new file mode 100644 index 0000000000000000000000000000000000000000..f426217a04ee4d2ddf16bd67ab8aa7ca0ae3a779 --- /dev/null +++ b/playground/data.z25 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf9201ad221bbea10b9ca674aa09749e9ac85c1aa38faef43ccb68635f6c2d00 +size 2147483648 diff --git a/playground/data.z26 b/playground/data.z26 new file mode 100644 index 0000000000000000000000000000000000000000..a8cbfe75875be78f131c6eb0b7acbfbb9caf4c76 --- /dev/null +++ b/playground/data.z26 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74f50f8cbad2263539c7abd33c238a7a5d3fee2925cacc14c939c5eab0abd345 +size 2147483648 diff --git a/playground/data.z27 b/playground/data.z27 new file mode 100644 index 0000000000000000000000000000000000000000..fd97382277afa306cd3680cc46839dba31485cc1 --- /dev/null +++ b/playground/data.z27 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e466a0f229e226b1f7375854e8828cf5f633c5c787e63affc75d56b965aaeda +size 2147483648 diff --git a/playground/data.z28 b/playground/data.z28 new file mode 100644 index 0000000000000000000000000000000000000000..04b34bdf4b0644f7094a3268821d3fd6bfe6c494 --- /dev/null +++ b/playground/data.z28 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:107c1a049083baad33211500368dbca8a309cceb28fe9c39d34203684b81cc53 +size 2147483648 diff --git a/playground/data.z30 b/playground/data.z30 new file mode 100644 index 0000000000000000000000000000000000000000..4a76b944047ffc2d154d38fc9377787f599c38be --- /dev/null +++ b/playground/data.z30 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f114630e80b9f0f943340b98042f797f81b186d53e4ebb6e87f0a661a8bfaf15 +size 2147483648 diff --git a/playground/data.z31 b/playground/data.z31 new file mode 100644 index 0000000000000000000000000000000000000000..834c3e30883b81c56782d95db0d5e86ec7346b57 --- /dev/null +++ b/playground/data.z31 @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9bc882b1e4fc0efd333993494cf144e70063f0893be4f9370d894524502fba91 +size 2147483648 diff --git a/playground/data.z33 b/playground/data.z33 new file mode 100644 index 0000000000000000000000000000000000000000..92b9889d6e0f36a96eabb6777da38ca0f96922b0 --- /dev/null +++ b/playground/data.z33 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ae213216938e1c50505a483666617e29b951d4d8b15dca9e6412744b1f96321 +size 2147483648 diff --git a/playground/data.z35 b/playground/data.z35 new file mode 100644 index 0000000000000000000000000000000000000000..626086784affb7fef6ef0d996ae45390733f0205 --- /dev/null +++ b/playground/data.z35 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0da4ea5b6beea7bd44be79e324a55c59223356c268cb2aff806ca2e20e1c488a +size 2147483648 diff --git a/playground/data.z36 b/playground/data.z36 new file mode 100644 index 0000000000000000000000000000000000000000..1e13b4ddec640f6db3cff6bc119997b1534b80a8 --- /dev/null +++ b/playground/data.z36 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f02365adb503d2b3ce660b954b434e4dbc8b38e09a15e1dd848c6c90191629f8 +size 2147483648 diff --git a/playground/data.z37 b/playground/data.z37 new file mode 100644 index 0000000000000000000000000000000000000000..a22fc27811de3603ce8ced89f529c343a116fdea --- /dev/null +++ b/playground/data.z37 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d6b13ecd1417adc6b71d12af762ff5329bb9cb13c2e082416995b2b066bf1a4 +size 2147483648 diff --git a/playground/data.z38 b/playground/data.z38 new file mode 100644 index 0000000000000000000000000000000000000000..bcd1d961d9fd7e4572d73461cf3ff20823643dbe --- /dev/null +++ b/playground/data.z38 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66c18b3dec39b3e16c92a53615ce2e49368b6973110b1dc094f9663e1ef62041 +size 2147483648 diff --git a/playground/data.z39 b/playground/data.z39 new file mode 100644 index 0000000000000000000000000000000000000000..add0aefcad0bc7626a31cd1b0e754fda02ab67b8 --- /dev/null +++ b/playground/data.z39 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4c43a55e90309f2b55af32d0f2f811fb34ddf0d75b535d89cd1b47e3d6963db +size 2147483648 diff --git a/playground/data.z40 b/playground/data.z40 new file mode 100644 index 0000000000000000000000000000000000000000..6711533a1fb3b989a3e954b8f30e4987881c0850 --- /dev/null +++ b/playground/data.z40 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59bc62ac2c79dfb6a1dc1a717f5256b21ed6465ac8f900e7cf0d7fd8b85ff006 +size 2147483648 diff --git a/playground/data.z41 b/playground/data.z41 new file mode 100644 index 0000000000000000000000000000000000000000..a0446568e9260a35f925f583e11704e0959b5e90 --- /dev/null +++ b/playground/data.z41 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3df6db4d69e7d592b3c0aa8ee3fa58066e794c0b12d8edab2e6c282cd027c1e2 +size 2147483648 diff --git a/playground/data.z42 b/playground/data.z42 new file mode 100644 index 0000000000000000000000000000000000000000..656339977a893b45da272950ce2fa24223145b5b --- /dev/null +++ b/playground/data.z42 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af46c6dc4e880e88dc71198edc8280b6b9b0cc1cbff9b260eb1d90caca764cec +size 2147483648 diff --git a/playground/data.z43 b/playground/data.z43 new file mode 100644 index 0000000000000000000000000000000000000000..69ff06d49eb0fd328e163bde65a710f9bc03fe41 --- /dev/null +++ 
b/playground/data.z43 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eba6265642a3fcdbee851485f1151b8ea306f2a69a7578e6782d2577932bbe2 +size 2147483648 diff --git a/playground/data.z44 b/playground/data.z44 new file mode 100644 index 0000000000000000000000000000000000000000..0a373f65a1d81006d4152ff3fd29ecd707454ff7 --- /dev/null +++ b/playground/data.z44 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d26327eb02286efd841b984f13e0d21fe26591f0359a5ee523522c71c35820e +size 2147483648 diff --git a/playground/data.z45 b/playground/data.z45 new file mode 100644 index 0000000000000000000000000000000000000000..06cbac438929ba6fa1583379a975f2d353b01d56 --- /dev/null +++ b/playground/data.z45 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b2583493550a387a5c4bcb48b9d6b2b38b59208049b55dbc621f50f74482a6b +size 2147483648 diff --git a/playground/data.z46 b/playground/data.z46 new file mode 100644 index 0000000000000000000000000000000000000000..8b62ac05b74238f84aa5eabe9045ab2d70ec46aa --- /dev/null +++ b/playground/data.z46 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6122e9dfa489d2f2cae3f2e4fc5f21738fc84077bec614a410b0405e79747f56 +size 2147483648 diff --git a/playground/data.z47 b/playground/data.z47 new file mode 100644 index 0000000000000000000000000000000000000000..e0eb5b0e527ffdd1a5cccf8e5ac3a6e2c97088b7 --- /dev/null +++ b/playground/data.z47 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f033cbb04e667c413cd8b8f72f13674d1dba09214b65a47f0e383c6066a1809 +size 2147483648 diff --git a/playground/data.z48 b/playground/data.z48 new file mode 100644 index 0000000000000000000000000000000000000000..53a655ad58a3d4260af4b721e9c019e919b58aea --- /dev/null +++ b/playground/data.z48 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7463e9905c7a7624d56136105b20d2d0e11d69a420e8d46026ac40daeae71b64 +size 2147483648 diff --git a/playground/data.z49 b/playground/data.z49 new file mode 100644 index 0000000000000000000000000000000000000000..fac056c42d1e7ca43fdbeaf5b98df0c03bd024a5 --- /dev/null +++ b/playground/data.z49 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5adde7638344aa156ff6b23b0e6819aefc34e28de9a22a8fcb37d922d24a2eca +size 2147483648 diff --git a/playground/data.z50 b/playground/data.z50 new file mode 100644 index 0000000000000000000000000000000000000000..0b293e013f51714e89b9b83b64281c3a922cc5a6 --- /dev/null +++ b/playground/data.z50 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43fb48b393b73017b1f032a8776a395cb9cf6f50e0c2a7691dad06b3f977774b +size 2147483648 diff --git a/playground/data.z51 b/playground/data.z51 new file mode 100644 index 0000000000000000000000000000000000000000..5ffb385adcdeee903dd0297d28670d68e53b5e22 --- /dev/null +++ b/playground/data.z51 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6dd8247192fd94bb01157876fb1c198be228a1548b2c76e3f471c1d6cae5aab +size 2147483648 diff --git a/playground/data.z52 b/playground/data.z52 new file mode 100644 index 0000000000000000000000000000000000000000..4f01d5e028a827b221cfb84c15fc3db64f6af6ec --- /dev/null +++ b/playground/data.z52 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff12d6b3a24e890d7178cd07dcaa375daa468cbcfd3065f8de94eec64e1cc51 +size 2147483648 diff --git a/playground/data.z53 b/playground/data.z53 new file mode 100644 index 
0000000000000000000000000000000000000000..3f02a9bbcb8a4f8ff1b251791260f25780f4a0be --- /dev/null +++ b/playground/data.z53 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a77308e45a49ecf53a279d509b85cd35688440624b137756e081d042df5e22f5 +size 2147483648 diff --git a/playground/data.z55 b/playground/data.z55 new file mode 100644 index 0000000000000000000000000000000000000000..2aae67f8b3f12f4acef0b72063c168bacd7b250d --- /dev/null +++ b/playground/data.z55 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ebdcd76e39ee9ce97f4fbf7c80750f579b96092348b6e2f1b40328b0a20dc2b +size 2147483648 diff --git a/playground/data.z57 b/playground/data.z57 new file mode 100644 index 0000000000000000000000000000000000000000..42f9006d4742294bb93225590efa6d5a6cb6727a --- /dev/null +++ b/playground/data.z57 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdda18e6d2f948e552d552a91a9e5d9049b18ed3495e35d28a1b77f3d3a9d2c6 +size 2147483648 diff --git a/playground/data.z58 b/playground/data.z58 new file mode 100644 index 0000000000000000000000000000000000000000..5947587469521a78f271d45f2a3531f7eb5962f6 --- /dev/null +++ b/playground/data.z58 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f753d8d60c922dc8b4dedc296308879dce09fe6ab7ab61b5b9aab1a6a4a7dc79 +size 2147483648 diff --git a/playground/data.z60 b/playground/data.z60 new file mode 100644 index 0000000000000000000000000000000000000000..80cf1d5d99084fba1d12cd8dcd5bb05cc14ef4d8 --- /dev/null +++ b/playground/data.z60 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50cd6bb43a1b066a5e73380c17d802a1670f11c414c5c036f06d288529f5f8fe +size 2147483648 diff --git a/playground/data.z61 b/playground/data.z61 new file mode 100644 index 0000000000000000000000000000000000000000..4cc42dd7aad0aad8f04e25e426fe34d3d03e4550 --- /dev/null +++ b/playground/data.z61 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba17578f50888cc4aa6bd6197a7632e5ed666476924dfcef3e91747cc8193ced +size 2147483648 diff --git a/playground/data.z62 b/playground/data.z62 new file mode 100644 index 0000000000000000000000000000000000000000..afb223a751caa872ad3ad8a550643d40f79300b7 --- /dev/null +++ b/playground/data.z62 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e10368eae999160750592e0e3c642f0719e596b6ad92f6a4ab9e040e19696c +size 2147483648 diff --git a/playground/data.z63 b/playground/data.z63 new file mode 100644 index 0000000000000000000000000000000000000000..2b49827f5bfb2131d8a462f49aafdd11a236fe1f --- /dev/null +++ b/playground/data.z63 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0932166e3143cd067c0553a7eaea319fb43f945324b9b8c51c4a596cde2bc6e +size 2147483648 diff --git a/playground/data.z64 b/playground/data.z64 new file mode 100644 index 0000000000000000000000000000000000000000..65f72ada48b192cdbd7d04c72f8ff6cbd8659c0a --- /dev/null +++ b/playground/data.z64 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33702143d5b6fe91535f34cdc9c16e2374aa7cf751fc033b85f6720bc123c57b +size 2147483648 diff --git a/playground/data.z65 b/playground/data.z65 new file mode 100644 index 0000000000000000000000000000000000000000..3a2156563c52d01c8796c2c2a7be7a9461f0712a --- /dev/null +++ b/playground/data.z65 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:415a624a5f7f37d423d36e0791d50d9d099cf62b9a803ddc6760edbc5bcfcd70 +size 2147483648 diff --git a/playground/data.z67 
b/playground/data.z67 new file mode 100644 index 0000000000000000000000000000000000000000..db9a72b26c5234bafce060306e5afb25020132e2 --- /dev/null +++ b/playground/data.z67 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4310ba90f39cacd73139a3c49f11be59f684ef47922e42d8765e8a9d976945a +size 2147483648 diff --git a/playground/data.z68 b/playground/data.z68 new file mode 100644 index 0000000000000000000000000000000000000000..f46d9f75ebc5fdf0a30e184537a5879ea74f5fcf --- /dev/null +++ b/playground/data.z68 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a260d4892d0246b4c7c84a8b409964a9e17545f4943e7a41d4f3a6496349e5e3 +size 2147483648 diff --git a/playground/data.z69 b/playground/data.z69 new file mode 100644 index 0000000000000000000000000000000000000000..136013e4d980b409d07115a1e4640e691226d27c --- /dev/null +++ b/playground/data.z69 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12c9f416490d229954b92210cdf60188f4ba4002024f8c84423bfa8a1f91f47a +size 2147483648 diff --git a/playground/data.z70 b/playground/data.z70 new file mode 100644 index 0000000000000000000000000000000000000000..38da67c654c4dbfb7d5c2d954437fbfddbebb894 --- /dev/null +++ b/playground/data.z70 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a4629776c7f54cdbe5a922314cbb940bb39cdf92403f3d81fd5da064286b878 +size 2147483648 diff --git a/playground/data.z71 b/playground/data.z71 new file mode 100644 index 0000000000000000000000000000000000000000..13b8704184045ec52f38153311ffddf9b197f5a7 --- /dev/null +++ b/playground/data.z71 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ad089bfb66de642edcab6d7fad34266220b2aed14ebbbd39f9541ef6db4c639 +size 2147483648 diff --git a/playground/data.z73 b/playground/data.z73 new file mode 100644 index 0000000000000000000000000000000000000000..9ca02376c3da93715578574167dc059ac2f5221c --- /dev/null +++ b/playground/data.z73 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a2e2a2f8da4b0a92a93ad269fd6ca61179161e03d0824fb30b8f6ccc3614b51 +size 2147483648 diff --git a/playground/data.z74 b/playground/data.z74 new file mode 100644 index 0000000000000000000000000000000000000000..8a24b261498d0cdd1a4ce87f0449f5015cb307fc --- /dev/null +++ b/playground/data.z74 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c944ac3962a74f5538f43064c98c6f110456ec5fe22ad7aa7e4c8637372b947c +size 2147483648 diff --git a/playground/data.z75 b/playground/data.z75 new file mode 100644 index 0000000000000000000000000000000000000000..ac25c1b95e935e24f851f78f701e2590666e866f --- /dev/null +++ b/playground/data.z75 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fc3b8b8ea198a511855f3758863cfd2e7d4191d28885a8f4b8f400058b9a746 +size 2147483648 diff --git a/playground/data.zip b/playground/data.zip new file mode 100644 index 0000000000000000000000000000000000000000..8eb27dc2153f7ff27f3bbd33c59061b77427fa52 --- /dev/null +++ b/playground/data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32ad1470b8f738d4c5cf99ab411b90d36665dbb20fd443e1e1ed15cfc033fef7 +size 550115970
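Two pieces of the training script in this patch do most of the supervised fine-tuning bookkeeping: the preprocess_* functions mask the system prompt and user turns with IGNORE_INDEX so the loss is computed only on assistant replies, and DataCollatorForSupervisedDataset right-pads input_ids with the pad token, pads labels with IGNORE_INDEX, and derives attention_mask from the pad id. The snippet below is a minimal, self-contained sketch of that pattern using a fake word-level tokenizer; IGNORE_INDEX = -100, the pad id of 0, and the toy prompts are assumptions made for the example, not values read from the repo.

# Toy illustration of the label-masking + collation pattern used by the
# preprocess_* functions and DataCollatorForSupervisedDataset in the patch.
# Self-contained: the "tokenizer" is a fake word-level one with made-up ids.
import torch
from torch.nn.utils.rnn import pad_sequence

IGNORE_INDEX = -100   # assumed; matches the usual HF label-ignore convention
PAD_TOKEN_ID = 0      # toy pad id for this sketch only

def fake_tokenize(text):
    # Word-level "tokenizer": one positive id per whitespace-separated word.
    return [hash(w) % 1000 + 1 for w in text.split()]

def build_example(user_turn, assistant_turn):
    """Tokenize one round and mask everything except the assistant reply."""
    prompt_ids = fake_tokenize("USER: " + user_turn + " ASSISTANT:")
    answer_ids = fake_tokenize(assistant_turn)
    input_ids = torch.tensor(prompt_ids + answer_ids)
    labels = input_ids.clone()
    labels[: len(prompt_ids)] = IGNORE_INDEX   # loss only on the answer tokens
    return dict(input_ids=input_ids, labels=labels)

def collate(instances):
    """Right-pad to the longest sequence, mirroring the data collator."""
    input_ids = pad_sequence([x["input_ids"] for x in instances],
                             batch_first=True, padding_value=PAD_TOKEN_ID)
    labels = pad_sequence([x["labels"] for x in instances],
                          batch_first=True, padding_value=IGNORE_INDEX)
    return dict(input_ids=input_ids,
                labels=labels,
                attention_mask=input_ids.ne(PAD_TOKEN_ID))

if __name__ == "__main__":
    batch = collate([
        build_example("What is in the image?", "A cat on a sofa."),
        build_example("Describe the scene.", "A busy street at night, with neon signs."),
    ])
    print(batch["input_ids"].shape, batch["labels"].shape)
    print(batch["attention_mask"])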
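For the LoRA path, find_all_linear_names in the patch walks model.named_modules(), keeps nn.Linear layers, skips anything under the multimodal keywords (mm_projector, vision_tower, vision_resampler), and drops lm_head before handing the names to LoraConfig's target_modules. The sketch below reruns that selection logic on a small stand-in model so the filtering is visible; the toy module layout is invented for the example and peft itself is not imported.

# Stand-alone rerun of the find_all_linear_names selection logic on a toy
# model. The module names below are invented for illustration only.
import torch.nn as nn

def find_all_linear_names(model, skip_keywords=('mm_projector', 'vision_tower', 'vision_resampler')):
    names = set()
    for name, module in model.named_modules():
        if any(k in name for k in skip_keywords):
            continue                        # leave multimodal parts out of LoRA
        if isinstance(module, nn.Linear):
            parts = name.split('.')
            names.add(parts[0] if len(parts) == 1 else parts[-1])
    names.discard('lm_head')                # excluded from LoRA targets (as in the patch's 16-bit note)
    return sorted(names)

class ToyLlava(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(8, 8)
        self.k_proj = nn.Linear(8, 8)
        self.mm_projector = nn.Sequential(nn.Linear(8, 8), nn.GELU(), nn.Linear(8, 8))
        self.lm_head = nn.Linear(8, 16)

print(find_all_linear_names(ToyLlava()))    # -> ['k_proj', 'q_proj']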