machuofan committed · 6125d7f
Parent(s): 0cc8a43

update readme

Files changed:
- README.md (+8 -17)
- config.json (+115 -0)
- external/dinov2_vits14_pretrain.pth (+0 -3)
- external/lpips_with_vgg.pth (+0 -3)
- external/weights-inception-2015-12-05-6726825d.pth (+0 -3)
README.md
CHANGED
@@ -11,14 +11,17 @@ Project Page: https://foundationvision.github.io/UniTok/
 
 Code: https://github.com/FoundationVision/UniTok
 
-
+<p align="center">
+<img src="https://github.com/FoundationVision/UniTok/blob/main/assets/teaser.png" width=93%>
+<p>
 
 UniTok encodes fine-grained details for generation and captures high-level semantics for understanding. It's compatible with autoregressive generative models (e.g., LlamaGen), multimodal understanding models (e.g., LLaVA), and unified MLLMs (e.g., Chameleon and Liquid).
 
-
 Built upon UniTok, we construct an MLLM capable of both multimodal generation and understanding, which sets a new state-of-the-art among unified autoregressive MLLMs. The weights of our MLLM will be released soon.
 
-
+<p align="center">
+<img src="https://github.com/FoundationVision/UniTok/blob/main/assets/samples.png" width=93%>
+<p>
 
 ## Performance
 
@@ -105,22 +108,10 @@ Built upon UniTok, we construct an MLLM capable of both multimodal generation an
 </table>
 
 
-
-we notice that random initialization leads to better downstream understanding performance.
-We thus release the model checkpoint of UniTok that is trained from scratch.
-
-
-
-## Model Weights
-
-| Model | Res. | #Token | Code Shape | rFID | Checkpoint |
-|:------------:|:----:|:------:|:-------------------------:|:----:|:------------:|
-| UniTok-Large | 256 | 256 | 16 $\times$ 16 $\times$ 8 | 0.39 | [Download](https://huggingface.co/FoundationVision/UniTok/blob/main/unitok_tokenizer.pth) |
-
+This repo is used for hosting UniTok's checkpoints.
 
-
+For more details or tutorials see https://github.com/FoundationVision/UniTok.
 
-(... rest of README content ...)
 
 ## Citation
 
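Editor's note: since this commit drops the in-README weights table, a quick pointer for readers of the diff. The tokenizer checkpoint linked above (`unitok_tokenizer.pth`) is hosted in this repo and can be fetched with `huggingface_hub`. The snippet below is only a hedged sketch that treats the file as a plain PyTorch checkpoint; the official model definition, loading utilities, and tutorials are in https://github.com/FoundationVision/UniTok.

```python
# Hedged sketch, not the official loader: download unitok_tokenizer.pth from
# this repo and inspect it as a raw PyTorch checkpoint.
import torch
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="FoundationVision/UniTok",   # this Hugging Face repo
    filename="unitok_tokenizer.pth",     # checkpoint linked in the old README table
)

# Assumption: the file deserializes to a (possibly nested) dict of tensors.
# On torch >= 2.6 you may need weights_only=False if it is a full pickle.
state = torch.load(ckpt_path, map_location="cpu")
print(type(state))
```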
config.json
ADDED
@@ -0,0 +1,115 @@
+{
+  "model": "vitamin_large",
+  "exp_name": "unitok_large_causal",
+  "output_dir": "local_output",
+  "resume_from": "",
+  "lpips_path": "",
+  "dino_path": "",
+  "fid_eval_src": "",
+  "fid_eval_dst": "",
+  "vis_img_dir": "asset/vis_imgs/",
+  "fid_feature_extractor": "",
+  "clip_pretrain_path": "",
+  "fp16": false,
+  "bf16": true,
+  "tf32": true,
+  "compile_model": false,
+  "ddp_static": false,
+  "grad_ckpt": true,
+  "grad_accu": 1,
+  "train_data": "",
+  "val_data": null,
+  "dataset_type": "webdataset",
+  "imagenet_val": "",
+  "imagenet_v2": null,
+  "subset_ratio": 1.0,
+  "img_size": 256,
+  "resize_ratio": 1.125,
+  "hflip": false,
+  "workers": 16,
+  "train_num_samples": 1280000000,
+  "train_data_upsampling_factors": null,
+  "dataset_resampled": false,
+  "use_aug": false,
+  "vocab_size": 32768,
+  "vocab_width": 64,
+  "vocab_norm": true,
+  "vq_beta": 0.25,
+  "num_codebooks": 8,
+  "quant_proj": "attn",
+  "embed_dim": 768,
+  "num_query": 0,
+  "use_clip_pretrain": false,
+  "patch_size": 16,
+  "drop_path": 0.1,
+  "text_width": 768,
+  "text_heads": 12,
+  "text_layers": 12,
+  "text_vocab_size": 49408,
+  "text_context_length": 77,
+  "local_loss": true,
+  "gather_with_grad": true,
+  "pretrained_clip": null,
+  "pretrained_clip_text": null,
+  "lock_text": false,
+  "lock_text_unlocked_layers": 0,
+  "lock_text_freeze_layer_norm": false,
+  "force_custom_text": false,
+  "force_custom_vision": false,
+  "zeroshot_eval_freq": 1,
+  "dino_depth": 12,
+  "dino_kernel_size": 9,
+  "disc_norm": "gn",
+  "disc_aug_prob": 1.0,
+  "disc_specnorm": false,
+  "step_disc_every": 1,
+  "vae_init": -0.5,
+  "vocab_init": -1,
+  "disc_init": -0.5,
+  "epoch": 1,
+  "local_bs": 56,
+  "vae_local_bs": 56,
+  "global_bs": 16384,
+  "lr": 0.0005,
+  "wd": 0.02,
+  "disc_lr": 2e-05,
+  "disc_wd": 0.2,
+  "grad_clip": 10,
+  "ema": 0.9999,
+  "warmup_iter": null,
+  "warmup_ep": 0.01,
+  "disc_start_ep": 0.375,
+  "disc_warmup_ep": 0.03,
+  "schedule": "cos",
+  "lr_start_ratio": 0.0,
+  "lr_end_ratio": 0.1,
+  "disc_lr_end_ratio": 0.1,
+  "custom_lr_multiplier": null,
+  "optimizer": "adamw",
+  "optim_eps": 1e-06,
+  "fuse_opt": false,
+  "optim_beta": "0.9_0.95",
+  "disc_optim_beta": "0.5_0.9",
+  "l1": 0.2,
+  "l2": 1.0,
+  "lp": 1.0,
+  "lpr": 48,
+  "ld": 0.4,
+  "le": 0.0,
+  "lq": 1.0,
+  "lc": 1.0,
+  "e_temp": 0.01,
+  "gada": 1,
+  "bcr": 4.0,
+  "bcr_cut": 0.2,
+  "dcrit": "hg",
+  "report_wandb": true,
+  "wandb_notes": null,
+  "run_id": null,
+  "eval_per_epoch": 8,
+  "dbg_unused_param": false,
+  "dbg_nan": false,
+  "seed": null,
+  "deterministic": false,
+  "same_seed_for_all_ranks": 0
+}
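The added config.json records the hyperparameters the released tokenizer was trained with. As a hedged sanity check (assuming the plain non-overlapping patch embedding implied by `img_size` and `patch_size`), the spatial token grid together with `num_codebooks` reproduces the 16 x 16 x 8 code shape and 256 tokens per image quoted for UniTok-Large in the old README table:

```python
# Sketch only: relate config.json fields to the code shape reported for
# UniTok-Large (16 x 16 x 8, 256 tokens at 256 px). Assumes grid = img_size //
# patch_size, i.e. a standard ViT-style patchifier; not an official script.
import json

with open("config.json") as f:
    cfg = json.load(f)

grid = cfg["img_size"] // cfg["patch_size"]      # 256 // 16 = 16
code_shape = (grid, grid, cfg["num_codebooks"])  # (16, 16, 8)
tokens_per_image = grid * grid                   # 256

print("code shape:", code_shape)
print("tokens per image:", tokens_per_image)
print("vocab_size / vocab_width:", cfg["vocab_size"], cfg["vocab_width"])
```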
external/dinov2_vits14_pretrain.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b938bf1bc15cd2ec0feacfe3a1bb553fe8ea9ca46a7e1d8d00217f29aef60cd9
-size 88283115
external/lpips_with_vgg.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ea641e3531bbfad58e97e06e8d6c17dcefc5621f17ade3f921c52f4766fb92fb
-size 58875208
external/weights-inception-2015-12-05-6726825d.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6726825d0af5f729cebd5821db510b11b1cfad8faad88a03f1befd49fb9129b2
-size 95628359
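The three deleted files are git-LFS pointers for auxiliary weights used during tokenizer training (a DINOv2 ViT-S/14 backbone, LPIPS with VGG, and the Inception network used for FID). The config above has matching, currently empty, path fields; the mapping below is inferred from the file names and is shown only as a hedged example of how one might point a local copy of the config at re-downloaded versions of these files.

```python
# Hedged example: fill the (currently empty) auxiliary-weight paths in
# config.json with local copies of the files removed by this commit.
# The field-to-file mapping is an assumption based on the names, not
# documented in this commit; the /path/to/ entries are placeholders.
import json

with open("config.json") as f:
    cfg = json.load(f)

cfg["dino_path"] = "/path/to/dinov2_vits14_pretrain.pth"            # DINOv2 ViT-S/14
cfg["lpips_path"] = "/path/to/lpips_with_vgg.pth"                   # LPIPS (VGG) weights
cfg["fid_feature_extractor"] = (
    "/path/to/weights-inception-2015-12-05-6726825d.pth"            # Inception weights for FID
)

with open("config_local.json", "w") as f:                           # hypothetical local copy
    json.dump(cfg, f, indent=2)
```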