khanhduong committed on
Commit
dc01755
·
verified ·
1 Parent(s): 1bab227

Upload 24_10_20_18_41_28_log.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. 24_10_20_18_41_28_log.txt +84 -0
24_10_20_18_41_28_log.txt ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "run": {
3
+ "task": "image_text_pretrain",
4
+ "lr_sched": "linear_warmup_cosine_lr",
5
+ "init_lr": 0.0001,
6
+ "min_lr": 8e-05,
7
+ "warmup_lr": 1e-06,
8
+ "weight_decay": 0.05,
9
+ "max_epoch": 50,
10
+ "batch_size_train": 120,
11
+ "batch_size_eval": 3,
12
+ "num_workers": 4,
13
+ "warmup_steps": 500,
14
+ "iters_per_epoch": 500,
15
+ "seed": 42,
16
+ "output_dir": "output/xraygpt_mimic_pretrain",
17
+ "amp": true,
18
+ "resume_ckpt_path": "xraygpt/output/xraygpt_mimic_pretrain/20241020174/checkpoint_18.pth",
19
+ "evaluate": false,
20
+ "train_splits": [
21
+ "train"
22
+ ],
23
+ "device": "cuda",
24
+ "world_size": 4,
25
+ "dist_url": "env://",
26
+ "distributed": true,
27
+ "rank": 0,
28
+ "gpu": 0,
29
+ "dist_backend": "nccl"
30
+ },
31
+ "model": {
32
+ "arch": "mini_gpt4",
33
+ "image_size": 224,
34
+ "drop_path_rate": 0,
35
+ "use_grad_checkpoint": false,
36
+ "vit_precision": "fp16",
37
+ "freeze_vit": true,
38
+ "freeze_qformer": true,
39
+ "num_query_token": 32,
40
+ "llama_model": "Joycean0301/Llama-3.2-3B-Instruct-Medical-Conversational",
41
+ "prompt": "",
42
+ "model_type": "pretrain_vicuna"
43
+ },
44
+ "preprocess": {
45
+ "vis_processor": {
46
+ "train": {
47
+ "name": "blip2_image_train",
48
+ "image_size": 224
49
+ },
50
+ "eval": {
51
+ "name": "blip2_image_eval",
52
+ "image_size": 224
53
+ }
54
+ },
55
+ "text_processor": {
56
+ "train": {
57
+ "name": "blip_caption"
58
+ },
59
+ "eval": {
60
+ "name": "blip_caption"
61
+ }
62
+ }
63
+ },
64
+ "datasets": {
65
+ "mimic": {
66
+ "data_type": "images",
67
+ "build_info": {
68
+ "storage": "dataset/mimic"
69
+ },
70
+ "vis_processor": {
71
+ "train": {
72
+ "name": "blip2_image_train",
73
+ "image_size": 224
74
+ }
75
+ },
76
+ "text_processor": {
77
+ "train": {
78
+ "name": "blip_caption"
79
+ }
80
+ }
81
+ }
82
+ }
83
+ }
84
+ {"train_lr": "0.000", "train_loss": "1.248"}